code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 3–1.05M | stringlengths 4–116 | stringlengths 3–942 | stringclasses 30 values | stringclasses 15 values | int32 3–1.05M
package dk.dbc.kafka.dispatch.sources;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.Optional;
/**
 * Source for reading an InputStream line by line.
 * @author Adam Tulinius
 */
public class InputStreamSource extends Source<String> {
private BufferedReader reader;
public InputStreamSource(InputStream inputStream) {
this.reader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
}
@Override
public Optional<String> next() throws IOException {
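// readLine() returns null at end of stream; surface that as Optional.empty()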
String line = reader.readLine();
if (line != null) {
return Optional.of(line);
} else {
return Optional.empty();
}
}
}
| DBCDK/kafka-dispatch | src/main/java/dk/dbc/kafka/dispatch/sources/InputStreamSource.java | Java | apache-2.0 | 818 |
/// <reference path="browser/ambient/angular-protractor/index.d.ts" />
/// <reference path="browser/ambient/angular/index.d.ts" />
/// <reference path="browser/ambient/assertion-error/index.d.ts" />
/// <reference path="browser/ambient/chai/index.d.ts" />
/// <reference path="browser/ambient/jquery/index.d.ts" />
/// <reference path="browser/ambient/lodash/index.d.ts" />
/// <reference path="browser/ambient/mocha/index.d.ts" />
/// <reference path="browser/ambient/selenium-webdriver/index.d.ts" />
/// <reference path="browser/ambient/sinon-chai/index.d.ts" />
/// <reference path="browser/ambient/sinon/index.d.ts" />
/// <reference path="browser/ambient/socket.io-client/index.d.ts" />
| jensim/myStream.js | typings/browser.d.ts | TypeScript | apache-2.0 | 693 |
# Piper paraense C.DC. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Piperales/Piperaceae/Piper/Piper paraense/README.md | Markdown | apache-2.0 | 170 |
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
package v3
import (
"context"
"time"
"github.com/rancher/lasso/pkg/client"
"github.com/rancher/lasso/pkg/controller"
v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
"github.com/rancher/wrangler/pkg/generic"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
)
type PodSecurityPolicyTemplateHandler func(string, *v3.PodSecurityPolicyTemplate) (*v3.PodSecurityPolicyTemplate, error)
type PodSecurityPolicyTemplateController interface {
generic.ControllerMeta
PodSecurityPolicyTemplateClient
OnChange(ctx context.Context, name string, sync PodSecurityPolicyTemplateHandler)
OnRemove(ctx context.Context, name string, sync PodSecurityPolicyTemplateHandler)
Enqueue(name string)
EnqueueAfter(name string, duration time.Duration)
Cache() PodSecurityPolicyTemplateCache
}
type PodSecurityPolicyTemplateClient interface {
Create(*v3.PodSecurityPolicyTemplate) (*v3.PodSecurityPolicyTemplate, error)
Update(*v3.PodSecurityPolicyTemplate) (*v3.PodSecurityPolicyTemplate, error)
Delete(name string, options *metav1.DeleteOptions) error
Get(name string, options metav1.GetOptions) (*v3.PodSecurityPolicyTemplate, error)
List(opts metav1.ListOptions) (*v3.PodSecurityPolicyTemplateList, error)
Watch(opts metav1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v3.PodSecurityPolicyTemplate, err error)
}
type PodSecurityPolicyTemplateCache interface {
Get(name string) (*v3.PodSecurityPolicyTemplate, error)
List(selector labels.Selector) ([]*v3.PodSecurityPolicyTemplate, error)
AddIndexer(indexName string, indexer PodSecurityPolicyTemplateIndexer)
GetByIndex(indexName, key string) ([]*v3.PodSecurityPolicyTemplate, error)
}
type PodSecurityPolicyTemplateIndexer func(obj *v3.PodSecurityPolicyTemplate) ([]string, error)
type podSecurityPolicyTemplateController struct {
controller controller.SharedController
client *client.Client
gvk schema.GroupVersionKind
groupResource schema.GroupResource
}
func NewPodSecurityPolicyTemplateController(gvk schema.GroupVersionKind, resource string, namespaced bool, controller controller.SharedControllerFactory) PodSecurityPolicyTemplateController {
c := controller.ForResourceKind(gvk.GroupVersion().WithResource(resource), gvk.Kind, namespaced)
return &podSecurityPolicyTemplateController{
controller: c,
client: c.Client(),
gvk: gvk,
groupResource: schema.GroupResource{
Group: gvk.Group,
Resource: resource,
},
}
}
func FromPodSecurityPolicyTemplateHandlerToHandler(sync PodSecurityPolicyTemplateHandler) generic.Handler {
return func(key string, obj runtime.Object) (ret runtime.Object, err error) {
var v *v3.PodSecurityPolicyTemplate
if obj == nil {
v, err = sync(key, nil)
} else {
v, err = sync(key, obj.(*v3.PodSecurityPolicyTemplate))
}
if v == nil {
return nil, err
}
return v, err
}
}
func (c *podSecurityPolicyTemplateController) Updater() generic.Updater {
return func(obj runtime.Object) (runtime.Object, error) {
newObj, err := c.Update(obj.(*v3.PodSecurityPolicyTemplate))
if newObj == nil {
return nil, err
}
return newObj, err
}
}
func UpdatePodSecurityPolicyTemplateDeepCopyOnChange(client PodSecurityPolicyTemplateClient, obj *v3.PodSecurityPolicyTemplate, handler func(obj *v3.PodSecurityPolicyTemplate) (*v3.PodSecurityPolicyTemplate, error)) (*v3.PodSecurityPolicyTemplate, error) {
if obj == nil {
return obj, nil
}
copyObj := obj.DeepCopy()
newObj, err := handler(copyObj)
if newObj != nil {
copyObj = newObj
}
if obj.ResourceVersion == copyObj.ResourceVersion && !equality.Semantic.DeepEqual(obj, copyObj) {
return client.Update(copyObj)
}
return copyObj, err
}
func (c *podSecurityPolicyTemplateController) AddGenericHandler(ctx context.Context, name string, handler generic.Handler) {
c.controller.RegisterHandler(ctx, name, controller.SharedControllerHandlerFunc(handler))
}
func (c *podSecurityPolicyTemplateController) AddGenericRemoveHandler(ctx context.Context, name string, handler generic.Handler) {
c.AddGenericHandler(ctx, name, generic.NewRemoveHandler(name, c.Updater(), handler))
}
func (c *podSecurityPolicyTemplateController) OnChange(ctx context.Context, name string, sync PodSecurityPolicyTemplateHandler) {
c.AddGenericHandler(ctx, name, FromPodSecurityPolicyTemplateHandlerToHandler(sync))
}
func (c *podSecurityPolicyTemplateController) OnRemove(ctx context.Context, name string, sync PodSecurityPolicyTemplateHandler) {
c.AddGenericHandler(ctx, name, generic.NewRemoveHandler(name, c.Updater(), FromPodSecurityPolicyTemplateHandlerToHandler(sync)))
}
func (c *podSecurityPolicyTemplateController) Enqueue(name string) {
c.controller.Enqueue("", name)
}
func (c *podSecurityPolicyTemplateController) EnqueueAfter(name string, duration time.Duration) {
c.controller.EnqueueAfter("", name, duration)
}
func (c *podSecurityPolicyTemplateController) Informer() cache.SharedIndexInformer {
return c.controller.Informer()
}
func (c *podSecurityPolicyTemplateController) GroupVersionKind() schema.GroupVersionKind {
return c.gvk
}
func (c *podSecurityPolicyTemplateController) Cache() PodSecurityPolicyTemplateCache {
return &podSecurityPolicyTemplateCache{
indexer: c.Informer().GetIndexer(),
resource: c.groupResource,
}
}
func (c *podSecurityPolicyTemplateController) Create(obj *v3.PodSecurityPolicyTemplate) (*v3.PodSecurityPolicyTemplate, error) {
result := &v3.PodSecurityPolicyTemplate{}
return result, c.client.Create(context.TODO(), "", obj, result, metav1.CreateOptions{})
}
func (c *podSecurityPolicyTemplateController) Update(obj *v3.PodSecurityPolicyTemplate) (*v3.PodSecurityPolicyTemplate, error) {
result := &v3.PodSecurityPolicyTemplate{}
return result, c.client.Update(context.TODO(), "", obj, result, metav1.UpdateOptions{})
}
func (c *podSecurityPolicyTemplateController) Delete(name string, options *metav1.DeleteOptions) error {
if options == nil {
options = &metav1.DeleteOptions{}
}
return c.client.Delete(context.TODO(), "", name, *options)
}
func (c *podSecurityPolicyTemplateController) Get(name string, options metav1.GetOptions) (*v3.PodSecurityPolicyTemplate, error) {
result := &v3.PodSecurityPolicyTemplate{}
return result, c.client.Get(context.TODO(), "", name, result, options)
}
func (c *podSecurityPolicyTemplateController) List(opts metav1.ListOptions) (*v3.PodSecurityPolicyTemplateList, error) {
result := &v3.PodSecurityPolicyTemplateList{}
return result, c.client.List(context.TODO(), "", result, opts)
}
func (c *podSecurityPolicyTemplateController) Watch(opts metav1.ListOptions) (watch.Interface, error) {
return c.client.Watch(context.TODO(), "", opts)
}
func (c *podSecurityPolicyTemplateController) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*v3.PodSecurityPolicyTemplate, error) {
result := &v3.PodSecurityPolicyTemplate{}
return result, c.client.Patch(context.TODO(), "", name, pt, data, result, metav1.PatchOptions{}, subresources...)
}
type podSecurityPolicyTemplateCache struct {
indexer cache.Indexer
resource schema.GroupResource
}
func (c *podSecurityPolicyTemplateCache) Get(name string) (*v3.PodSecurityPolicyTemplate, error) {
obj, exists, err := c.indexer.GetByKey(name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(c.resource, name)
}
return obj.(*v3.PodSecurityPolicyTemplate), nil
}
func (c *podSecurityPolicyTemplateCache) List(selector labels.Selector) (ret []*v3.PodSecurityPolicyTemplate, err error) {
err = cache.ListAll(c.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v3.PodSecurityPolicyTemplate))
})
return ret, err
}
func (c *podSecurityPolicyTemplateCache) AddIndexer(indexName string, indexer PodSecurityPolicyTemplateIndexer) {
utilruntime.Must(c.indexer.AddIndexers(map[string]cache.IndexFunc{
indexName: func(obj interface{}) (strings []string, e error) {
return indexer(obj.(*v3.PodSecurityPolicyTemplate))
},
}))
}
func (c *podSecurityPolicyTemplateCache) GetByIndex(indexName, key string) (result []*v3.PodSecurityPolicyTemplate, err error) {
objs, err := c.indexer.ByIndex(indexName, key)
if err != nil {
return nil, err
}
result = make([]*v3.PodSecurityPolicyTemplate, 0, len(objs))
for _, obj := range objs {
result = append(result, obj.(*v3.PodSecurityPolicyTemplate))
}
return result, nil
}
| rancher/rancher | pkg/generated/controllers/management.cattle.io/v3/podsecuritypolicytemplate.go | GO | apache-2.0 | 9,408 |
---
layout: default.html.ejs
title: Set body class
akBodyClassAdd: 'addedClass'
---
| akashacms/akasharender | test/documents/body-class.html.md | Markdown | apache-2.0 | 84 |
package com.planet_ink.coffee_mud.Commands;
import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.Libraries.interfaces.*;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Commands.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;
import java.util.*;
/*
Copyright 2000-2010 Bo Zimmerman
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
@SuppressWarnings("unchecked")
public class Go extends StdCommand
{
public Go(){}
private String[] access={"GO","WALK"};
public String[] getAccessWords(){return access;}
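// energy cost multiplier per room moved; move() expends energy this many times for players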
public int energyExpenseFactor(){return 1;}
public void ridersBehind(Vector riders,
Room sourceRoom,
Room destRoom,
int directionCode,
boolean flee)
{
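// pull each rider along to the destination room; a rider whose own move fails falls off its mount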
if(riders!=null)
for(int r=0;r<riders.size();r++)
{
Rider rider=(Rider)riders.elementAt(r);
if(rider instanceof MOB)
{
MOB rMOB=(MOB)rider;
if((rMOB.location()==sourceRoom)
||(rMOB.location()==destRoom))
{
boolean fallOff=false;
if(rMOB.location()==sourceRoom)
{
if(rMOB.riding()!=null)
rMOB.tell("You ride "+rMOB.riding().name()+" "+Directions.getDirectionName(directionCode)+".");
if(!move(rMOB,directionCode,flee,false,true,false))
fallOff=true;
}
if(fallOff)
{
if(rMOB.riding()!=null)
rMOB.tell("You fall off "+rMOB.riding().name()+"!");
rMOB.setRiding(null);
}
}
else
rMOB.setRiding(null);
}
else
if(rider instanceof Item)
{
Item rItem=(Item)rider;
if((rItem.owner()==sourceRoom)
||(rItem.owner()==destRoom))
destRoom.bringItemHere(rItem,-1,false);
else
rItem.setRiding(null);
}
}
}
public static Vector addRiders(Rider theRider,
Rideable riding,
Vector riders)
{
if((riding!=null)&&(riding.mobileRideBasis()))
for(int r=0;r<riding.numRiders();r++)
{
Rider rider=riding.fetchRider(r);
if((rider!=null)
&&(rider!=theRider)
&&(!riders.contains(rider)))
{
riders.addElement(rider);
if(rider instanceof Rideable)
addRiders(theRider,(Rideable)rider,riders);
}
}
return riders;
}
public Vector ridersAhead(Rider theRider,
Room sourceRoom,
Room destRoom,
int directionCode,
boolean flee)
{
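// move the mounts (and their other riders) first; returns null if a ridden mob refuses, which aborts the whole move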
Vector riders=new Vector();
Rideable riding=theRider.riding();
Vector rideables=new Vector();
while((riding!=null)&&(riding.mobileRideBasis()))
{
rideables.addElement(riding);
addRiders(theRider,riding,riders);
if((riding instanceof Rider)&&((Rider)riding).riding()!=theRider.riding())
riding=((Rider)riding).riding();
else
riding=null;
}
if(theRider instanceof Rideable)
addRiders(theRider,(Rideable)theRider,riders);
for(int r=riders.size()-1;r>=0;r--)
{
Rider R=(Rider)riders.elementAt(r);
if((R instanceof Rideable)&&(((Rideable)R).numRiders()>0))
{
if(!rideables.contains(R))
rideables.addElement(R);
riders.removeElement(R);
}
}
for(int r=0;r<rideables.size();r++)
{
riding=(Rideable)rideables.elementAt(r);
if((riding instanceof Item)
&&((sourceRoom).isContent((Item)riding)))
destRoom.bringItemHere((Item)riding,-1,false);
else
if((riding instanceof MOB)
&&((sourceRoom).isInhabitant((MOB)riding)))
{
((MOB)riding).tell("You are ridden "+Directions.getDirectionName(directionCode)+".");
if(!move(((MOB)riding),directionCode,false,false,true,false))
{
if(theRider instanceof MOB)
((MOB)theRider).tell(((MOB)riding).name()+" won't seem to let you go that way.");
r=r-1;
for(;r>=0;r--)
{
riding=(Rideable)rideables.elementAt(r);
if((riding instanceof Item)
&&((destRoom).isContent((Item)riding)))
sourceRoom.bringItemHere((Item)riding,-1,false);
else
if((riding instanceof MOB)
&&(((MOB)riding).isMonster())
&&((destRoom).isInhabitant((MOB)riding)))
sourceRoom.bringMobHere((MOB)riding,false);
}
return null;
}
}
}
return riders;
}
public boolean move(MOB mob,
int directionCode,
boolean flee,
boolean nolook,
boolean noriders)
{
return move(mob,directionCode,flee,nolook,noriders,false);
}
public boolean move(MOB mob,
int directionCode,
boolean flee,
boolean nolook,
boolean noriders,
boolean always)
{
if(directionCode<0) return false;
if(mob==null) return false;
Room thisRoom=mob.location();
if(thisRoom==null) return false;
Room destRoom=thisRoom.getRoomInDir(directionCode);
Exit exit=thisRoom.getExitInDir(directionCode);
if(destRoom==null)
{
mob.tell("You can't go that way.");
return false;
}
Exit opExit=thisRoom.getReverseExit(directionCode);
String directionName=(directionCode==Directions.GATE)&&(exit!=null)?"through "+exit.name():Directions.getDirectionName(directionCode);
String otherDirectionName=(Directions.getOpDirectionCode(directionCode)==Directions.GATE)&&(exit!=null)?exit.name():Directions.getFromDirectionName(Directions.getOpDirectionCode(directionCode));
int generalMask=always?CMMsg.MASK_ALWAYS:0;
int leaveCode=generalMask|CMMsg.MSG_LEAVE;
if(flee)
leaveCode=generalMask|CMMsg.MSG_FLEE;
CMMsg enterMsg=null;
CMMsg leaveMsg=null;
if((mob.riding()!=null)&&(mob.riding().mobileRideBasis()))
{
enterMsg=CMClass.getMsg(mob,destRoom,exit,generalMask|CMMsg.MSG_ENTER,null,CMMsg.MSG_ENTER,null,CMMsg.MSG_ENTER,"<S-NAME> ride(s) "+mob.riding().name()+" in from "+otherDirectionName+".");
leaveMsg=CMClass.getMsg(mob,thisRoom,opExit,leaveCode,((flee)?"You flee "+directionName+".":null),leaveCode,null,leaveCode,((flee)?"<S-NAME> flee(s) with "+mob.riding().name()+" "+directionName+".":"<S-NAME> ride(s) "+mob.riding().name()+" "+directionName+"."));
}
else
{
enterMsg=CMClass.getMsg(mob,destRoom,exit,generalMask|CMMsg.MSG_ENTER,null,CMMsg.MSG_ENTER,null,CMMsg.MSG_ENTER,"<S-NAME> "+CMLib.flags().dispositionString(mob,CMFlagLibrary.flag_arrives)+" from "+otherDirectionName+".");
leaveMsg=CMClass.getMsg(mob,thisRoom,opExit,leaveCode,((flee)?"You flee "+directionName+".":null),leaveCode,null,leaveCode,((flee)?"<S-NAME> flee(s) "+directionName+".":"<S-NAME> "+CMLib.flags().dispositionString(mob,CMFlagLibrary.flag_leaves)+" "+directionName+"."));
}
boolean gotoAllowed=CMSecurity.isAllowed(mob,destRoom,"GOTO");
if((exit==null)&&(!gotoAllowed))
{
mob.tell("You can't go that way.");
return false;
}
else
if(exit==null)
thisRoom.showHappens(CMMsg.MSG_OK_VISUAL,"The area to the "+directionName+" shimmers and becomes transparent.");
else
if((!exit.okMessage(mob,enterMsg))&&(!gotoAllowed))
return false;
else
if(!leaveMsg.target().okMessage(mob,leaveMsg)&&(!gotoAllowed))
return false;
else
if((opExit!=null)&&(!opExit.okMessage(mob,leaveMsg))&&(!gotoAllowed))
return false;
else
if(!enterMsg.target().okMessage(mob,enterMsg)&&(!gotoAllowed))
return false;
else
if(!mob.okMessage(mob,enterMsg)&&(!gotoAllowed))
return false;
if(mob.riding()!=null)
{
if((!mob.riding().okMessage(mob,enterMsg))&&(!gotoAllowed))
return false;
}
else
{
if(!mob.isMonster())
for(int i=0;i<energyExpenseFactor();i++)
mob.curState().expendEnergy(mob,mob.maxState(),true);
if((!flee)&&(!mob.curState().adjMovement(-1,mob.maxState()))&&(!gotoAllowed))
{
mob.tell("You are too tired.");
return false;
}
if((mob.soulMate()==null)&&(mob.playerStats()!=null)&&(mob.riding()==null)&&(mob.location()!=null))
mob.playerStats().adjHygiene(mob.location().pointsPerMove(mob));
}
Vector riders=null;
if(!noriders)
{
riders=ridersAhead(mob,(Room)leaveMsg.target(),(Room)enterMsg.target(),directionCode,flee);
if(riders==null) return false;
}
Vector enterTrailersSoFar=null;
Vector leaveTrailersSoFar=null;
if((leaveMsg.trailerMsgs()!=null)&&(leaveMsg.trailerMsgs().size()>0))
{
leaveTrailersSoFar=new Vector();
leaveTrailersSoFar.addAll(leaveMsg.trailerMsgs());
leaveMsg.trailerMsgs().clear();
}
if((enterMsg.trailerMsgs()!=null)&&(enterMsg.trailerMsgs().size()>0))
{
enterTrailersSoFar=new Vector();
enterTrailersSoFar.addAll(enterMsg.trailerMsgs());
enterMsg.trailerMsgs().clear();
}
if(exit!=null) exit.executeMsg(mob,enterMsg);
if(mob.location()!=null) mob.location().delInhabitant(mob);
((Room)leaveMsg.target()).send(mob,leaveMsg);
if(enterMsg.target()==null)
{
((Room)leaveMsg.target()).bringMobHere(mob,false);
mob.tell("You can't go that way.");
return false;
}
mob.setLocation((Room)enterMsg.target());
((Room)enterMsg.target()).addInhabitant(mob);
((Room)enterMsg.target()).send(mob,enterMsg);
if(opExit!=null) opExit.executeMsg(mob,leaveMsg);
if(!nolook)
{
CMLib.commands().postLook(mob,true);
if((!mob.isMonster())
&&(CMath.bset(mob.getBitmap(),MOB.ATT_AUTOWEATHER))
&&(((Room)enterMsg.target())!=null)
&&((thisRoom.domainType()&Room.INDOORS)>0)
&&((((Room)enterMsg.target()).domainType()&Room.INDOORS)==0)
&&(((Room)enterMsg.target()).getArea().getClimateObj().weatherType(((Room)enterMsg.target()))!=Climate.WEATHER_CLEAR)
&&(((Room)enterMsg.target()).isInhabitant(mob)))
mob.tell("\n\r"+((Room)enterMsg.target()).getArea().getClimateObj().weatherDescription(((Room)enterMsg.target())));
}
if(!noriders)
ridersBehind(riders,(Room)leaveMsg.target(),(Room)enterMsg.target(),directionCode,flee);
if(!flee)
for(int f=0;f<mob.numFollowers();f++)
{
MOB follower=mob.fetchFollower(f);
if(follower!=null)
{
if((follower.amFollowing()==mob)
&&((follower.location()==thisRoom)||(follower.location()==destRoom)))
{
if((follower.location()==thisRoom)&&(CMLib.flags().aliveAwakeMobile(follower,true)))
{
if(CMath.bset(follower.getBitmap(),MOB.ATT_AUTOGUARD))
thisRoom.show(follower,null,null,CMMsg.MSG_OK_ACTION,"<S-NAME> remain(s) on guard here.");
else
{
follower.tell("You follow "+mob.name()+" "+Directions.getDirectionName(directionCode)+".");
if(!move(follower,directionCode,false,false,false,false))
{
//follower.setFollowing(null);
}
}
}
}
//else
// follower.setFollowing(null);
}
}
if((leaveTrailersSoFar!=null)&&(leaveMsg.target() instanceof Room))
for(int t=0;t<leaveTrailersSoFar.size();t++)
((Room)leaveMsg.target()).send(mob,(CMMsg)leaveTrailersSoFar.elementAt(t));
if((enterTrailersSoFar!=null)&&(enterMsg.target() instanceof Room))
for(int t=0;t<enterTrailersSoFar.size();t++)
((Room)enterMsg.target()).send(mob,(CMMsg)enterTrailersSoFar.elementAt(t));
return true;
}
protected Command stander=null;
protected Vector ifneccvec=null;
public void standIfNecessary(MOB mob, int metaFlags)
throws java.io.IOException
{
if((ifneccvec==null)||(ifneccvec.size()!=2))
{
ifneccvec=new Vector();
ifneccvec.addElement("STAND");
ifneccvec.addElement("IFNECESSARY");
}
if(stander==null) stander=CMClass.getCommand("Stand");
if((stander!=null)&&(ifneccvec!=null))
stander.execute(mob,ifneccvec,metaFlags);
}
public boolean execute(MOB mob, Vector commands, int metaFlags)
throws java.io.IOException
{
standIfNecessary(mob,metaFlags);
if((commands.size()>3)
&&(commands.firstElement() instanceof Integer))
{
return move(mob,
((Integer)commands.elementAt(0)).intValue(),
((Boolean)commands.elementAt(1)).booleanValue(),
((Boolean)commands.elementAt(2)).booleanValue(),
((Boolean)commands.elementAt(3)).booleanValue(),false);
}
String whereStr=CMParms.combine(commands,1);
Room R=mob.location();
int direction=-1;
if(whereStr.equalsIgnoreCase("OUT"))
{
if(!CMath.bset(R.domainType(),Room.INDOORS))
{
mob.tell("You aren't indoors.");
return false;
}
for(int d=Directions.NUM_DIRECTIONS()-1;d>=0;d--)
if((R.getExitInDir(d)!=null)
&&(R.getRoomInDir(d)!=null)
&&(!CMath.bset(R.getRoomInDir(d).domainType(),Room.INDOORS)))
{
if(direction>=0)
{
mob.tell("Which way out? Try North, South, East, etc..");
return false;
}
direction=d;
}
if(direction<0)
{
mob.tell("There is no direct way out of this place. Try a direction.");
return false;
}
}
if(direction<0)
direction=Directions.getGoodDirectionCode(whereStr);
if(direction<0)
{
Environmental E=null;
if(R!=null)
E=R.fetchFromRoomFavorItems(null,whereStr,Item.WORNREQ_UNWORNONLY);
if(E instanceof Rideable)
{
Command C=CMClass.getCommand("Enter");
return C.execute(mob,commands,metaFlags);
}
if((E instanceof Exit)&&(R!=null))
{
for(int d=Directions.NUM_DIRECTIONS()-1;d>=0;d--)
if(R.getExitInDir(d)==E)
{ direction=d; break;}
}
}
String doing=(String)commands.elementAt(0);
if(direction>=0)
move(mob,direction,false,false,false,false);
else
{
boolean doneAnything=false;
if(commands.size()>2)
for(int v=1;v<commands.size();v++)
{
int num=1;
String s=(String)commands.elementAt(v);
if(CMath.s_int(s)>0)
{
num=CMath.s_int(s);
v++;
if(v<commands.size())
s=(String)commands.elementAt(v);
}
else
if(("NSEWUDnsewud".indexOf(s.charAt(s.length()-1))>=0)
&&(CMath.s_int(s.substring(0,s.length()-1))>0))
{
num=CMath.s_int(s.substring(0,s.length()-1));
s=s.substring(s.length()-1);
}
direction=Directions.getGoodDirectionCode(s);
if(direction>=0)
{
doneAnything=true;
for(int i=0;i<num;i++)
{
if(mob.isMonster())
{
if(!move(mob,direction,false,false,false,false))
return false;
}
else
{
Vector V=new Vector();
V.addElement(doing);
V.addElement(Directions.getDirectionName(direction));
mob.enqueCommand(V,metaFlags,0);
}
}
}
else
break;
}
if(!doneAnything)
mob.tell(CMStrings.capitalizeAndLower(doing)+" which direction?\n\rTry north, south, east, west, up, or down.");
}
return false;
}
public double actionsCost(MOB mob, Vector cmds){
double cost=CMath.div(CMProps.getIntVar(CMProps.SYSTEMI_DEFCMDTIME),100.0);
if((mob!=null)&&(CMath.bset(mob.getBitmap(),MOB.ATT_AUTORUN)))
cost /= 4.0;
return cost;
}
public boolean canBeOrdered(){return true;}
}
| robjcaskey/Unofficial-Coffee-Mud-Upstream | com/planet_ink/coffee_mud/Commands/Go.java | Java | apache-2.0 | 16,256 |
# hamcrest-string-matcher
Custom hamcrest matcher that counts occurrences of a substring.
## Current Release
The current release is 0.0.6.
## Basic Usage
```java
import static io.zinx.hamcrest.string.pattern.OccurrenceMatcher.hasOccurrenceCount;
...
@Test
public void testMatch() {
String item = "Fred,Joe,John,Tim";
String searchString = ",";
int count = 3;
assertThat(item, hasOccurrenceCount(count, searchString));
}
```
## Dependency
The code uses the Apache Commons Lang3 library.
## Building with Gradle
- Clone the repo from github.
- Run `gradlew build`.
## Artifact available through Jitpack.io
The artifact is available on <https://jitpack.io/>.
#### Gradle
To include it using gradle, do the following:
```groovy
repositories {
maven {
url "https://jitpack.io"
}
}
```
```groovy
dependencies {
compile 'com.github.zinx-io:hamcrest-string-matcher:0.0.6'
}
```
#### Maven
To include it using maven, do the following:
```xml
<repository>
<id>jitpack.io</id>
<url>https://jitpack.io</url>
</repository>
```
```xml
<dependency>
<groupId>com.github.zinx-io</groupId>
<artifactId>hamcrest-string-matcher</artifactId>
<version>0.0.6</version>
</dependency>
```
| zinx-io/hamcrest-string-matcher | README.md | Markdown | apache-2.0 | 1,251 |
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
from __future__ import print_function
import wx
import threading
import lcm
import random
import Forseti
import configurator
BLUE = (24, 25, 141)
GOLD = (241, 169, 50)
class TeamPanel(wx.Panel):
def __init__(self, remote, letter, number, name, colour, *args, **kwargs):
super(TeamPanel, self).__init__(*args, **kwargs)
self.remote = remote
self.InitUI(letter, number, name, colour)
def InitUI(self, letter, number, name, colour=None):
if colour is not None:
self.SetBackgroundColour(colour)
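# size the text controls from the screen DC's character metrics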
dc = wx.ScreenDC()
self.num_ctrl = wx.TextCtrl(self, size=(dc.GetCharWidth() * 2, dc.GetCharHeight()))
self.num_ctrl.AppendText(str(number))
self.get_button = wx.Button(self, label='Get', size=(dc.GetCharWidth() * 2, dc.GetCharHeight()))
self.get_button.Bind(wx.EVT_BUTTON, self.do_get_name)
self.name_ctrl = wx.TextCtrl(self, size=(dc.GetCharWidth() * 16,
dc.GetCharHeight()))
self.name_ctrl.AppendText(name)
name_num_box = wx.BoxSizer(wx.HORIZONTAL)
name_num_box.Add(wx.StaticText(self, label=letter,
size=(int(dc.GetCharWidth() * 0.6), dc.GetCharHeight())))
name_num_box.Add(self.num_ctrl)
name_num_box.Add(self.get_button)
name_num_box.Add(self.name_ctrl)
#button_box = wx.BoxSizer(wx.HORIZONTAL)
#button_box.Add(wx.Button(self, label='Reset'))
#button_box.Add(wx.Button(self, label='Configure'))
#button_box.Add(wx.Button(self, label='Disable'))
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(name_num_box, flag=wx.CENTER)
#vbox.Add(button_box, flag=wx.CENTER)
self.SetSizer(self.vbox)
self.Show(True)
def do_get_name(self, event):
self.name = configurator.get_team_name(self.number)
@property
def name(self):
return self.name_ctrl.GetValue()
@name.setter
def name(self, val):
self.name_ctrl.SetValue(val)
@property
def number(self):
try:
return int(self.num_ctrl.GetValue())
except ValueError:
return 0
@number.setter
def number(self, val):
self.num_ctrl.SetValue(str(val))
class MatchControl(wx.Panel):
def __init__(self, remote, *args, **kwargs):
super(MatchControl, self).__init__(*args, **kwargs)
self.remote = remote
self.InitUI()
def InitUI(self):
vbox = wx.BoxSizer(wx.VERTICAL)
dc = wx.ScreenDC()
match_number = wx.BoxSizer(wx.HORIZONTAL)
match_number.Add(wx.StaticText(self, label='Match #'))
self.match_num_ctrl = wx.TextCtrl(self, size=(dc.GetCharWidth() * 2,
dc.GetCharHeight()))
match_number.Add(self.match_num_ctrl)
vbox.Add(match_number, flag=wx.CENTER)
teamSizer = wx.GridSizer(3, 2)
self.team_panels = [
TeamPanel(self.remote, 'A', 0, 'Unknown Team', BLUE, self),
TeamPanel(self.remote, 'C', 0, 'Unknown Team', GOLD, self),
TeamPanel(self.remote, 'B', 0, 'Unknown Team', BLUE, self),
TeamPanel(self.remote, 'D', 0, 'Unknown Team', GOLD, self),
]
teamSizer.AddMany(
[wx.StaticText(self, label='Blue Team'),
wx.StaticText(self, label='Gold Team')] +
[(panel, 0) for panel in self.team_panels])
vbox.Add(teamSizer, flag=wx.CENTER)
buttons = wx.BoxSizer(wx.HORIZONTAL)
self.init_button = wx.Button(self, label='Init')
self.init_button.Bind(wx.EVT_BUTTON, self.do_init)
self.go_button = wx.Button(self, label='GO!')
self.go_button.Bind(wx.EVT_BUTTON, self.do_go)
self.pause_button = wx.Button(self, label='Pause')
self.pause_button.Bind(wx.EVT_BUTTON, self.do_pause)
#self.save_button = wx.Button(self, label='Save')
#self.save_button.Bind(wx.EVT_BUTTON, self.do_save)
self.time_text = wx.StaticText(self, label='0:00')
self.stage_text = wx.StaticText(self, label='Unknown')
self.remote.time_text = self.time_text
#buttons.Add(self.save_button, flag=wx.LEFT)
buttons.Add(self.init_button)
buttons.Add(self.go_button)
buttons.Add(self.pause_button)
buttons.Add(self.time_text)
buttons.Add(self.stage_text)
vbox.Add(buttons, flag=wx.CENTER)
self.SetSizer(vbox)
self.Show(True)
def do_go(self, e):
self.remote.do_go()
def do_pause(self, e):
self.remote.do_pause()
def do_save(self, e):
self.remote.do_save(self.get_match())
def do_init(self, e):
self.remote.do_init(self.get_match())
def _set_match_panel(self, match, team_idx, panel_idx):
match.team_numbers[team_idx] = self.team_panels[panel_idx].number
match.team_names[team_idx] = self.team_panels[panel_idx].name
def _set_panel_match(self, match, team_idx, panel_idx):
self.team_panels[panel_idx].number = match.team_numbers[team_idx]
self.team_panels[panel_idx].name = match.team_names[team_idx]
def get_match(self):
match = Forseti.Match()
self._set_match_panel(match, 0, 0)
self._set_match_panel(match, 1, 2)
self._set_match_panel(match, 2, 1)
self._set_match_panel(match, 3, 3)
try:
match.match_number = int(self.match_num_ctrl.GetValue())
except ValueError:
match.match_number = random.getrandbits(31)
return match
def set_match(self, match):
self._set_panel_match(match, 0, 0)
self._set_panel_match(match, 1, 2)
self._set_panel_match(match, 2, 1)
self._set_panel_match(match, 3, 3)
self.match_num_ctrl.SetValue(str(match.match_number))
def set_time(self, match):
self.time_text.SetLabel(format_time(match.game_time_so_far))
self.stage_text.SetLabel(match.stage_name)
class ScheduleControl(wx.Panel):
def __init__(self, remote, match_control, *args, **kwargs):
self.remote = remote
super(ScheduleControl, self).__init__(*args, **kwargs)
self.InitUI()
self.remote.match_list_box = self.match_list
self.match_control = match_control
def InitUI(self):
self.match_list = wx.ListBox(self)
self.match_list.Bind(wx.EVT_LISTBOX, self.choose_match)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.load_button = wx.Button(self, label='Load All')
self.load_button.Bind(wx.EVT_BUTTON, self.do_load)
hbox.Add(self.load_button)
self.clear_first = wx.CheckBox(self, label='Clear first')
self.clear_first.SetValue(True)
hbox.Add(self.clear_first)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(self.match_list, 1, wx.EXPAND)
vbox.Add(hbox)
self.SetSizer(vbox)
self.Show(True)
def do_load(self, e):
self.remote.do_load(self.clear_first.GetValue())
def choose_match(self, event):
self.match_control.set_match(event.GetClientData())
class MainWindow(wx.Frame):
def __init__(self, remote, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.remote = remote
self.InitUI()
def InitUI(self):
menubar = wx.MenuBar()
fileMenu = wx.Menu()
fitem = fileMenu.Append(wx.ID_EXIT, 'Quit', 'Quit application')
menubar.Append(fileMenu, '&File')
self.SetMenuBar(menubar)
match_control = MatchControl(self.remote, self)
schedule_control = ScheduleControl(self.remote, match_control, self)
self.remote.match_control = match_control
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(match_control, 0, wx.ALIGN_CENTER | wx.ALIGN_TOP, 8)
vbox.Add(schedule_control, 1, wx.EXPAND | wx.ALIGN_CENTER | wx.ALL, 8)
self.Bind(wx.EVT_MENU, self.OnQuit, fitem)
self.SetSize((800, 600))
self.SetSizer(vbox)
self.SetTitle('Forseti Dashboard')
self.Centre()
self.Show(True)
def OnQuit(self, e):
self.Close()
def format_match(match):
print(match.match_number)
print(match.team_names)
print(match.team_numbers)
return '{}: {} ({}) & {} ({}) vs. {} ({}) & {} ({})'.format(
match.match_number,
match.team_names[0], match.team_numbers[0],
match.team_names[1], match.team_numbers[1],
match.team_names[2], match.team_numbers[2],
match.team_names[3], match.team_numbers[3],
)
class Remote(object):
def __init__(self):
self.lc = lcm.LCM('udpm://239.255.76.67:7667?ttl=1')
self.lc.subscribe('Schedule/Schedule', self.handle_schedule)
self.lc.subscribe('Timer/Time', self.handle_time)
self.match_list_box = None
self.match_control = None
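# run the LCM receive loop on a daemon thread so it never blocks GUI shutdown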
self.thread = threading.Thread(target=self._loop)
self.thread.daemon = True
def start(self):
self.thread.start()
def _loop(self):
while True:
try:
self.lc.handle()
except Exception as ex:
print('Got exception while handling lcm message', ex)
def handle_schedule(self, channel, data):
msg = Forseti.Schedule.decode(data)
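# keep the decoded match as client data so choose_match() can hand it to the match panel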
for i in range(msg.num_matches):
self.match_list_box.Insert(format_match(msg.matches[i]), i,
msg.matches[i])
def handle_time(self, channel, data):
msg = Forseti.Time.decode(data)
#wx.CallAfter(self.time_text.SetLabel, format_time(msg.game_time_so_far))
wx.CallAfter(self.match_control.set_time, msg)
def do_load(self, clear_first):
if clear_first:
self.match_list_box.Clear()
msg = Forseti.ScheduleLoadCommand()
msg.clear_first = clear_first
print('Requesting load')
self.lc.publish('Schedule/Load', msg.encode())
def do_save(self, match):
self.lc.publish('Match/Save', match.encode())
def do_init(self, match):
self.lc.publish('Match/Init', match.encode())
def do_time_ctrl(self, command):
msg = Forseti.TimeControl()
msg.command_name = command
self.lc.publish('Timer/Control', msg.encode())
def do_go(self):
self.do_time_ctrl('start')
def do_pause(self):
self.do_time_ctrl('pause')
def format_time(seconds):
return '{}:{:02}'.format(seconds // 60,
seconds % 60)
def main():
app = wx.App()
remote = Remote()
MainWindow(remote, None)
remote.start()
remote.do_load(False)
app.MainLoop()
if __name__ == '__main__':
main()
| pioneers/forseti | wxdash.py | Python | apache-2.0 | 10,763 |
/*
* Copyright 2013-2020 consulo.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.ide.plugins;
import com.intellij.icons.AllIcons;
import com.intellij.ide.DataManager;
import com.intellij.openapi.actionSystem.ActionGroup;
import com.intellij.openapi.ui.popup.JBPopupFactory;
import com.intellij.ui.ClickListener;
import com.intellij.util.ui.JBUI;
import com.intellij.util.ui.UIUtil;
import consulo.awt.TargetAWT;
import consulo.localize.LocalizeValue;
import javax.annotation.Nonnull;
import javax.swing.*;
import java.awt.event.MouseEvent;
import java.util.function.Function;
/**
* @author VISTALL
* @since 03/12/2020
*/
public class LabelPopup extends JLabel {
private final LocalizeValue myPrefix;
public LabelPopup(LocalizeValue prefix, Function<LabelPopup, ? extends ActionGroup> groupBuilder) {
myPrefix = prefix;
setForeground(UIUtil.getLabelDisabledForeground());
setBorder(JBUI.Borders.empty(1, 1, 1, 5));
setIcon(TargetAWT.to(AllIcons.General.ComboArrow));
setHorizontalTextPosition(SwingConstants.LEADING);
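// a click anywhere on the label opens the action-group popup underneath it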
new ClickListener() {
@Override
public boolean onClick(@Nonnull MouseEvent event, int clickCount) {
LabelPopup component = LabelPopup.this;
JBPopupFactory.getInstance()
.createActionGroupPopup(myPrefix.get(), groupBuilder.apply(component), DataManager.getInstance().getDataContext(component), JBPopupFactory.ActionSelectionAid.SPEEDSEARCH, true)
.showUnderneathOf(component);
return true;
}
}.installOn(this);
}
public void setPrefixedText(LocalizeValue tagValue) {
setText(LocalizeValue.join(myPrefix, LocalizeValue.space(), tagValue).get());
}
}
| consulo/consulo | modules/base/platform-impl/src/main/java/com/intellij/ide/plugins/LabelPopup.java | Java | apache-2.0 | 2,225 |
/*
* nativescriptassert.h
*
* Created on: 12.11.2013
* Author: blagoev
*/
#ifndef NATIVESCRIPTASSERT_H_
#define NATIVESCRIPTASSERT_H_
#include <android/log.h>
namespace tns {
extern bool LogEnabled;
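// DEBUG_WRITE logs only while LogEnabled is true; the FORCE and FATAL variants always log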
#define DEBUG_WRITE(fmt, args...) if (tns::LogEnabled) __android_log_print(ANDROID_LOG_DEBUG, "TNS.Native", fmt, ##args)
#define DEBUG_WRITE_FORCE(fmt, args...) __android_log_print(ANDROID_LOG_DEBUG, "TNS.Native", fmt, ##args)
#define DEBUG_WRITE_FATAL(fmt, args...) __android_log_print(ANDROID_LOG_FATAL, "TNS.Native", fmt, ##args)
}
#endif /* NATIVESCRIPTASSERT_H_ */
| NativeScript/android-runtime | test-app/runtime/src/main/cpp/NativeScriptAssert.h | C | apache-2.0 | 585 |
package yaputil
import (
"io/ioutil"
"net"
"regexp"
)
var (
nsRegex = regexp.MustCompile(`(?m)^nameserver\s+([0-9a-fA-F\.:]+)`)
)
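// LookupIP resolves host to its IP addresses using the default resolver.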
func LookupIP(host string) (ips []net.IP, err error) {
return net.LookupIP(host)
}
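// GetLocalNameServers parses /etc/resolv.conf and returns the nameserver addresses found there.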
func GetLocalNameServers() ([]string, error) {
b, err := ioutil.ReadFile("/etc/resolv.conf")
if err != nil {
return nil, err
}
nameservers := make([]string, 0, 4)
for _, m := range nsRegex.FindAllStringSubmatch(string(b), -1) {
nameservers = append(nameservers, m[1])
}
return nameservers, nil
}
| yaproxy/yap | yaputil/lookup.go | GO | apache-2.0 | 529 |
// Copyright (C) 2014 Space Monkey, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build cgo
package openssl
/*
#include <openssl/crypto.h>
#include <openssl/ssl.h>
#include <openssl/err.h>
#include <openssl/conf.h>
static long SSL_set_options_not_a_macro(SSL* ssl, long options) {
return SSL_set_options(ssl, options);
}
static long SSL_get_options_not_a_macro(SSL* ssl) {
return SSL_get_options(ssl);
}
static long SSL_clear_options_not_a_macro(SSL* ssl, long options) {
return SSL_clear_options(ssl, options);
}
extern int verify_ssl_cb(int ok, X509_STORE_CTX* store);
*/
import "C"
import (
"os"
"unsafe"
)
type SSLTLSExtErr int
const (
SSLTLSExtErrOK SSLTLSExtErr = C.SSL_TLSEXT_ERR_OK
SSLTLSExtErrAlertWarning SSLTLSExtErr = C.SSL_TLSEXT_ERR_ALERT_WARNING
SSLTLSEXTErrAlertFatal SSLTLSExtErr = C.SSL_TLSEXT_ERR_ALERT_FATAL
SSLTLSEXTErrNoAck SSLTLSExtErr = C.SSL_TLSEXT_ERR_NOACK
)
var (
ssl_idx = C.SSL_get_ex_new_index(0, nil, nil, nil, nil)
)
//export get_ssl_idx
func get_ssl_idx() C.int {
return ssl_idx
}
type SSL struct {
ssl *C.SSL
verify_cb VerifyCallback
}
//export verify_ssl_cb_thunk
func verify_ssl_cb_thunk(p unsafe.Pointer, ok C.int, ctx *C.X509_STORE_CTX) C.int {
defer func() {
if err := recover(); err != nil {
logger.Critf("openssl: verify callback panic'd: %v", err)
os.Exit(1)
}
}()
verify_cb := (*SSL)(p).verify_cb
// set up defaults just in case verify_cb is nil
if verify_cb != nil {
store := &CertificateStoreCtx{ctx: ctx}
if verify_cb(ok == 1, store) {
ok = 1
} else {
ok = 0
}
}
return ok
}
// Wrapper around SSL_get_servername. Returns server name according to rfc6066
// http://tools.ietf.org/html/rfc6066.
func (s *SSL) GetServername() string {
return C.GoString(C.SSL_get_servername(s.ssl, C.TLSEXT_NAMETYPE_host_name))
}
// GetOptions returns SSL options. See
// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
func (s *SSL) GetOptions() Options {
return Options(C.SSL_get_options_not_a_macro(s.ssl))
}
// SetOptions sets SSL options. See
// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
func (s *SSL) SetOptions(options Options) Options {
return Options(C.SSL_set_options_not_a_macro(s.ssl, C.long(options)))
}
// ClearOptions clear SSL options. See
// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
func (s *SSL) ClearOptions(options Options) Options {
return Options(C.SSL_clear_options_not_a_macro(s.ssl, C.long(options)))
}
// SetVerify controls peer verification settings. See
// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
func (s *SSL) SetVerify(options VerifyOptions, verify_cb VerifyCallback) {
s.verify_cb = verify_cb
if verify_cb != nil {
C.SSL_set_verify(s.ssl, C.int(options), (*[0]byte)(C.verify_ssl_cb))
} else {
C.SSL_set_verify(s.ssl, C.int(options), nil)
}
}
// SetVerifyMode controls peer verification setting. See
// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
func (s *SSL) SetVerifyMode(options VerifyOptions) {
s.SetVerify(options, s.verify_cb)
}
// SetVerifyCallback controls peer verification setting. See
// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
func (s *SSL) SetVerifyCallback(verify_cb VerifyCallback) {
s.SetVerify(s.VerifyMode(), verify_cb)
}
// GetVerifyCallback returns callback function. See
// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
func (s *SSL) GetVerifyCallback() VerifyCallback {
return s.verify_cb
}
// VerifyMode returns peer verification setting. See
// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
func (s *SSL) VerifyMode() VerifyOptions {
return VerifyOptions(C.SSL_get_verify_mode(s.ssl))
}
// SetVerifyDepth controls how many certificates deep the certificate
// verification logic is willing to follow a certificate chain. See
// https://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
func (s *SSL) SetVerifyDepth(depth int) {
C.SSL_set_verify_depth(s.ssl, C.int(depth))
}
// GetVerifyDepth controls how many certificates deep the certificate
// verification logic is willing to follow a certificate chain. See
// https://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
func (s *SSL) GetVerifyDepth() int {
return int(C.SSL_get_verify_depth(s.ssl))
}
// SetSSLCtx changes context to new one. Useful for Server Name Indication (SNI)
// rfc6066 http://tools.ietf.org/html/rfc6066. See
// http://stackoverflow.com/questions/22373332/serving-multiple-domains-in-one-box-with-sni
func (s *SSL) SetSSLCtx(ctx *Ctx) {
/*
* SSL_set_SSL_CTX() only changes certs as of 1.0.0d
* adjust other things we care about
*/
C.SSL_set_SSL_CTX(s.ssl, ctx.ctx)
}
//export sni_cb_thunk
func sni_cb_thunk(p unsafe.Pointer, con *C.SSL, ad unsafe.Pointer, arg unsafe.Pointer) C.int {
defer func() {
if err := recover(); err != nil {
logger.Critf("openssl: verify callback sni panic'd: %v", err)
os.Exit(1)
}
}()
sni_cb := (*Ctx)(p).sni_cb
s := &SSL{ssl: con}
// This attaches a pointer to our SSL struct into the SNI callback.
C.SSL_set_ex_data(s.ssl, get_ssl_idx(), unsafe.Pointer(s))
// Note: this is ctx.sni_cb, not C.sni_cb
return C.int(sni_cb(s))
}
| xakep666/openssl | ssl.go | GO | apache-2.0 | 5,686 |
# Ranunculus gentryanus var. typicus L.D.Benson VARIETY
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Ranunculales/Ranunculaceae/Ranunculus/Ranunculus gentryanus/Ranunculus gentryanus typicus/README.md | Markdown | apache-2.0 | 195 |
package org.support.project.knowledge.vo.notification.webhook;
public class WebhookLongIdJson {
public long id;
}
| support-project/knowledge | src/main/java/org/support/project/knowledge/vo/notification/webhook/WebhookLongIdJson.java | Java | apache-2.0 | 119 |
package etri.sdn.controller.module.vxlanflowmapper;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.codehaus.jackson.map.ObjectMapper;
public class Tester {
public static void main(String[] args) {
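// smoke-test JSON serialization of both the request and response shapes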
testV2PRequest();
testV2PResponse();
}
public static void testV2PResponse() {
HeaderInfoPair pair1 = new HeaderInfoPair(
new OuterPacketHeader.Builder()
.srcMac("00:00:00:00:00:11")
.dstMac("00:00:00:00:00:22").
srcIp("10.0.0.11").
dstIp("10.0.0.22").
udpPort("1001")
.build(),
new OrginalPacketHeader.Builder()
.srcMac("00:00:00:00:00:11")
.dstMac("00:00:00:00:00:22")
.srcIp("10.0.0.11")
.dstIp("10.0.0.22")
.vnid("1001")
.build() );
List<HeaderInfoPair> pairs = Arrays.asList(pair1);
V2PResponse response = new V2PResponse(pairs);
ObjectMapper mapper = new ObjectMapper();
String output = null;
try {
output = mapper.defaultPrettyPrintingWriter().writeValueAsString(response);
System.out.println(output);
} catch (IOException e) {
e.printStackTrace();
}
}
public static void testV2PRequest() {
OuterPacketHeader orgHeader = new OuterPacketHeader("00:00:00:00:00:01", "00:00:00:00:00:02", "10.0.0.1", "10.0.0.2", "1234");
List<OuterPacketHeader> headers= Arrays.asList(orgHeader);
P2VRequest request = new P2VRequest(headers);
// request.outerList = headers;
ObjectMapper mapper = new ObjectMapper();
String output = null;
try {
output = mapper.defaultPrettyPrintingWriter().writeValueAsString(request);
System.out.println(output);
} catch (IOException e) {
e.printStackTrace();
}
}
}
| uni2u/iNaaS | Torpedo/src/etri/sdn/controller/module/vxlanflowmapper/Tester.java | Java | apache-2.0 | 1,831 |
# Olax glabriflora Danguy SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Santalales/Olacaceae/Olax/Olax glabriflora/README.md | Markdown | apache-2.0 | 173 |
# Mortierella fusca E. Wolf, 1954 SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
Zentbl. Bakt. ParasitKde, Abt. II 107: 534 (1954)
#### Original name
Mortierella fusca E. Wolf, 1954
### Remarks
null | mdoering/backbone | life/Fungi/Zygomycota/Mucorales/Umbelopsidaceae/Umbelopsis/Umbelopsis isabellina/ Syn. Mortierella fusca/README.md | Markdown | apache-2.0 | 260 |
# Daucus commutatus Thell. SPECIES
#### Status
ACCEPTED
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Apiales/Apiaceae/Daucus/Daucus commutatus/README.md | Markdown | apache-2.0 | 182 |
# Decaspermum raymundi Diels SPECIES
#### Status
ACCEPTED
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Myrtales/Myrtaceae/Decaspermum/Decaspermum raymundi/README.md | Markdown | apache-2.0 | 184 |
# Sagittaria latifolia var. glabra VARIETY
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Liliopsida/Alismatales/Alismataceae/Sagittaria/Sagittaria latifolia/ Syn. Sagittaria latifolia glabra/README.md | Markdown | apache-2.0 | 189 |
# Ornithogalum xanthocodon (Hilliard & B.L.Burtt) J.C.Manning & Goldblatt SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Liliopsida/Asparagales/Asparagaceae/Albuca/Albuca xanthocodon/ Syn. Ornithogalum xanthocodon/README.md | Markdown | apache-2.0 | 228 |
# Mansoa schwackei Bureau & K.Schum. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Lamiales/Bignoniaceae/Mansoa/Mansoa schwackei/README.md | Markdown | apache-2.0 | 184 |
# Juniperus coahuilensis (Martínez) Gaussen ex R.P. Adams SPECIES
#### Status
ACCEPTED
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
Phytologia 74:413. 1993 (Gaussen, Trav. Lab. Forest. Toulouse tome II(I,1,pt. II 2, fasc. 10:101, 154. 1968, nom. inval. )
#### Original name
Juniperus erythrocarpa var. coahuilensis Martínez
### Remarks
null | mdoering/backbone | life/Plantae/Pinophyta/Pinopsida/Pinales/Cupressaceae/Juniperus/Juniperus coahuilensis/README.md | Markdown | apache-2.0 | 378 |
# Nitzschia gruendleri Grunow, 1878 SPECIES
#### Status
ACCEPTED
#### According to
World Register of Marine Species
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Bacillariophyta/Bacillariophyceae/Bacillariales/Bacillariaceae/Nitzschia/Nitzschia gruendleri/README.md | Markdown | apache-2.0 | 184 |
# Petalomonas sulcata Stokes SPECIES
#### Status
ACCEPTED
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Protozoa/Euglenozoa/Euglenida/Euglenales/Astasiaceae/Petalomonas/Petalomonas sulcata/README.md | Markdown | apache-2.0 | 184 |
# Pseudopeziza alismatis (W. Phillips & Trail) Sacc., 1889 SPECIES
#### Status
ACCEPTED
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
Syll. fung. (Abellini) 8: 728 (1889)
#### Original name
Mollisia alismatis W. Phillips & Trail, 1888
### Remarks
null | mdoering/backbone | life/Fungi/Ascomycota/Leotiomycetes/Helotiales/Dermateaceae/Pseudopeziza/Pseudopeziza alismatis/README.md | Markdown | apache-2.0 | 286 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (version 1.7.0_45) on Mon Mar 03 10:44:38 EST 2014 -->
<title>Uses of Class org.hibernate.metamodel.source.annotations.EntityHierarchyBuilder (Hibernate JavaDocs)</title>
<meta name="date" content="2014-03-03">
<link rel="stylesheet" type="text/css" href="../../../../../../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class org.hibernate.metamodel.source.annotations.EntityHierarchyBuilder (Hibernate JavaDocs)";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../org/hibernate/metamodel/source/annotations/EntityHierarchyBuilder.html" title="class in org.hibernate.metamodel.source.annotations">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../../../../../../overview-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?org/hibernate/metamodel/source/annotations/class-use/EntityHierarchyBuilder.html" target="_top">Frames</a></li>
<li><a href="EntityHierarchyBuilder.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="Uses of Class org.hibernate.metamodel.source.annotations.EntityHierarchyBuilder" class="title">Uses of Class<br>org.hibernate.metamodel.source.annotations.EntityHierarchyBuilder</h2>
</div>
<div class="classUseContainer">No usage of org.hibernate.metamodel.source.annotations.EntityHierarchyBuilder</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../org/hibernate/metamodel/source/annotations/EntityHierarchyBuilder.html" title="class in org.hibernate.metamodel.source.annotations">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../../../../../../overview-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?org/hibernate/metamodel/source/annotations/class-use/EntityHierarchyBuilder.html" target="_top">Frames</a></li>
<li><a href="EntityHierarchyBuilder.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2001-2014 <a href="http://redhat.com">Red Hat, Inc.</a> All Rights Reserved.</small></p>
</body>
</html>
| serious6/HibernateSimpleProject | javadoc/hibernate_Doc/org/hibernate/metamodel/source/annotations/class-use/EntityHierarchyBuilder.html | HTML | apache-2.0 | 4,694 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0_91) on Mon Aug 22 09:59:23 CST 2016 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>play.data.parsing (Play! API)</title>
<meta name="date" content="2016-08-22">
<link rel="stylesheet" type="text/css" href="../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../script.js"></script>
</head>
<body>
<h1 class="bar"><a href="../../../play/data/parsing/package-summary.html" target="classFrame">play.data.parsing</a></h1>
<div class="indexContainer">
<h2 title="Classes">Classes</h2>
<ul title="Classes">
<li><a href="ApacheMultipartParser.html" title="class in play.data.parsing" target="classFrame">ApacheMultipartParser</a></li>
<li><a href="ApacheMultipartParser.AutoFileItem.html" title="class in play.data.parsing" target="classFrame">ApacheMultipartParser.AutoFileItem</a></li>
<li><a href="DataParser.html" title="class in play.data.parsing" target="classFrame">DataParser</a></li>
<li><a href="DataParsers.html" title="class in play.data.parsing" target="classFrame">DataParsers</a></li>
<li><a href="MultipartStream.html" title="class in play.data.parsing" target="classFrame">MultipartStream</a></li>
<li><a href="TempFilePlugin.html" title="class in play.data.parsing" target="classFrame">TempFilePlugin</a></li>
<li><a href="TextParser.html" title="class in play.data.parsing" target="classFrame">TextParser</a></li>
<li><a href="UrlEncodedParser.html" title="class in play.data.parsing" target="classFrame">UrlEncodedParser</a></li>
</ul>
<h2 title="Exceptions">Exceptions</h2>
<ul title="Exceptions">
<li><a href="ApacheMultipartParser.SizeException.html" title="class in play.data.parsing" target="classFrame">ApacheMultipartParser.SizeException</a></li>
<li><a href="MultipartStream.IllegalBoundaryException.html" title="class in play.data.parsing" target="classFrame">MultipartStream.IllegalBoundaryException</a></li>
<li><a href="MultipartStream.MalformedStreamException.html" title="class in play.data.parsing" target="classFrame">MultipartStream.MalformedStreamException</a></li>
</ul>
</div>
</body>
</html>
| play1-maven-plugin/play1-maven-plugin.github.io | external-apidocs/com/google/code/maven-play-plugin/org/playframework/play/1.4.3/play/data/parsing/package-frame.html | HTML | apache-2.0 | 2,265 |
/**
* @license Apache-2.0
*
* Copyright (c) 2018 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
// MODULES //
var tape = require( 'tape' );
var Readable = require( 'readable-stream' ).Readable;
var now = require( '@stdlib/time/now' );
var arcsine = require( '@stdlib/random/base/arcsine' ).factory;
var isBuffer = require( '@stdlib/assert/is-buffer' );
var isnan = require( '@stdlib/math/base/assert/is-nan' );
var isUint32Array = require( '@stdlib/assert/is-uint32array' );
var UINT32_MAX = require( '@stdlib/constants/uint32/max' );
var Uint32Array = require( '@stdlib/array/uint32' );
var minstd = require( '@stdlib/random/base/minstd' );
var inspectStream = require( '@stdlib/streams/node/inspect-sink' );
var randomStream = require( './../lib/main.js' );
// TESTS //
tape( 'main export is a function', function test( t ) {
t.ok( true, __filename );
t.equal( typeof randomStream, 'function', 'main export is a function' );
t.end();
});
tape( 'the function throws an error if minimum support `a` is not a number primitive', function test( t ) {
var values;
var i;
values = [
'5',
null,
true,
false,
void 0,
NaN,
[],
{},
function noop() {}
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), TypeError, 'throws an error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( value, 2.0 );
};
}
});
tape( 'the function throws an error if maximum support `b` is not a number primitive', function test( t ) {
var values;
var i;
values = [
'5',
null,
true,
false,
void 0,
NaN,
[],
{},
function noop() {}
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), TypeError, 'throws an error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( 2.0, value );
};
}
});
tape( 'the function throws an error if minimum support `a` is greater than or equal to maximum support `b`', function test( t ) {
var values;
var i;
values = [
[ 0.0, 0.0 ],
[ -2.0, -4.0 ],
[ 2.0, 1.0 ]
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), RangeError, 'throws an error when provided '+values[i] );
}
t.end();
function badValue( arr ) {
return function badValue() {
randomStream( arr[0], arr[1] );
};
}
});
tape( 'the function throws an error if provided an options argument which is not an object', function test( t ) {
var values;
var i;
values = [
'abc',
5,
null,
true,
false,
void 0,
NaN,
[],
function noop() {}
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), TypeError, 'throws an error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( 2.0, 5.0, value );
};
}
});
tape( 'the function throws an error if provided an invalid `iter` option', function test( t ) {
var values;
var i;
values = [
'abc',
-5,
3.14,
null,
true,
false,
void 0,
NaN,
[],
{},
function noop() {}
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), TypeError, 'throws an error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( 2.0, 5.0, {
'iter': value
});
};
}
});
tape( 'if provided a `prng` option which is not a function, the function throws an error', function test( t ) {
var values;
var i;
values = [
'5',
3.14,
NaN,
true,
false,
null,
void 0,
[],
{}
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), TypeError, 'throws a type error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( 2.0, 5.0, {
'prng': value
});
};
}
});
tape( 'if provided a `copy` option which is not a boolean, the function throws an error', function test( t ) {
var values;
var i;
values = [
'5',
5,
NaN,
null,
void 0,
{},
[],
function noop() {}
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), TypeError, 'throws a type error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( 2.0, 5.0, {
'copy': value
});
};
}
});
tape( 'if provided a `seed` which is not a positive integer or a non-empty array-like object, the function throws an error', function test( t ) {
var values;
var i;
values = [
'5',
3.14,
0.0,
-5.0,
NaN,
true,
false,
null,
void 0,
{},
[],
function noop() {}
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), TypeError, 'throws a type error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( 2.0, 5.0, {
'seed': value
});
};
}
});
tape( 'the function throws a range error if provided a `seed` which is an integer greater than the maximum unsigned 32-bit integer', function test( t ) {
var values;
var i;
values = [
UINT32_MAX + 1,
UINT32_MAX + 2,
UINT32_MAX + 3
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), RangeError, 'throws a range error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( 2.0, 5.0, {
'seed': value
});
};
}
});
tape( 'if provided a `state` option which is not a Uint32Array, the function throws an error', function test( t ) {
var values;
var i;
values = [
'5',
5,
NaN,
true,
false,
null,
void 0,
{},
[],
function noop() {}
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), TypeError, 'throws a type error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( 2.0, 5.0, {
'state': value
});
};
}
});
tape( 'if provided an invalid `state` option, the function throws an error', function test( t ) {
var values;
var i;
values = [
new Uint32Array( 0 ),
new Uint32Array( 10 ),
new Uint32Array( 100 )
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), RangeError, 'throws an error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( 2.0, 5.0, {
'state': value
});
};
}
});
tape( 'if provided an invalid readable stream option, the function throws an error', function test( t ) {
var values;
var i;
values = [
'5',
5,
NaN,
null,
void 0,
{},
[],
function noop() {}
];
for ( i = 0; i < values.length; i++ ) {
t.throws( badValue( values[i] ), TypeError, 'throws a type error when provided '+values[i] );
}
t.end();
function badValue( value ) {
return function badValue() {
randomStream( 2.0, 5.0, {
'objectMode': value
});
};
}
});
tape( 'the function is a constructor which returns a readable stream', function test( t ) {
var RandomStream = randomStream;
var s;
s = new RandomStream( 2.0, 5.0 );
t.equal( s instanceof Readable, true, 'returns expected value' );
t.end();
});
tape( 'the constructor does not require the `new` operator', function test( t ) {
var RandomStream = randomStream;
var s;
s = randomStream( 2.0, 5.0 );
t.equal( s instanceof RandomStream, true, 'returns expected value' );
t.end();
});
tape( 'the constructor returns a readable stream (no new)', function test( t ) {
var s = randomStream( 2.0, 5.0 );
t.equal( s instanceof Readable, true, 'returns expected value' );
t.end();
});
tape( 'the returned stream provides a method to destroy a stream (object)', function test( t ) {
var count = 0;
var s;
s = randomStream( 2.0, 5.0 );
t.equal( typeof s.destroy, 'function', 'has destroy method' );
s.on( 'error', onError );
s.on( 'close', onClose );
s.destroy({
'message': 'beep'
});
function onError( err ) {
count += 1;
if ( err ) {
t.ok( true, err.message );
} else {
t.ok( false, 'does not error' );
}
if ( count === 2 ) {
t.end();
}
}
function onClose() {
count += 1;
t.ok( true, 'stream closes' );
if ( count === 2 ) {
t.end();
}
}
});
tape( 'the returned stream provides a method to destroy a stream (error object)', function test( t ) {
var count = 0;
var s;
s = randomStream( 2.0, 5.0 );
t.equal( typeof s.destroy, 'function', 'has destroy method' );
s.on( 'error', onError );
s.on( 'close', onClose );
s.destroy( new Error( 'beep' ) );
function onError( err ) {
count += 1;
if ( err ) {
t.ok( true, err.message );
} else {
t.ok( false, 'does not error' );
}
if ( count === 2 ) {
t.end();
}
}
function onClose() {
count += 1;
t.ok( true, 'stream closes' );
if ( count === 2 ) {
t.end();
}
}
});
tape( 'the returned stream does not allow itself to be destroyed more than once', function test( t ) {
var s;
s = randomStream( 2.0, 5.0 );
s.on( 'error', onError );
s.on( 'close', onClose );
// If the stream is closed twice, the test will error...
s.destroy();
s.destroy();
function onClose() {
t.ok( true, 'stream closes' );
t.end();
}
function onError( err ) {
t.ok( false, err.message );
}
});
tape( 'attached to the returned stream is the underlying PRNG', function test( t ) {
var s = randomStream( 2.0, 5.0 );
t.equal( typeof s.PRNG, 'function', 'has property' );
s = randomStream( 2.0, 5.0, {
'prng': minstd.normalized
});
t.equal( s.PRNG, minstd.normalized, 'has property' );
t.end();
});
tape( 'attached to the returned stream is the generator seed', function test( t ) {
var s = randomStream( 2.0, 5.0, {
'seed': 12345
});
t.equal( isUint32Array( s.seed ), true, 'has property' );
t.equal( s.seed[ 0 ], 12345, 'equal to provided seed' );
s = randomStream( 2.0, 5.0, {
'seed': 12345,
'prng': minstd.normalized
});
t.equal( s.seed, null, 'equal to `null`' );
t.end();
});
tape( 'attached to the returned stream is the generator seed (array seed)', function test( t ) {
var actual;
var seed;
var s;
var i;
seed = [ 1234, 5678 ];
s = randomStream( 2.0, 5.0, {
'seed': seed
});
actual = s.seed;
t.equal( isUint32Array( actual ), true, 'has property' );
for ( i = 0; i < seed.length; i++ ) {
t.equal( actual[ i ], seed[ i ], 'returns expected value for word '+i );
}
t.end();
});
tape( 'attached to the returned stream is the generator seed length', function test( t ) {
var s = randomStream( 2.0, 5.0 );
t.equal( typeof s.seedLength, 'number', 'has property' );
s = randomStream( 2.0, 5.0, {
'prng': minstd.normalized
});
t.equal( s.seedLength, null, 'equal to `null`' );
t.end();
});
tape( 'attached to the returned stream is the generator state', function test( t ) {
var s = randomStream( 2.0, 5.0 );
t.equal( isUint32Array( s.state ), true, 'has property' );
s = randomStream( 2.0, 5.0, {
'prng': minstd.normalized
});
t.equal( s.state, null, 'equal to `null`' );
t.end();
});
tape( 'attached to the returned stream is the generator state length', function test( t ) {
var s = randomStream( 2.0, 5.0 );
t.equal( typeof s.stateLength, 'number', 'has property' );
s = randomStream( 2.0, 5.0, {
'prng': minstd.normalized
});
t.equal( s.stateLength, null, 'equal to `null`' );
t.end();
});
tape( 'attached to the returned stream is the generator state size', function test( t ) {
var s = randomStream( 2.0, 5.0 );
t.equal( typeof s.byteLength, 'number', 'has property' );
s = randomStream( 2.0, 5.0, {
'prng': minstd.normalized
});
t.equal( s.byteLength, null, 'equal to `null`' );
t.end();
});
tape( 'the constructor returns a stream for generating pseudorandom numbers from an arcsine distribution', function test( t ) {
var iStream;
var result;
var rand;
var opts;
var s;
// Note: we assume that the underlying generator is the following PRNG...
rand = arcsine( 2.0, 5.0, {
'seed': 12345
});
opts = {
'seed': 12345,
'iter': 10,
'sep': '\n'
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'end', onEnd );
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect );
result = '';
s.pipe( iStream );
function inspect( chunk ) {
t.equal( isBuffer( chunk ), true, 'returns a buffer' );
result += chunk.toString();
}
function onEnd() {
var i;
t.pass( 'stream ended' );
result = result.split( '\n' );
t.equal( result.length, 10, 'has expected length' );
for ( i = 0; i < result.length; i++ ) {
t.equal( parseFloat( result[ i ] ), rand(), 'returns expected value. i: ' + i + '.' );
}
t.end();
}
});
tape( 'the constructor returns a stream for generating pseudorandom numbers from an arcsine distribution (object mode)', function test( t ) {
var iStream;
var count;
var rand;
var opts;
var s;
// Note: we assume that the underlying generator is the following PRNG...
rand = arcsine( 2.0, 5.0, {
'seed': 12345
});
opts = {
'seed': 12345,
'objectMode': true
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'close', onClose );
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect );
count = 0;
s.pipe( iStream );
function inspect( v ) {
count += 1;
t.equal( rand(), v, 'returns expected value. i: '+count+'.' );
if ( count >= 10 ) {
s.destroy();
}
}
function onClose() {
t.pass( 'stream closed' );
t.end();
}
});
tape( 'the constructor supports limiting the number of iterations', function test( t ) {
var iStream;
var count;
var niter;
var opts;
var s;
niter = 10;
count = 0;
opts = {
'iter': niter,
'objectMode': true
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'end', onEnd );
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect );
s.pipe( iStream );
function inspect( v ) {
count += 1;
t.equal( typeof v, 'number', 'returns expected value' );
}
function onEnd() {
t.equal( count === niter, true, 'performs expected number of iterations' );
t.end();
}
});
tape( 'by default, the constructor generates newline-delimited pseudorandom numbers', function test( t ) {
var iStream;
var result;
var opts;
var s;
opts = {
'iter': 10
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'end', onEnd );
iStream = inspectStream( inspect );
result = '';
s.pipe( iStream );
function inspect( chunk ) {
result += chunk.toString();
}
function onEnd() {
var v;
var i;
result = result.split( '\n' );
t.equal( result.length, opts.iter, 'has expected length' );
for ( i = 0; i < result.length; i++ ) {
v = parseFloat( result[ i ] );
t.equal( typeof v, 'number', 'returns expected value' );
t.equal( isnan( v ), false, 'is not NaN' );
}
t.end();
}
});
tape( 'the constructor supports providing a custom separator for streamed values', function test( t ) {
var iStream;
var result;
var opts;
var s;
opts = {
'iter': 10,
'sep': '--++--'
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'end', onEnd );
iStream = inspectStream( inspect );
result = '';
s.pipe( iStream );
function inspect( chunk ) {
result += chunk.toString();
}
function onEnd() {
var v;
var i;
result = result.split( opts.sep );
t.equal( result.length, opts.iter, 'has expected length' );
for ( i = 0; i < result.length; i++ ) {
v = parseFloat( result[ i ] );
t.equal( typeof v, 'number', 'returns expected value' );
t.equal( isnan( v ), false, 'is not NaN' );
}
t.end();
}
});
tape( 'the constructor supports returning a seeded readable stream', function test( t ) {
var iStream;
var opts;
var seed;
var arr;
var s1;
var s2;
var i;
seed = now();
opts = {
'objectMode': true,
'seed': seed,
'iter': 10
};
s1 = randomStream( 2.0, 5.0, opts );
s1.on( 'end', onEnd1 );
s2 = randomStream( 2.0, 5.0, opts );
s2.on( 'end', onEnd2 );
t.notEqual( s1, s2, 'separate streams' );
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect1 );
arr = [];
i = 0;
s1.pipe( iStream );
function inspect1( v ) {
arr.push( v );
}
function onEnd1() {
var iStream;
var opts;
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect2 );
s2.pipe( iStream );
}
function inspect2( v ) {
t.equal( v, arr[ i ], 'returns expected value' );
i += 1;
}
function onEnd2() {
t.end();
}
});
tape( 'the constructor supports specifying the underlying PRNG', function test( t ) {
var iStream;
var opts;
var s;
opts = {
'prng': minstd.normalized,
'objectMode': true,
'iter': 10
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'end', onEnd );
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect );
s.pipe( iStream );
function inspect( v ) {
t.equal( typeof v, 'number', 'returns a number' );
}
function onEnd() {
t.end();
}
});
tape( 'the constructor supports providing a seeded underlying PRNG', function test( t ) {
var iStream1;
var iStream2;
var randu;
var seed;
var opts;
var FLG;
var s1;
var s2;
var r1;
var r2;
seed = now();
randu = minstd.factory({
'seed': seed
});
opts = {
'prng': randu.normalized,
'objectMode': true,
'iter': 10
};
s1 = randomStream( 2.0, 5.0, opts );
s1.on( 'end', onEnd );
randu = minstd.factory({
'seed': seed
});
opts = {
'prng': randu.normalized,
'objectMode': true,
'iter': 10
};
s2 = randomStream( 2.0, 5.0, opts );
s2.on( 'end', onEnd );
t.notEqual( s1, s2, 'separate streams' );
opts = {
'objectMode': true
};
iStream1 = inspectStream( opts, inspect1 );
iStream2 = inspectStream( opts, inspect2 );
r1 = [];
r2 = [];
s1.pipe( iStream1 );
s2.pipe( iStream2 );
function inspect1( v ) {
r1.push( v );
}
function inspect2( v ) {
r2.push( v );
}
function onEnd() {
if ( FLG ) {
t.deepEqual( r1, r2, 'streams expected values' );
return t.end();
}
FLG = true;
}
});
tape( 'the constructor supports specifying the underlying generator state', function test( t ) {
var iStream;
var state;
var count;
var opts;
var arr;
var s;
opts = {
'objectMode': true,
'iter': 10,
'siter': 5
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'state', onState );
s.on( 'end', onEnd1 );
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect1 );
count = 0;
arr = [];
// Move to a future state...
s.pipe( iStream );
function onState( s ) {
// Only capture the first emitted state...
if ( !state ) {
state = s;
}
}
function inspect1( v ) {
count += 1;
if ( count > 5 ) {
arr.push( v );
}
}
function onEnd1() {
var iStream;
var opts;
var s;
t.pass( 'first stream ended' );
// Create another stream using the captured state:
opts = {
'objectMode': true,
'iter': 5,
'state': state
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'end', onEnd2 );
t.deepEqual( state, s.state, 'same state' );
// Create a new inspect stream:
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect2 );
// Replay previously generated values...
count = 0;
s.pipe( iStream );
}
function inspect2( v ) {
count += 1;
t.equal( v, arr[ count-1 ], 'returns expected value. i: '+(count-1)+'.' );
}
function onEnd2() {
t.pass( 'second stream ended' );
t.end();
}
});
tape( 'the constructor supports specifying a shared underlying generator state', function test( t ) {
var iStream;
var shared;
var state;
var count;
var opts;
var arr;
var s;
opts = {
'objectMode': true,
'iter': 10,
'siter': 4
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'state', onState );
s.on( 'end', onEnd1 );
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect1 );
count = 0;
arr = [];
// Move to a future state...
s.pipe( iStream );
function onState( s ) {
// Only capture the first emitted state...
if ( !state ) {
state = s;
// Create a copy of the state (to prevent mutation) which will be shared by more than one PRNG:
shared = new Uint32Array( state );
}
}
function inspect1( v ) {
count += 1;
if ( count > 4 ) {
arr.push( v );
}
}
function onEnd1() {
var iStream;
var opts;
var s;
t.pass( 'first stream ended' );
// Create another stream using the captured state:
opts = {
'objectMode': true,
'iter': 3,
'state': shared,
'copy': false
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'end', onEnd2 );
t.deepEqual( state, s.state, 'same state' );
// Create a new inspect stream:
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect2 );
// Replay previously generated values...
count = 0;
s.pipe( iStream );
}
function inspect2( v ) {
count += 1;
t.equal( v, arr[ count-1 ], 'returns expected value. i: '+(count-1)+'.' );
}
function onEnd2() {
var iStream;
var opts;
var s;
t.pass( 'second stream ended' );
// Create another stream using the captured state:
opts = {
'objectMode': true,
'iter': 3,
'state': shared,
'copy': false
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'end', onEnd3 );
t.notDeepEqual( state, s.state, 'different state' );
// Create a new inspect stream:
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect3 );
// Continue replaying previously generated values...
s.pipe( iStream );
}
function inspect3( v ) {
count += 1;
t.equal( v, arr[ count-1 ], 'returns expected value. i: '+(count-1)+'.' );
}
function onEnd3() {
t.pass( 'third stream ended' );
t.end();
}
});
tape( 'the returned stream supports setting the underlying generator state', function test( t ) {
var iStream;
var state;
var rand;
var opts;
var arr;
var s;
var i;
rand = arcsine( 2.0, 5.0 );
// Move to a future state...
for ( i = 0; i < 5; i++ ) {
rand();
}
// Capture the current state:
state = rand.state;
// Move to a future state...
arr = [];
for ( i = 0; i < 5; i++ ) {
arr.push( rand() );
}
// Create a random stream:
opts = {
'objectMode': true,
'iter': 5
};
s = randomStream( 2.0, 5.0, opts );
s.on( 'end', onEnd );
// Set the PRNG state:
s.state = state;
// Create a new inspect stream:
opts = {
'objectMode': true
};
iStream = inspectStream( opts, inspect );
// Replay previously generated values:
i = 0;
s.pipe( iStream );
function inspect( v ) {
t.equal( v, arr[ i ], 'returns expected value. i: ' + i + '.' );
i += 1;
}
function onEnd() {
t.end();
}
});
| stdlib-js/stdlib | lib/node_modules/@stdlib/random/streams/arcsine/test/test.main.js | JavaScript | apache-2.0 | 23,031 |
module ZendeskAPI
# Creates put, post, delete, and any class methods for custom resource methods.
module Verbs
class << self
private
# @macro [attach] container.create_verb
# @method $1(method)
#   Executes a $1 request using the passed-in method as a path.
# Reloads the resource's attributes if any are in the response body.
#
#   The created method takes an optional options hash. Valid options: :reload (for caching; default: false)
def create_verb(method_verb)
define_method method_verb do |method|
define_method "#{method}!" do |*method_args|
opts = method_args.last.is_a?(Hash) ? method_args.pop : {}
if method_verb == :any
verb = opts.delete(:verb)
raise(ArgumentError, ":verb required for method defined as :any") unless verb
else
verb = method_verb
end
@response = @client.connection.send(verb, "#{path}/#{method}") do |req|
req.body = opts
end
return false unless @response.success?
return false unless @response.body
resource = nil
if @response.body.is_a?(Hash)
resource = @response.body[self.class.singular_resource_name]
resource ||= @response.body.fetch(self.class.resource_name, []).detect { |res| res["id"] == id }
end
@attributes.replace @attributes.deep_merge(resource || {})
@attributes.clear_changes
clear_associations
true
end
define_method method do |*method_args|
begin
send("#{method}!", *method_args)
rescue ZendeskAPI::Error::RecordInvalid => e
@errors = e.errors
false
rescue ZendeskAPI::Error::ClientError
false
end
end
end
end
end
create_verb :put
create_verb :post
create_verb :delete
create_verb :any
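# Usage sketch (hypothetical resource and method names, not from this file):
# a resource class that includes this module can declare
#
#   put :suspend
#
# which defines #suspend and #suspend! instance methods. Calling
# ticket.suspend(:reason => "spam") then issues a PUT to
# "<resource path>/suspend" and merges any attributes returned in the
# response body back into the resource.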
end
end
| zendesk/zendesk_api_client_rb | lib/zendesk_api/verbs.rb | Ruby | apache-2.0 | 2,079 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package fuzzy.internal.functions;
import static org.junit.Assert.assertEquals;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import org.junit.Test;
/**
* Tests for Max function.
*
* @since 0.2
* @see Max
*/
public class TestMax {
@Test
public void testMax() {
Collection<Double> list = Arrays.asList(-1.0, 1.0, 2.0, 3.5);
Double r = Max.of(list, false);
assertEquals(Double.valueOf(3.5), r);
}
@Test
public void testMaxEmpty() {
Double r = Max.of(Collections.<Double>emptyList(), false);
assertEquals(Double.valueOf(0.0), r);
}
@Test
public void testMaxAbs() {
Collection<Double> list = Arrays.asList(-10.0, -1.0, 1.0, 2.0, 3.5);
Double r = Max.of(list, true);
assertEquals(Double.valueOf(-10.0), r);
}
}
| tupilabs/nebular | src/test/java/fuzzy/internal/functions/TestMax.java | Java | apache-2.0 | 1,371 |
/*
* Hibernate, Relational Persistence for Idiomatic Java
*
* License: GNU Lesser General Public License (LGPL), version 2.1 or later
* See the lgpl.txt file in the root directory or http://www.gnu.org/licenses/lgpl-2.1.html
*/
package org.hibernate.orm.type.descriptor.sql.internal;
import java.time.temporal.TemporalAccessor;
import javax.persistence.TemporalType;
import org.hibernate.dialect.Dialect;
import org.hibernate.orm.type.descriptor.internal.DateTimeUtils;
import org.hibernate.orm.type.descriptor.java.spi.TemporalJavaTypeDescriptor;
import org.hibernate.orm.type.descriptor.spi.WrapperOptions;
/**
* @author Steve Ebersole
*/
public class JdbcLiteralFormatterTemporal extends BasicJdbcLiteralFormatter {
private final TemporalType precision;
public JdbcLiteralFormatterTemporal(TemporalJavaTypeDescriptor javaTypeDescriptor, TemporalType precision) {
super( javaTypeDescriptor );
this.precision = precision;
// todo : add some validation of combos between javaTypeDescriptor#getPrecision and precision - log warnings
}
@Override
protected TemporalJavaTypeDescriptor getJavaTypeDescriptor() {
return (TemporalJavaTypeDescriptor) super.getJavaTypeDescriptor();
}
@Override
public String toJdbcLiteral(Object value, Dialect dialect, WrapperOptions wrapperOptions) {
// for performance reasons, avoid conversions if we can
if ( value instanceof java.util.Date ) {
return DateTimeUtils.formatJdbcLiteralUsingPrecision(
(java.util.Date) value,
precision
);
}
else if ( value instanceof java.util.Calendar ) {
return DateTimeUtils.formatJdbcLiteralUsingPrecision(
(java.util.Calendar) value,
precision
);
}
else if ( value instanceof TemporalAccessor ) {
return DateTimeUtils.formatJdbcLiteralUsingPrecision(
(TemporalAccessor) value,
precision
);
}
switch ( getJavaTypeDescriptor().getPrecision() ) {
case DATE: {
return DateTimeUtils.formatJdbcLiteralUsingPrecision(
unwrap( value, java.sql.Date.class, wrapperOptions ),
precision
);
}
case TIME: {
return DateTimeUtils.formatJdbcLiteralUsingPrecision(
unwrap( value, java.sql.Time.class, wrapperOptions ),
precision
);
}
default: {
return DateTimeUtils.formatJdbcLiteralUsingPrecision(
unwrap( value, java.util.Date.class, wrapperOptions ),
precision
);
}
}
}
}
| hibernate/hibernate-semantic-query | src/test/java/org/hibernate/orm/type/descriptor/sql/internal/JdbcLiteralFormatterTemporal.java | Java | apache-2.0 | 2,399 |
# Septoria ribis f. ribis (Lib.) Desm., 1842 FORM
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Fungi/Ascomycota/Dothideomycetes/Capnodiales/Mycosphaerellaceae/Mycosphaerella/Mycosphaerella ribis/ Syn. Septoria ribis ribis/README.md | Markdown | apache-2.0 | 196 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (version 1.7.0-google-v5) on Thu Dec 19 17:42:37 EST 2013 -->
<title>TechnologyTargetingErrorReason</title>
<meta name="date" content="2013-12-19">
<link rel="stylesheet" type="text/css" href="../../../../../../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="TechnologyTargetingErrorReason";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingError.html" title="class in com.google.api.ads.dfp.v201306"><span class="strong">Prev Class</span></a></li>
<li><a href="../../../../../../com/google/api/ads/dfp/v201306/TemplateCreative.html" title="class in com.google.api.ads.dfp.v201306"><span class="strong">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" target="_top">Frames</a></li>
<li><a href="TechnologyTargetingErrorReason.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li><a href="#field_summary">Field</a> | </li>
<li><a href="#constructor_summary">Constr</a> | </li>
<li><a href="#method_summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li><a href="#field_detail">Field</a> | </li>
<li><a href="#constructor_detail">Constr</a> | </li>
<li><a href="#method_detail">Method</a></li>
</ul>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<!-- ======== START OF CLASS DATA ======== -->
<div class="header">
<div class="subTitle">com.google.api.ads.dfp.v201306</div>
<h2 title="Class TechnologyTargetingErrorReason" class="title">Class TechnologyTargetingErrorReason</h2>
</div>
<div class="contentContainer">
<ul class="inheritance">
<li>java.lang.Object</li>
<li>
<ul class="inheritance">
<li>com.google.api.ads.dfp.v201306.TechnologyTargetingErrorReason</li>
</ul>
</li>
</ul>
<div class="description">
<ul class="blockList">
<li class="blockList">
<dl>
<dt>All Implemented Interfaces:</dt>
<dd>java.io.Serializable</dd>
</dl>
<hr>
<br>
<pre>public class <span class="strong">TechnologyTargetingErrorReason</span>
extends java.lang.Object
implements java.io.Serializable</pre>
<dl><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../../serialized-form.html#com.google.api.ads.dfp.v201306.TechnologyTargetingErrorReason">Serialized Form</a></dd></dl>
</li>
</ul>
</div>
<div class="summary">
<ul class="blockList">
<li class="blockList">
<!-- =========== FIELD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="field_summary">
<!-- -->
</a>
<h3>Field Summary</h3>
<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Field Summary table, listing fields, and an explanation">
<caption><span>Fields</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Field and Description</th>
</tr>
<tr class="altColor">
<td class="colFirst"><code>static java.lang.String</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#_DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED">_DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED</a></strong></code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>static java.lang.String</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#_DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED">_DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED</a></strong></code> </td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>static java.lang.String</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#_MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED">_MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED</a></strong></code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>static java.lang.String</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#_MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA">_MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA</a></strong></code> </td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>static java.lang.String</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#_UNKNOWN">_UNKNOWN</a></strong></code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>static java.lang.String</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#_WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA">_WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA</a></strong></code> </td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>static <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a></code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED">DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED</a></strong></code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>static <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a></code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED">DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED</a></strong></code> </td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>static <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a></code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED">MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED</a></strong></code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>static <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a></code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA">MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA</a></strong></code> </td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>static <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a></code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#UNKNOWN">UNKNOWN</a></strong></code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>static <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a></code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA">WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA</a></strong></code> </td>
</tr>
</table>
</li>
</ul>
<!-- ======== CONSTRUCTOR SUMMARY ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor_summary">
<!-- -->
</a>
<h3>Constructor Summary</h3>
<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Constructor Summary table, listing constructors, and an explanation">
<caption><span>Constructors</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier</th>
<th class="colLast" scope="col">Constructor and Description</th>
</tr>
<tr class="altColor">
<td class="colFirst"><code>protected </code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#TechnologyTargetingErrorReason(java.lang.String)">TechnologyTargetingErrorReason</a></strong>(java.lang.String value)</code> </td>
</tr>
</table>
</li>
</ul>
<!-- ========== METHOD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="method_summary">
<!-- -->
</a>
<h3>Method Summary</h3>
<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation">
<caption><span>Methods</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tr class="altColor">
<td class="colFirst"><code>boolean</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#equals(java.lang.Object)">equals</a></strong>(java.lang.Object obj)</code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>static <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a></code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#fromString(java.lang.String)">fromString</a></strong>(java.lang.String value)</code> </td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>static <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a></code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#fromValue(java.lang.String)">fromValue</a></strong>(java.lang.String value)</code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>static org.apache.axis.encoding.Deserializer</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#getDeserializer(java.lang.String, java.lang.Class, javax.xml.namespace.QName)">getDeserializer</a></strong>(java.lang.String mechType,
java.lang.Class _javaType,
javax.xml.namespace.QName _xmlType)</code> </td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>static org.apache.axis.encoding.Serializer</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#getSerializer(java.lang.String, java.lang.Class, javax.xml.namespace.QName)">getSerializer</a></strong>(java.lang.String mechType,
java.lang.Class _javaType,
javax.xml.namespace.QName _xmlType)</code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>static org.apache.axis.description.TypeDesc</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#getTypeDesc()">getTypeDesc</a></strong>()</code>
<div class="block">Return type metadata object</div>
</td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>java.lang.String</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#getValue()">getValue</a></strong>()</code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>int</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#hashCode()">hashCode</a></strong>()</code> </td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>java.lang.Object</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#readResolve()">readResolve</a></strong>()</code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>java.lang.String</code></td>
<td class="colLast"><code><strong><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html#toString()">toString</a></strong>()</code> </td>
</tr>
</table>
<ul class="blockList">
<li class="blockList"><a name="methods_inherited_from_class_java.lang.Object">
<!-- -->
</a>
<h3>Methods inherited from class java.lang.Object</h3>
<code>clone, finalize, getClass, notify, notifyAll, wait, wait, wait</code></li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
<div class="details">
<ul class="blockList">
<li class="blockList">
<!-- ============ FIELD DETAIL =========== -->
<ul class="blockList">
<li class="blockList"><a name="field_detail">
<!-- -->
</a>
<h3>Field Detail</h3>
<a name="_MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>_MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA</h4>
<pre>public static final java.lang.String _MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA</pre>
<dl><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../../constant-values.html#com.google.api.ads.dfp.v201306.TechnologyTargetingErrorReason._MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA">Constant Field Values</a></dd></dl>
</li>
</ul>
<a name="_WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>_WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA</h4>
<pre>public static final java.lang.String _WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA</pre>
<dl><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../../constant-values.html#com.google.api.ads.dfp.v201306.TechnologyTargetingErrorReason._WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA">Constant Field Values</a></dd></dl>
</li>
</ul>
<a name="_MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>_MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED</h4>
<pre>public static final java.lang.String _MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED</pre>
<dl><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../../constant-values.html#com.google.api.ads.dfp.v201306.TechnologyTargetingErrorReason._MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED">Constant Field Values</a></dd></dl>
</li>
</ul>
<a name="_DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>_DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED</h4>
<pre>public static final java.lang.String _DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED</pre>
<dl><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../../constant-values.html#com.google.api.ads.dfp.v201306.TechnologyTargetingErrorReason._DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED">Constant Field Values</a></dd></dl>
</li>
</ul>
<a name="_DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>_DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED</h4>
<pre>public static final java.lang.String _DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED</pre>
<dl><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../../constant-values.html#com.google.api.ads.dfp.v201306.TechnologyTargetingErrorReason._DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED">Constant Field Values</a></dd></dl>
</li>
</ul>
<a name="_UNKNOWN">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>_UNKNOWN</h4>
<pre>public static final java.lang.String _UNKNOWN</pre>
<dl><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../../constant-values.html#com.google.api.ads.dfp.v201306.TechnologyTargetingErrorReason._UNKNOWN">Constant Field Values</a></dd></dl>
</li>
</ul>
<a name="MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA</h4>
<pre>public static final <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a> MOBILE_LINE_ITEM_CONTAINS_WEB_TECH_CRITERIA</pre>
</li>
</ul>
<a name="WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA</h4>
<pre>public static final <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a> WEB_LINE_ITEM_CONTAINS_MOBILE_TECH_CRITERIA</pre>
</li>
</ul>
<a name="MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED</h4>
<pre>public static final <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a> MOBILE_CARRIER_TARGETING_FEATURE_NOT_ENABLED</pre>
</li>
</ul>
<a name="DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED</h4>
<pre>public static final <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a> DEVICE_CAPABILITY_TARGETING_FEATURE_NOT_ENABLED</pre>
</li>
</ul>
<a name="DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED</h4>
<pre>public static final <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a> DEVICE_CATEGORY_TARGETING_FEATURE_NOT_ENABLED</pre>
</li>
</ul>
<a name="UNKNOWN">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>UNKNOWN</h4>
<pre>public static final <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a> UNKNOWN</pre>
</li>
</ul>
</li>
</ul>
<!-- ========= CONSTRUCTOR DETAIL ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor_detail">
<!-- -->
</a>
<h3>Constructor Detail</h3>
<a name="TechnologyTargetingErrorReason(java.lang.String)">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>TechnologyTargetingErrorReason</h4>
<pre>protected TechnologyTargetingErrorReason(java.lang.String value)</pre>
</li>
</ul>
</li>
</ul>
<!-- ============ METHOD DETAIL ========== -->
<ul class="blockList">
<li class="blockList"><a name="method_detail">
<!-- -->
</a>
<h3>Method Detail</h3>
<a name="getValue()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>getValue</h4>
<pre>public java.lang.String getValue()</pre>
</li>
</ul>
<a name="fromValue(java.lang.String)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>fromValue</h4>
<pre>public static <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a> fromValue(java.lang.String value)
throws java.lang.IllegalArgumentException</pre>
<dl><dt><span class="strong">Throws:</span></dt>
<dd><code>java.lang.IllegalArgumentException</code></dd></dl>
</li>
</ul>
<a name="fromString(java.lang.String)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>fromString</h4>
<pre>public static <a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" title="class in com.google.api.ads.dfp.v201306">TechnologyTargetingErrorReason</a> fromString(java.lang.String value)
throws java.lang.IllegalArgumentException</pre>
<dl><dt><span class="strong">Throws:</span></dt>
<dd><code>java.lang.IllegalArgumentException</code></dd></dl>
</li>
</ul>
<a name="equals(java.lang.Object)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>equals</h4>
<pre>public boolean equals(java.lang.Object obj)</pre>
<dl>
<dt><strong>Overrides:</strong></dt>
<dd><code>equals</code> in class <code>java.lang.Object</code></dd>
</dl>
</li>
</ul>
<a name="hashCode()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>hashCode</h4>
<pre>public int hashCode()</pre>
<dl>
<dt><strong>Overrides:</strong></dt>
<dd><code>hashCode</code> in class <code>java.lang.Object</code></dd>
</dl>
</li>
</ul>
<a name="toString()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>toString</h4>
<pre>public java.lang.String toString()</pre>
<dl>
<dt><strong>Overrides:</strong></dt>
<dd><code>toString</code> in class <code>java.lang.Object</code></dd>
</dl>
</li>
</ul>
<a name="readResolve()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>readResolve</h4>
<pre>public java.lang.Object readResolve()
throws java.io.ObjectStreamException</pre>
<dl><dt><span class="strong">Throws:</span></dt>
<dd><code>java.io.ObjectStreamException</code></dd></dl>
</li>
</ul>
<a name="getSerializer(java.lang.String, java.lang.Class, javax.xml.namespace.QName)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>getSerializer</h4>
<pre>public static org.apache.axis.encoding.Serializer getSerializer(java.lang.String mechType,
java.lang.Class _javaType,
javax.xml.namespace.QName _xmlType)</pre>
</li>
</ul>
<a name="getDeserializer(java.lang.String, java.lang.Class, javax.xml.namespace.QName)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>getDeserializer</h4>
<pre>public static org.apache.axis.encoding.Deserializer getDeserializer(java.lang.String mechType,
java.lang.Class _javaType,
javax.xml.namespace.QName _xmlType)</pre>
</li>
</ul>
<a name="getTypeDesc()">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>getTypeDesc</h4>
<pre>public static org.apache.axis.description.TypeDesc getTypeDesc()</pre>
<div class="block">Return type metadata object</div>
</li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
</div>
<!-- ========= END OF CLASS DATA ========= -->
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../../../com/google/api/ads/dfp/v201306/TechnologyTargetingError.html" title="class in com.google.api.ads.dfp.v201306"><span class="strong">Prev Class</span></a></li>
<li><a href="../../../../../../com/google/api/ads/dfp/v201306/TemplateCreative.html" title="class in com.google.api.ads.dfp.v201306"><span class="strong">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html" target="_top">Frames</a></li>
<li><a href="TechnologyTargetingErrorReason.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li><a href="#field_summary">Field</a> | </li>
<li><a href="#constructor_summary">Constr</a> | </li>
<li><a href="#method_summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li><a href="#field_detail">Field</a> | </li>
<li><a href="#constructor_detail">Constr</a> | </li>
<li><a href="#method_detail">Method</a></li>
</ul>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</body>
</html>
| google-code-export/google-api-dfp-java | docs/com/google/api/ads/dfp/v201306/TechnologyTargetingErrorReason.html | HTML | apache-2.0 | 27,487 |
## user-rest-service-2.0.0 / Swagger 2.0.0-SNAPSHOT
This demo uses the 2.0.0-SNAPSHOT version of the Swagger-Spring integration and will always track HEAD.
Swagger 2.0.0 is currently under development.
### IDE
Maven configuration files are currently supplied for all projects.
* Run Spring Boot Application: mvn spring-boot:run
* Generate Eclipse configuration: mvn eclipse:eclipse
### Features of Model-Classes
* enum (user.state)
* java.lang.Byte[] (user.photo)
* java.lang.Double (user.longitude)
* java.util.List (user.category, user.location)
* java.math.BigDecimal (location.latitude)
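
A minimal sketch of what such a model class might look like (class and field names are assumed from the list above, not taken from the project source):

```java
import java.math.BigDecimal;
import java.util.List;

public class User {

    public enum State { ACTIVE, INACTIVE }

    private State state;             // enum
    private Byte[] photo;            // java.lang.Byte[]
    private Double longitude;        // java.lang.Double
    private List<String> category;   // java.util.List
    private List<Location> location; // java.util.List

    // getters and setters omitted
}

class Location {
    private BigDecimal latitude;     // java.math.BigDecimal
}
```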
### Features of Controller-Classes
* HTTP GET/POST (UserController)
* Handling Error-codes using Exceptions (UserController)
* Handling Error-codes using ResponseEntity (UserControllerResponseEntity)
* Multiple Paths for the same HTTP verb (UserControllerForCodegenWithTwoGetPaths)
>>> the second GET path is currently commented out because of errors during code generation
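
A minimal sketch of the ResponseEntity style of error handling (controller name from the list above; the mapping, parameters, and lookup are assumptions, not the demo's actual code):

```java
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class UserControllerResponseEntity {

    @RequestMapping(value = "/user/{id}", method = RequestMethod.GET)
    public ResponseEntity<User> getUser(@PathVariable("id") Long id) {
        User user = lookup(id);
        if (user == null) {
            // the error code is carried by the ResponseEntity instead of an exception
            return new ResponseEntity<>(HttpStatus.NOT_FOUND);
        }
        return new ResponseEntity<>(user, HttpStatus.OK);
    }

    private User lookup(Long id) {
        return null; // stand-in for a real lookup
    }
}
```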
| jfiala/swagger-spring-demo | user-rest-service-2.0.0/README.md | Markdown | apache-2.0 | 952 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1", manifest={"SpecialistPool",},
)
class SpecialistPool(proto.Message):
r"""SpecialistPool represents customers' own workforce to work on
their data labeling jobs. It includes a group of specialist
managers and workers. Managers are responsible for managing the
workers in this pool as well as customers' data labeling jobs
    associated with this pool. Customers create a specialist pool as
    well as start data labeling jobs on Cloud; managers and workers
    handle the jobs using the CrowdCompute console.
Attributes:
name (str):
Required. The resource name of the
SpecialistPool.
display_name (str):
Required. The user-defined name of the
SpecialistPool. The name can be up to 128
            characters long and can consist of any UTF-8
characters.
This field should be unique on project-level.
specialist_managers_count (int):
Output only. The number of managers in this
SpecialistPool.
specialist_manager_emails (Sequence[str]):
The email addresses of the managers in the
SpecialistPool.
pending_data_labeling_jobs (Sequence[str]):
Output only. The resource name of the pending
data labeling jobs.
specialist_worker_emails (Sequence[str]):
The email addresses of workers in the
SpecialistPool.
"""
name = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
specialist_managers_count = proto.Field(proto.INT32, number=3,)
specialist_manager_emails = proto.RepeatedField(proto.STRING, number=4,)
pending_data_labeling_jobs = proto.RepeatedField(proto.STRING, number=5,)
specialist_worker_emails = proto.RepeatedField(proto.STRING, number=7,)
__all__ = tuple(sorted(__protobuf__.manifest))
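# A hypothetical usage sketch (not part of the generated module): proto-plus
# messages accept their fields as keyword arguments, so a pool could be built as
#
#   pool = SpecialistPool(
#       name="projects/my-project/locations/us-central1/specialistPools/123",
#       display_name="labeling-team",
#   )
#
# The resource name above is a placeholder, not a real resource.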
| googleapis/python-aiplatform | google/cloud/aiplatform_v1/types/specialist_pool.py | Python | apache-2.0 | 2,601 |
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace Microsoft.AspNetCore.Routing
{
/// <summary>
/// Indicates whether ASP.NET routing is processing a URL from an HTTP request or generating a URL.
/// </summary>
public enum RouteDirection
{
/// <summary>
/// A URL from a client is being processed.
/// </summary>
IncomingRequest,
/// <summary>
/// A URL is being created based on the route definition.
/// </summary>
UrlGeneration,
}
}
| aspnet/AspNetCore | src/Http/Routing.Abstractions/src/RouteDirection.cs | C# | apache-2.0 | 622 |
data segment
x dw 0FFFFh
s db "00000",0Dh,0Ah,"$"
data ends
code segment
assume cs:code, ds:data
main:
mov ax, data
mov ds, ax
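    ; Convert the unsigned word x to decimal: repeatedly divide by 10;
    ; each remainder is one digit, stored into s from the rightmost
    ; position backwards; the string is then printed via DOS int 21h.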
    mov bx, 4; index of the rightmost digit position in s
mov ax, [x]
next:
    mov dx, 0; ensure the high 16 bits of the dividend are zero
mov cx, 10
div cx; (DX:AX)/CX=AX..DX
add dl, '0'
mov s[bx], dl
cmp ax, 0
je done
dec bx
jmp next
done:
mov ah, 9
mov dx, offset s
int 21h
mov ah, 4Ch
int 21h
code ends
end main
| Tao-J/hw | asm80386/16int2dec1.asm | Assembly | apache-2.0 | 437 |
---
layout: "fluid/docs_base"
version: "2.1.0"
versionHref: "/docs/v3/2.1.0"
path: ""
category: api
id: "img"
title: "Img"
header_sub_title: "Ionic API Documentation"
doc: "Img"
docType: "class"
---
<h1 class="api-title">
<a class="anchor" name="img" href="#img"></a>
Img
<h3><code>ion-img</code></h3>
</h1>
<a class="improve-v2-docs" href="http://github.com/ionic-team/ionic/edit/master//src/components/img/img.ts#L5">
Improve this doc
</a>
<p>Two of the biggest culprits of scroll jank are starting up new HTTP
requests and rendering images. These two problems are largely why
<code>ion-img</code> was created. The standard HTML <code>img</code> element is often a large
source of these problems, and what makes matters worse is that the app
does not have fine-grained control over the requests and rendering of each
<code>img</code> element.</p>
<p>The <code>ion-img</code> component is similar to the standard <code>img</code> element,
but it also adds features in order to provide improved performance.
Features include loading only images which are visible, using web workers
for HTTP requests, preventing jank while scrolling, and in-memory caching.</p>
<p>Note that <code>ion-img</code> also comes with a few more restrictions in comparison
to the standard <code>img</code> element. A good rule is, if there are only a few
images to be rendered on a page, then the standard <code>img</code> is probably
best. However, if a page has the potential for hundreds or even thousands
of images within a scrollable area, then <code>ion-img</code> would be better suited
for the job.</p>
<blockquote>
<p>Note: <code>ion-img</code> is only meant to be used inside of <a href="/docs/api/components/virtual-scroll/VirtualScroll/">virtual-scroll</a></p>
</blockquote>
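<p>For instance, a minimal sketch of <code>ion-img</code> inside a virtual scroll list
(the <code>items</code> array and its <code>imgSrc</code> field are illustrative placeholders):</p>
<pre><code class="lang-html"><!-- each row declares exact image dimensions up front -->
&lt;ion-list [virtualScroll]="items"&gt;
  &lt;ion-item *virtualItem="let item"&gt;
    &lt;ion-img width="80" height="80" [src]="item.imgSrc"&gt;&lt;/ion-img&gt;
  &lt;/ion-item&gt;
&lt;/ion-list&gt;
</code></pre>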
<h3 id="lazy-loading">Lazy Loading</h3>
<p>Lazy loading images refers to only loading images which are actually
visible within the user's viewport. This also means that images which are
not viewable on the initial load would not be downloaded or rendered. Next,
as the user scrolls, each image which becomes visible is requested and
rendered on demand.</p>
<p>The benefits of this approach are that unnecessary and resource-intensive
HTTP requests are not started, valuable bandwidth isn't wasted, and the
browser can free up resources which would otherwise be wasted on images
which are not even viewable. For example, animated GIFs are enormous
performance drains; however, with <code>ion-img</code> the app is able to dedicate
resources to just the viewable images. But again, if the problems listed
above are not problems within your app, then the standard <code>img</code> element
may be best.</p>
<h3 id="image-dimensions">Image Dimensions</h3>
<p>By providing image dimensions up front, Ionic is able to accurately size
up the image's location within the viewport, which helps lazy load only
images which are viewable. Image dimensions can be set as
properties, as inline styles, or in external stylesheets. It doesn't matter
which method of setting dimensions is used, but it's important that
each <code>ion-img</code> is somehow given an exact size.</p>
<p>For example, by default <code><ion-avatar></code> and <code><ion-thumbnail></code> already come
with exact sizes when placed within an <code>&lt;ion-item&gt;</code>. Giving each image
an exact size then further locks in the size of each <code>ion-item</code>,
which again helps improve scroll performance.</p>
<pre><code class="lang-html"><!-- dimensions set using attributes -->
<ion-img width="80" height="80" src="..."></ion-img>
<!-- dimensions set using input properties -->
<ion-img [width]="imgWidth" [height]="imgHeight" src="..."></ion-img>
<!-- dimensions set using inline styles -->
<ion-img style="width: 80px; height: 80px;" src="..."></ion-img>
</code></pre>
<p>Additionally, each <code>ion-img</code> uses the <code>object-fit: cover</code> CSS property.
What this means is that the actual rendered image will center itself within
its container. Or to really get detailed: The image is sized to maintain
its aspect ratio while filling the containing element’s entire content box.
Its concrete object size is resolved as a cover constraint against the
element’s used width and height.</p>
<h3 id="future-optimizations">Future Optimizations</h3>
<p>Future goals are to place image requests within web workers, and cache
images in-memory as data URIs. This method has proven to be effective;
however, there are some limitations with Cordova which we are
currently working on.</p>
<!-- @usage tag -->
<!-- @property tags -->
<!-- instance methods on the class -->
<!-- input methods on the class -->
<h2><a class="anchor" name="input-properties" href="#input-properties"></a>Input Properties</h2>
<table class="table param-table" style="margin:0;">
<thead>
<tr>
<th>Attr</th>
<th>Type</th>
<th>Details</th>
</tr>
</thead>
<tbody>
<tr>
<td>alt</td>
<td><code>string</code></td>
<td><p> Set the <code>alt</code> attribute which gets assigned to
the inner <code>img</code> element.</p>
</td>
</tr>
<tr>
<td>bounds</td>
<td><code>any</code></td>
<td><p> Sets the bounding rectangle of the element relative to the viewport.
When using <code>VirtualScroll</code>, each virtual item should pass its bounds to each
<code>ion-img</code>. The passed in data object should include <code>top</code> and <code>bottom</code> properties.</p>
</td>
</tr>
<tr>
<td>cache</td>
<td><code>boolean</code></td>
<td><p> After an image has been successfully downloaded, it can be cached
in-memory. This is useful for <code>VirtualScroll</code> by allowing image responses to be
cached, and not rendered, until after scrolling has completed, which allows for
smoother scrolling.</p>
</td>
</tr>
<tr>
<td>height</td>
<td><code>string</code></td>
<td><p> Image height. If this property is not set it's important that
the dimensions are still set using CSS. If the dimension is just a number it
will assume the <code>px</code> unit.</p>
</td>
</tr>
<tr>
<td>src</td>
<td><code>string</code></td>
<td><p> The source of the image.</p>
</td>
</tr>
<tr>
<td>width</td>
<td><code>string</code></td>
<td><p> Image width. If this property is not set it's important that
the dimensions are still set using CSS. If the dimension is just a number it
will assume the <code>px</code> unit.</p>
</td>
</tr>
</tbody>
</table>
<h2 id="sass-variable-header"><a class="anchor" name="sass-variables" href="#sass-variables"></a>Sass Variables</h2>
<div id="sass-variables" ng-controller="SassToggleCtrl">
<div class="sass-platform-toggle">
<h3 ng-init="setSassPlatform('base')">All</h3>
</div>
<table ng-show="active === 'base'" id="sass-base" class="table param-table" style="margin:0;">
<thead>
<tr>
<th>Property</th>
<th>Default</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>$img-placeholder-background</code></td>
<td><code>#eee</code></td>
<td><p>Color of the image when it hasn't fully loaded yet</p>
</td>
</tr>
</tbody>
</table>
</div>
<!-- related link --><!-- end content block -->
<!-- end body block -->
| driftyco/ionic-site | content/docs/v3/2.1.0/api/components/img/Img/index.md | Markdown | apache-2.0 | 7,664 |
---
title: Managing access to multiple Kubernetes clusters
date: 2018-10-30 00:00:00 Z
tags:
- docker
- truc&astuce
- k8s
subtitle: from multiple config files to a single config file
comments: true
thumbnail: https://upload.wikimedia.org/wikipedia/commons/thumb/3/39/Kubernetes_logo_without_workmark.svg/1200px-Kubernetes_logo_without_workmark.svg.png
---
## Merging the configurations
1. Save all your config files in `~/.kube/`, in files named `config.ENV`
2. Merge the configs: `KUBECONFIG=$(find ~/.kube -maxdepth 1 -type f -name "*config*" ! -name config | tr '\n' ':') kubectl config view --flatten > ~/.kube/config`
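To check the result, assuming `kubectl` is on your PATH:

```bash
# list every context now available in the merged ~/.kube/config
kubectl config get-contexts

# switch to one of them (the context name here is illustrative)
kubectl config use-context my-env
```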
## Handy tools
- [kubectx](https://kubectx.dev/): switch clusters easily
- [kubens](https://kubectx.dev/): switch namespaces easily
| tcoupin/tcoupin.github.io | _posts/2020-04-07-manage-multiple-k8s-config.md | Markdown | apache-2.0 | 811 |
package com.etiennelawlor.loop.network.models.response;
import android.os.Parcel;
import android.os.Parcelable;
import com.google.gson.annotations.SerializedName;
/**
* Created by etiennelawlor on 5/23/15.
*/
public class Tag implements Parcelable {
// region Fields
@SerializedName("uri")
private String uri;
@SerializedName("name")
private String name;
@SerializedName("tag")
private String tag;
@SerializedName("canonical")
private String canonical;
// endregion
// region Constructors
public Tag() {
}
protected Tag(Parcel in) {
this.uri = in.readString();
this.name = in.readString();
this.tag = in.readString();
this.canonical = in.readString();
}
// endregion
// region Getters
public String getUri() {
return uri;
}
public String getName() {
return name;
}
public String getTag() {
return tag;
}
public String getCanonical() {
return canonical;
}
// endregion
// region Setters
public void setUri(String uri) {
this.uri = uri;
}
public void setName(String name) {
this.name = name;
}
public void setTag(String tag) {
this.tag = tag;
}
public void setCanonical(String canonical) {
this.canonical = canonical;
}
// endregion
// region Parcelable Methods
@Override
public int describeContents() {
return 0;
}
@Override
public void writeToParcel(Parcel dest, int flags) {
dest.writeString(this.uri);
dest.writeString(this.name);
dest.writeString(this.tag);
dest.writeString(this.canonical);
}
// endregion
public static final Parcelable.Creator<Tag> CREATOR = new Parcelable.Creator<Tag>() {
@Override
public Tag createFromParcel(Parcel source) {
return new Tag(source);
}
@Override
public Tag[] newArray(int size) {
return new Tag[size];
}
};
}
| lawloretienne/Loop | app/src/main/java/com/etiennelawlor/loop/network/models/response/Tag.java | Java | apache-2.0 | 2,066 |
# -*- coding: utf-8 -*-
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Prints the env_setup banner for cmd.exe.
This is done from Python as activating colors and printing ASCII art are not
easy to do in cmd.exe. Activated colors also don't persist in the parent
process.
"""
from __future__ import print_function
import argparse
import os
import sys
from .colors import Color, enable_colors # type: ignore
_PIGWEED_BANNER = u'''
▒█████▄ █▓ ▄███▒ ▒█ ▒█ ░▓████▒ ░▓████▒ ▒▓████▄
▒█░ █░ ░█▒ ██▒ ▀█▒ ▒█░ █ ▒█ ▒█ ▀ ▒█ ▀ ▒█ ▀█▌
▒█▄▄▄█░ ░█▒ █▓░ ▄▄░ ▒█░ █ ▒█ ▒███ ▒███ ░█ █▌
▒█▀ ░█░ ▓█ █▓ ░█░ █ ▒█ ▒█ ▄ ▒█ ▄ ░█ ▄█▌
▒█ ░█░ ░▓███▀ ▒█▓▀▓█░ ░▓████▒ ░▓████▒ ▒▓████▀
'''
def print_banner(bootstrap, no_shell_file):
"""Print the Pigweed or project-specific banner"""
enable_colors()
print(Color.green('\n WELCOME TO...'))
print(Color.magenta(_PIGWEED_BANNER))
if bootstrap:
print(
Color.green('\n BOOTSTRAP! Bootstrap may take a few minutes; '
'please be patient'))
print(
Color.green(
' On Windows, this stage is extremely slow (~10 minutes).\n'))
else:
print(
Color.green(
'\n ACTIVATOR! This sets your console environment variables.\n'
))
if no_shell_file:
print(Color.bold_red('Error!\n'))
print(
Color.red(' Your Pigweed environment does not seem to be'
' configured.'))
print(Color.red(' Run bootstrap.bat to perform initial setup.'))
return 0
def parse():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('--bootstrap', action='store_true')
parser.add_argument('--no-shell-file', action='store_true')
return parser.parse_args()
def main():
"""Script entry point."""
if os.name != 'nt':
return 1
return print_banner(**vars(parse()))
if __name__ == '__main__':
sys.exit(main())
| google/pigweed | pw_env_setup/py/pw_env_setup/windows_env_start.py | Python | apache-2.0 | 2,955 |
package rvc.ann;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
import static java.lang.annotation.ElementType.METHOD;
import static java.lang.annotation.RetentionPolicy.RUNTIME;
/**
* @author nurmuhammad
*/
@Retention(RUNTIME)
@Target(METHOD)
public @interface OPTIONS {
String value() default Constants.NULL_VALUE;
boolean absolutePath() default false;
} | nurmuhammad/rvc | src/main/java/rvc/ann/OPTIONS.java | Java | apache-2.0 | 399 |
package com.xiaojinzi.component.bean;
import javax.lang.model.element.Element;
/**
* time : 2018/07/26
*
* @author : xiaojinzi
*/
public class RouterDegradeAnnoBean {
/**
     * Priority
*/
private int priority;
/**
     * A class element that implements the RouterDegrade interface
*/
private Element rawType;
public int getPriority() {
return priority;
}
public void setPriority(int priority) {
this.priority = priority;
}
public Element getRawType() {
return rawType;
}
public void setRawType(Element rawType) {
this.rawType = rawType;
}
}
| xiaojinzi123/Component | ComponentCompiler/src/main/java/com/xiaojinzi/component/bean/RouterDegradeAnnoBean.java | Java | apache-2.0 | 632 |
package Paws::EC2::DhcpConfiguration;
use Moose;
has Key => (is => 'ro', isa => 'Str', request_name => 'key', traits => ['NameInRequest']);
has Values => (is => 'ro', isa => 'ArrayRef[Paws::EC2::AttributeValue]', request_name => 'valueSet', traits => ['NameInRequest']);
1;
### main pod documentation begin ###
=head1 NAME
Paws::EC2::DhcpConfiguration
=head1 USAGE
This class represents one of two things:
=head3 Arguments in a call to a service
Use the attributes of this class as arguments to methods. You shouldn't make instances of this class.
Each attribute should be used as a named argument in the calls that expect this type of object.
As an example, if Att1 is expected to be a Paws::EC2::DhcpConfiguration object:
$service_obj->Method(Att1 => { Key => $value, ..., Values => $value });
=head3 Results returned from an API call
Use accessors for each attribute. If Att1 is expected to be a Paws::EC2::DhcpConfiguration object:
$result = $service_obj->Method(...);
$result->Att1->Key
=head1 DESCRIPTION
This class has no description
=head1 ATTRIBUTES
=head2 Key => Str
The name of a DHCP option.
=head2 Values => ArrayRef[L<Paws::EC2::AttributeValue>]
One or more values for the DHCP option.
=head1 SEE ALSO
This class forms part of L<Paws>, describing an object used in L<Paws::EC2>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/EC2/DhcpConfiguration.pm | Perl | apache-2.0 | 1,512 |
---
layout: article
title: "Constructing the Object Model"
description: "Before the browser can render content to the screen it needs to construct the DOM and CSSOM trees. As a result, we need to ensure that we deliver both the HTML and CSS to the browser as quickly as possible."
introduction: "Before the browser can render the page it needs to construct the DOM and CSSOM trees. As a result, we need to ensure that we deliver both the HTML and CSS to the browser as quickly as possible."
article:
written_on: 2014-04-01
updated_on: 2014-04-28
order: 1
collection: critical-rendering-path
key-takeaways:
construct-object-model:
- Bytes → characters → tokens → nodes → object model
- HTML markup is transformed into a Document Object Model (DOM), CSS markup is transformed into a CSS Object Model (CSSOM)
- DOM and CSSOM are independent data structures
- Chrome DevTools Timeline allows us to capture and inspect the construction and processing costs of DOM and CSSOM
notes:
devtools:
- We'll assume that you have basic familiarity with Chrome DevTools - i.e. you know how to capture a network waterfall, or record a timeline. If you need a quick refresher, check out the <a href="https://developers.google.com/chrome-developer-tools/">Chrome Developer Tools documentation</a>, or if you're new to DevTools, I recommend taking the Codeschool <a href="http://discover-devtools.codeschool.com/">Discover DevTools</a> course.
---
{% wrap content%}
<style>
img, video, object {
max-width: 100%;
}
img.center {
display: block;
margin-left: auto;
margin-right: auto;
}
</style>
{% include modules/toc.liquid %}
{% include modules/takeaway.liquid list=page.key-takeaways.construct-object-model %}
## Document Object Model (DOM)
{% include_code _code/basic_dom.html full %}
Let’s start with the simplest possible case: a plain HTML page with some text and a single image. What does the browser need to do to process this simple page?
<img src="images/full-process.png" alt="DOM construction process">
1. **Conversion:** the browser reads the raw bytes of the HTML off the disk or network and translates them to individual characters based on the specified encoding of the file (e.g. UTF-8).
1. **Tokenizing:** the browser converts strings of characters into distinct tokens specified by the [W3C HTML5 standard](http://www.w3.org/TR/html5/) - e.g. "<html>", "<body>" and other strings within the "angle brackets". Each token has a special meaning and a set of rules.
1. **Lexing:** the emitted tokens are converted into "objects" which define their properties and rules.
1. **DOM construction:** Finally, because the HTML markup defines relationships between different tags (some tags are contained within other tags), the created objects are linked in a tree data structure that also captures the parent-child relationships defined in the original markup: the _HTML_ object is a parent of the _body_ object, the _body_ is a parent of the _paragraph_ object, and so on.
<img src="images/dom-tree.png" class="center" alt="DOM tree">
**The final output of this entire process is the Document Object Model, or the "DOM" of our simple page, which the browser uses for all further processing of the page.**
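To make this concrete, here is a tiny illustrative snippet that walks the parsed tree from script once parsing has finished:

```js
// Logs the parent/child structure the parser built for the simple page.
document.addEventListener('DOMContentLoaded', function() {
  var body = document.body;
  console.log(body.parentNode.nodeName); // "HTML": body's parent is the html object
  console.log(body.children.length);     // direct element children of <body>
});
```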
Every time the browser has to process HTML markup it has to step through all of the steps above: convert bytes to characters, identify tokens, convert tokens to nodes, and build the DOM tree. This entire process can take some time, especially if we have a large amount of HTML to process.
<img src="images/dom-timeline.png" class="center" alt="Tracing DOM construction in DevTools">
{% include modules/remember.liquid title="Note" list=page.notes.devtools %}
If you open up Chrome DevTools and record a timeline while the page is loaded, you can see the actual time taken to perform this step — in the example above, it took us ~5ms to convert a chunk of HTML bytes into a DOM tree. Of course, if the page were larger, as most pages are, this process might take significantly longer. You will see in our future sections on creating smooth animations that this can easily become your bottleneck if the browser has to process large amounts of HTML. That said, let’s not get ahead of ourselves…
With the DOM tree ready, do we have enough information to render the page to the screen? Not yet! The DOM tree captures the properties and relationships of the document markup, but it does not tell us anything about how the element should look when rendered. That’s the responsibility of the CSSOM, which we turn to next!
## CSS Object Model (CSSOM)
While the browser was constructing the DOM of our simple page, it encountered a link tag in the head section of the document referencing an external CSS stylesheet: style.css. Anticipating that it will need this resource to render the page, it immediately dispatches a request for this resource, which comes back with the following content:
{% include_code _code/style.css full css %}
Of course, we could have declared our styles directly within the HTML markup (inline), but keeping our CSS independent of HTML allows us to treat content and design as separate concerns: the designers can work on CSS, developers can focus on HTML, and so on.
Just as with HTML, we need to convert the received CSS rules into something that the browser can understand and work with. Hence, once again, we repeat a very similar process as we did with HTML:
<img src="images/cssom-construction.png" class="center" alt="CSSOM construction steps">
The CSS bytes are converted into characters, then to tokens and nodes, and finally are linked into a tree structure known as the "CSS Object Model", or CSSOM for short:
<img src="images/cssom-tree.png" class="center" alt="CSSOM tree">
Why does the CSSOM have a tree structure? When computing the final set of styles for any object on the page, the browser starts with the most general rule applicable to that node (e.g. if it is a child of body element, then all body styles apply) and then recursively refines the computed styles by applying more specific rules - i.e. the rules "cascade down".
To make it more concrete, consider the CSSOM tree above. Any text contained within the _span_ tag that is placed within the body element will have a font size of 16 pixels and have red text - the font-size directive cascades down from the body to the span. However, if a span tag is a child of a paragraph (p) tag, then its contents are not displayed.
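Expressed as CSS, rules consistent with this behavior would be (a reconstruction for illustration, not necessarily the exact stylesheet):

```css
body { font-size: 16px; }   /* cascades down to every descendant */
span { color: red; }        /* span text is red... */
p span { display: none; }   /* ...unless the span sits inside a paragraph */
```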
Also, note that the above tree is not the complete CSSOM tree and only shows the styles we decided to override in our stylesheet. Every browser provides a default set of styles also known as "user agent styles" -- that’s what we see when we don’t provide any of our own -- and our styles simply override these defaults (e.g. [default IE styles](http://www.iecss.com/)). If you have ever inspected your "computed styles" in Chrome DevTools and wondered where all the styles are coming from, now you know!
Curious to know how long the CSS processing took? Record a timeline in DevTools and look for the "Recalculate Style" event: unlike DOM parsing, the timeline doesn’t show a separate "Parse CSS" entry; instead it captures parsing and CSSOM tree construction, plus the recursive calculation of computed styles, under this one event.
<img src="images/cssom-timeline.png" class="center" alt="Tracing CSSOM construction in DevTools">
Our trivial stylesheet takes ~0.6ms to process and affects 8 elements on the page -- not much, but once again, not free. However, where did the 8 elements come from? The CSSOM and DOM are independent data structures! Turns out, the browser is hiding an important step. Next, let’s talk about the render tree that links the DOM and CSSOM together.
{% include modules/nextarticle.liquid %}
{% endwrap %}
| SuriyaaKudoIsc/WebFundamentals | src/site/performance/critical-rendering-path/constructing-the-object-model.markdown | Markdown | apache-2.0 | 7,835 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.executiongraph;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.Archiveable;
import org.apache.flink.api.common.InputDependencyConstraint;
import org.apache.flink.api.common.accumulators.Accumulator;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.core.io.InputSplit;
import org.apache.flink.runtime.JobException;
import org.apache.flink.runtime.accumulators.StringifiedAccumulatorResult;
import org.apache.flink.runtime.checkpoint.CheckpointOptions;
import org.apache.flink.runtime.checkpoint.CheckpointType;
import org.apache.flink.runtime.checkpoint.JobManagerTaskRestore;
import org.apache.flink.runtime.clusterframework.types.AllocationID;
import org.apache.flink.runtime.clusterframework.types.ResourceID;
import org.apache.flink.runtime.clusterframework.types.ResourceProfile;
import org.apache.flink.runtime.clusterframework.types.SlotProfile;
import org.apache.flink.runtime.concurrent.ComponentMainThreadExecutor;
import org.apache.flink.runtime.concurrent.FutureUtils;
import org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor;
import org.apache.flink.runtime.deployment.TaskDeploymentDescriptor;
import org.apache.flink.runtime.deployment.TaskDeploymentDescriptorFactory;
import org.apache.flink.runtime.execution.ExecutionState;
import org.apache.flink.runtime.instance.SlotSharingGroupId;
import org.apache.flink.runtime.io.network.partition.PartitionTracker;
import org.apache.flink.runtime.io.network.partition.ResultPartitionID;
import org.apache.flink.runtime.jobgraph.IntermediateDataSetID;
import org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID;
import org.apache.flink.runtime.jobmanager.scheduler.CoLocationConstraint;
import org.apache.flink.runtime.jobmanager.scheduler.LocationPreferenceConstraint;
import org.apache.flink.runtime.jobmanager.scheduler.NoResourceAvailableException;
import org.apache.flink.runtime.jobmanager.scheduler.ScheduledUnit;
import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup;
import org.apache.flink.runtime.jobmanager.slots.TaskManagerGateway;
import org.apache.flink.runtime.jobmaster.LogicalSlot;
import org.apache.flink.runtime.jobmaster.SlotRequestId;
import org.apache.flink.runtime.jobmaster.slotpool.SlotProvider;
import org.apache.flink.runtime.messages.Acknowledge;
import org.apache.flink.runtime.messages.StackTraceSampleResponse;
import org.apache.flink.runtime.shuffle.PartitionDescriptor;
import org.apache.flink.runtime.shuffle.ProducerDescriptor;
import org.apache.flink.runtime.shuffle.ShuffleDescriptor;
import org.apache.flink.runtime.state.KeyGroupRangeAssignment;
import org.apache.flink.runtime.taskmanager.TaskManagerLocation;
import org.apache.flink.util.ExceptionUtils;
import org.apache.flink.util.FlinkException;
import org.apache.flink.util.FlinkRuntimeException;
import org.apache.flink.util.OptionalFailure;
import org.apache.flink.util.function.ThrowingRunnable;
import org.slf4j.Logger;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.function.Function;
import java.util.stream.Collectors;
import static org.apache.flink.runtime.deployment.TaskDeploymentDescriptorFactory.getConsumedPartitionShuffleDescriptor;
import static org.apache.flink.runtime.execution.ExecutionState.CANCELED;
import static org.apache.flink.runtime.execution.ExecutionState.CANCELING;
import static org.apache.flink.runtime.execution.ExecutionState.CREATED;
import static org.apache.flink.runtime.execution.ExecutionState.DEPLOYING;
import static org.apache.flink.runtime.execution.ExecutionState.FAILED;
import static org.apache.flink.runtime.execution.ExecutionState.FINISHED;
import static org.apache.flink.runtime.execution.ExecutionState.RUNNING;
import static org.apache.flink.runtime.execution.ExecutionState.SCHEDULED;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
* A single execution of a vertex. While an {@link ExecutionVertex} can be executed multiple times
* (for recovery, re-computation, re-configuration), this class tracks the state of a single execution
* of that vertex and the resources.
*
* <h2>Lock free state transitions</h2>
*
* <p>In several points of the code, we need to deal with possible concurrent state changes and actions.
* For example, while the call to deploy a task (send it to the TaskManager) happens, the task gets cancelled.
*
* <p>We could lock the entire portion of the code (decision to deploy, deploy, set state to running) such that
* it is guaranteed that any "cancel command" will only pick up after deployment is done and that the "cancel
* command" call will never overtake the deploying call.
*
 * <p>This blocks the threads big time, because the remote calls may take long. Depending on their locking behavior, it
* may even result in distributed deadlocks (unless carefully avoided). We therefore use atomic state updates and
* occasional double-checking to ensure that the state after a completed call is as expected, and trigger correcting
* actions if it is not. Many actions are also idempotent (like canceling).
*/
public class Execution implements AccessExecution, Archiveable<ArchivedExecution>, LogicalSlot.Payload {
private static final AtomicReferenceFieldUpdater<Execution, ExecutionState> STATE_UPDATER =
AtomicReferenceFieldUpdater.newUpdater(Execution.class, ExecutionState.class, "state");
private static final AtomicReferenceFieldUpdater<Execution, LogicalSlot> ASSIGNED_SLOT_UPDATER = AtomicReferenceFieldUpdater.newUpdater(
Execution.class,
LogicalSlot.class,
"assignedResource");
private static final Logger LOG = ExecutionGraph.LOG;
private static final int NUM_CANCEL_CALL_TRIES = 3;
private static final int NUM_STOP_CALL_TRIES = 3;
// --------------------------------------------------------------------------------------------
/** The executor which is used to execute futures. */
private final Executor executor;
/** The execution vertex whose task this execution executes. */
private final ExecutionVertex vertex;
/** The unique ID marking the specific execution instant of the task. */
private final ExecutionAttemptID attemptId;
/** Gets the global modification version of the execution graph when this execution was created.
* This version is bumped in the ExecutionGraph whenever a global failover happens. It is used
* to resolve conflicts between concurrent modification by global and local failover actions. */
private final long globalModVersion;
/** The timestamps when state transitions occurred, indexed by {@link ExecutionState#ordinal()}. */
private final long[] stateTimestamps;
private final int attemptNumber;
private final Time rpcTimeout;
private final Collection<PartitionInfo> partitionInfos;
/** A future that completes once the Execution reaches a terminal ExecutionState. */
private final CompletableFuture<ExecutionState> terminalStateFuture;
private final CompletableFuture<?> releaseFuture;
private final CompletableFuture<TaskManagerLocation> taskManagerLocationFuture;
private volatile ExecutionState state = CREATED;
private volatile LogicalSlot assignedResource;
private volatile Throwable failureCause; // once assigned, never changes
/** Information to restore the task on recovery, such as checkpoint id and task state snapshot. */
@Nullable
private volatile JobManagerTaskRestore taskRestore;
/** This field holds the allocation id once it was assigned successfully. */
@Nullable
private volatile AllocationID assignedAllocationID;
// ------------------------ Accumulators & Metrics ------------------------
/** Lock for updating the accumulators atomically.
* Prevents final accumulators to be overwritten by partial accumulators on a late heartbeat. */
private final Object accumulatorLock = new Object();
/* Continuously updated map of user-defined accumulators */
private volatile Map<String, Accumulator<?, ?>> userAccumulators;
private volatile IOMetrics ioMetrics;
private Map<IntermediateResultPartitionID, ResultPartitionDeploymentDescriptor> producedPartitions;
// --------------------------------------------------------------------------------------------
/**
* Creates a new Execution attempt.
*
* @param executor
* The executor used to dispatch callbacks from futures and asynchronous RPC calls.
* @param vertex
* The execution vertex to which this Execution belongs
* @param attemptNumber
* The execution attempt number.
* @param globalModVersion
* The global modification version of the execution graph when this execution was created
* @param startTimestamp
* The timestamp that marks the creation of this Execution
* @param rpcTimeout
* The rpcTimeout for RPC calls like deploy/cancel/stop.
*/
public Execution(
Executor executor,
ExecutionVertex vertex,
int attemptNumber,
long globalModVersion,
long startTimestamp,
Time rpcTimeout) {
this.executor = checkNotNull(executor);
this.vertex = checkNotNull(vertex);
this.attemptId = new ExecutionAttemptID();
this.rpcTimeout = checkNotNull(rpcTimeout);
this.globalModVersion = globalModVersion;
this.attemptNumber = attemptNumber;
this.stateTimestamps = new long[ExecutionState.values().length];
markTimestamp(CREATED, startTimestamp);
this.partitionInfos = new ArrayList<>(16);
this.producedPartitions = Collections.emptyMap();
this.terminalStateFuture = new CompletableFuture<>();
this.releaseFuture = new CompletableFuture<>();
this.taskManagerLocationFuture = new CompletableFuture<>();
this.assignedResource = null;
}
// --------------------------------------------------------------------------------------------
// Properties
// --------------------------------------------------------------------------------------------
public ExecutionVertex getVertex() {
return vertex;
}
@Override
public ExecutionAttemptID getAttemptId() {
return attemptId;
}
@Override
public int getAttemptNumber() {
return attemptNumber;
}
@Override
public ExecutionState getState() {
return state;
}
@Nullable
public AllocationID getAssignedAllocationID() {
return assignedAllocationID;
}
/**
* Gets the global modification version of the execution graph when this execution was created.
*
* <p>This version is bumped in the ExecutionGraph whenever a global failover happens. It is used
* to resolve conflicts between concurrent modification by global and local failover actions.
*/
public long getGlobalModVersion() {
return globalModVersion;
}
public CompletableFuture<TaskManagerLocation> getTaskManagerLocationFuture() {
return taskManagerLocationFuture;
}
public LogicalSlot getAssignedResource() {
return assignedResource;
}
public Optional<ResultPartitionDeploymentDescriptor> getResultPartitionDeploymentDescriptor(
IntermediateResultPartitionID id) {
return Optional.ofNullable(producedPartitions.get(id));
}
/**
* Tries to assign the given slot to the execution. The assignment works only if the
* Execution is in state SCHEDULED. Returns true, if the resource could be assigned.
*
* @param logicalSlot to assign to this execution
* @return true if the slot could be assigned to the execution, otherwise false
*/
@VisibleForTesting
boolean tryAssignResource(final LogicalSlot logicalSlot) {
assertRunningInJobMasterMainThread();
checkNotNull(logicalSlot);
// only allow to set the assigned resource in state SCHEDULED or CREATED
// note: we also accept resource assignment when being in state CREATED for testing purposes
if (state == SCHEDULED || state == CREATED) {
if (ASSIGNED_SLOT_UPDATER.compareAndSet(this, null, logicalSlot)) {
if (logicalSlot.tryAssignPayload(this)) {
// check for concurrent modification (e.g. cancelling call)
if ((state == SCHEDULED || state == CREATED) && !taskManagerLocationFuture.isDone()) {
taskManagerLocationFuture.complete(logicalSlot.getTaskManagerLocation());
assignedAllocationID = logicalSlot.getAllocationId();
return true;
} else {
// free assigned resource and return false
ASSIGNED_SLOT_UPDATER.set(this, null);
return false;
}
} else {
ASSIGNED_SLOT_UPDATER.set(this, null);
return false;
}
} else {
// the slot already has another slot assigned
return false;
}
} else {
// do not allow resource assignment if we are not in state SCHEDULED
return false;
}
}
public InputSplit getNextInputSplit() {
final LogicalSlot slot = this.getAssignedResource();
final String host = slot != null ? slot.getTaskManagerLocation().getHostname() : null;
return this.vertex.getNextInputSplit(host);
}
@Override
public TaskManagerLocation getAssignedResourceLocation() {
// returns non-null only when a location is already assigned
final LogicalSlot currentAssignedResource = assignedResource;
return currentAssignedResource != null ? currentAssignedResource.getTaskManagerLocation() : null;
}
public Throwable getFailureCause() {
return failureCause;
}
@Override
public String getFailureCauseAsString() {
return ExceptionUtils.stringifyException(getFailureCause());
}
@Override
public long[] getStateTimestamps() {
return stateTimestamps;
}
@Override
public long getStateTimestamp(ExecutionState state) {
return this.stateTimestamps[state.ordinal()];
}
public boolean isFinished() {
return state.isTerminal();
}
@Nullable
public JobManagerTaskRestore getTaskRestore() {
return taskRestore;
}
/**
* Sets the initial state for the execution. The serialized state is then shipped via the
* {@link TaskDeploymentDescriptor} to the TaskManagers.
*
* @param taskRestore information to restore the state
*/
public void setInitialState(@Nullable JobManagerTaskRestore taskRestore) {
this.taskRestore = taskRestore;
}
/**
* Gets a future that completes once the task execution reaches a terminal state.
* The future will be completed with specific state that the execution reached.
* This future is always completed from the job master's main thread.
*
* @return A future which is completed once the execution reaches a terminal state
*/
@Override
public CompletableFuture<ExecutionState> getTerminalStateFuture() {
return terminalStateFuture;
}
/**
* Gets the release future which is completed once the execution reaches a terminal
* state and the assigned resource has been released.
* This future is always completed from the job master's main thread.
*
* @return A future which is completed once the assigned resource has been released
*/
public CompletableFuture<?> getReleaseFuture() {
return releaseFuture;
}
// --------------------------------------------------------------------------------------------
// Actions
// --------------------------------------------------------------------------------------------
public CompletableFuture<Void> scheduleForExecution() {
final ExecutionGraph executionGraph = getVertex().getExecutionGraph();
final SlotProvider resourceProvider = executionGraph.getSlotProvider();
final boolean allowQueued = executionGraph.isQueuedSchedulingAllowed();
return scheduleForExecution(
resourceProvider,
allowQueued,
LocationPreferenceConstraint.ANY,
Collections.emptySet());
}
/**
 * NOTE: This method only throws exceptions if it is in an illegal state to be scheduled, or if the task needs
 * to be scheduled immediately and no resource is available. If the task is accepted by the scheduler, any
* error sets the vertex state to failed and triggers the recovery logic.
*
* @param slotProvider The slot provider to use to allocate slot for this execution attempt.
* @param queued Flag to indicate whether the scheduler may queue this task if it cannot
* immediately deploy it.
* @param locationPreferenceConstraint constraint for the location preferences
* @param allPreviousExecutionGraphAllocationIds set with all previous allocation ids in the job graph.
* Can be empty if the allocation ids are not required for scheduling.
* @return Future which is completed once the Execution has been deployed
*/
public CompletableFuture<Void> scheduleForExecution(
SlotProvider slotProvider,
boolean queued,
LocationPreferenceConstraint locationPreferenceConstraint,
@Nonnull Set<AllocationID> allPreviousExecutionGraphAllocationIds) {
assertRunningInJobMasterMainThread();
final ExecutionGraph executionGraph = vertex.getExecutionGraph();
final Time allocationTimeout = executionGraph.getAllocationTimeout();
try {
final CompletableFuture<Execution> allocationFuture = allocateResourcesForExecution(
slotProvider,
queued,
locationPreferenceConstraint,
allPreviousExecutionGraphAllocationIds,
allocationTimeout);
final CompletableFuture<Void> deploymentFuture;
if (allocationFuture.isDone() || queued) {
deploymentFuture = allocationFuture.thenRun(ThrowingRunnable.unchecked(this::deploy));
} else {
deploymentFuture = FutureUtils.completedExceptionally(
new IllegalArgumentException("The slot allocation future has not been completed yet."));
}
deploymentFuture.whenComplete(
(Void ignored, Throwable failure) -> {
if (failure != null) {
final Throwable stripCompletionException = ExceptionUtils.stripCompletionException(failure);
final Throwable schedulingFailureCause;
if (stripCompletionException instanceof TimeoutException) {
schedulingFailureCause = new NoResourceAvailableException(
"Could not allocate enough slots within timeout of " + allocationTimeout + " to run the job. " +
"Please make sure that the cluster has enough resources.");
} else {
schedulingFailureCause = stripCompletionException;
}
markFailed(schedulingFailureCause);
}
});
return deploymentFuture;
} catch (IllegalExecutionStateException e) {
return FutureUtils.completedExceptionally(e);
}
}
/**
* Allocates resources for the execution.
*
 * <p>Allocates the following resources:
* <ol>
* <li>slot obtained from the slot provider</li>
* <li>registers produced partitions with the {@link org.apache.flink.runtime.shuffle.ShuffleMaster}</li>
* </ol>
*
* @param slotProvider to obtain a new slot from
* @param queued if the allocation can be queued
* @param locationPreferenceConstraint constraint for the location preferences
* @param allPreviousExecutionGraphAllocationIds set with all previous allocation ids in the job graph.
* Can be empty if the allocation ids are not required for scheduling.
* @param allocationTimeout rpcTimeout for allocating a new slot
* @return Future which is completed with this execution once the slot has been assigned
* or with an exception if an error occurred.
*/
CompletableFuture<Execution> allocateResourcesForExecution(
SlotProvider slotProvider,
boolean queued,
LocationPreferenceConstraint locationPreferenceConstraint,
@Nonnull Set<AllocationID> allPreviousExecutionGraphAllocationIds,
Time allocationTimeout) {
return allocateAndAssignSlotForExecution(
slotProvider,
queued,
locationPreferenceConstraint,
allPreviousExecutionGraphAllocationIds,
allocationTimeout)
.thenCompose(slot -> registerProducedPartitions(slot.getTaskManagerLocation()));
}
/**
* Allocates and assigns a slot obtained from the slot provider to the execution.
*
* @param slotProvider to obtain a new slot from
* @param queued if the allocation can be queued
* @param locationPreferenceConstraint constraint for the location preferences
* @param allPreviousExecutionGraphAllocationIds set with all previous allocation ids in the job graph.
* Can be empty if the allocation ids are not required for scheduling.
* @param allocationTimeout rpcTimeout for allocating a new slot
* @return Future which is completed with the allocated slot once it has been assigned
* or with an exception if an error occurred.
*/
private CompletableFuture<LogicalSlot> allocateAndAssignSlotForExecution(
SlotProvider slotProvider,
boolean queued,
LocationPreferenceConstraint locationPreferenceConstraint,
@Nonnull Set<AllocationID> allPreviousExecutionGraphAllocationIds,
Time allocationTimeout) {
checkNotNull(slotProvider);
assertRunningInJobMasterMainThread();
final SlotSharingGroup sharingGroup = vertex.getJobVertex().getSlotSharingGroup();
final CoLocationConstraint locationConstraint = vertex.getLocationConstraint();
// sanity check
if (locationConstraint != null && sharingGroup == null) {
throw new IllegalStateException(
"Trying to schedule with co-location constraint but without slot sharing allowed.");
}
// this method only works if the execution is in the state 'CREATED'
if (transitionState(CREATED, SCHEDULED)) {
final SlotSharingGroupId slotSharingGroupId = sharingGroup != null ? sharingGroup.getSlotSharingGroupId() : null;
ScheduledUnit toSchedule = locationConstraint == null ?
new ScheduledUnit(this, slotSharingGroupId) :
new ScheduledUnit(this, slotSharingGroupId, locationConstraint);
// try to extract previous allocation ids, if applicable, so that we can reschedule to the same slot
ExecutionVertex executionVertex = getVertex();
AllocationID lastAllocation = executionVertex.getLatestPriorAllocation();
Collection<AllocationID> previousAllocationIDs =
lastAllocation != null ? Collections.singletonList(lastAllocation) : Collections.emptyList();
// calculate the preferred locations
final CompletableFuture<Collection<TaskManagerLocation>> preferredLocationsFuture =
calculatePreferredLocations(locationPreferenceConstraint);
final SlotRequestId slotRequestId = new SlotRequestId();
final CompletableFuture<LogicalSlot> logicalSlotFuture =
preferredLocationsFuture.thenCompose(
(Collection<TaskManagerLocation> preferredLocations) ->
slotProvider.allocateSlot(
slotRequestId,
toSchedule,
new SlotProfile(
ResourceProfile.UNKNOWN,
preferredLocations,
previousAllocationIDs,
allPreviousExecutionGraphAllocationIds),
queued,
allocationTimeout));
// register call back to cancel slot request in case that the execution gets canceled
releaseFuture.whenComplete(
(Object ignored, Throwable throwable) -> {
if (logicalSlotFuture.cancel(false)) {
slotProvider.cancelSlotRequest(
slotRequestId,
slotSharingGroupId,
new FlinkException("Execution " + this + " was released."));
}
});
// This forces calls to the slot pool back into the main thread, for normal and exceptional completion
return logicalSlotFuture.handle(
(LogicalSlot logicalSlot, Throwable failure) -> {
if (failure != null) {
throw new CompletionException(failure);
}
if (tryAssignResource(logicalSlot)) {
return logicalSlot;
} else {
// release the slot
logicalSlot.releaseSlot(new FlinkException("Could not assign logical slot to execution " + this + '.'));
throw new CompletionException(
new FlinkException(
"Could not assign slot " + logicalSlot + " to execution " + this + " because it has already been assigned "));
}
});
} else {
// call race, already deployed, or already done
throw new IllegalExecutionStateException(this, CREATED, state);
}
}
@VisibleForTesting
CompletableFuture<Execution> registerProducedPartitions(TaskManagerLocation location) {
assertRunningInJobMasterMainThread();
return FutureUtils.thenApplyAsyncIfNotDone(
registerProducedPartitions(vertex, location, attemptId),
vertex.getExecutionGraph().getJobMasterMainThreadExecutor(),
producedPartitionsCache -> {
producedPartitions = producedPartitionsCache;
startTrackingPartitions(location.getResourceID(), producedPartitionsCache.values());
return this;
});
}
@VisibleForTesting
static CompletableFuture<Map<IntermediateResultPartitionID, ResultPartitionDeploymentDescriptor>> registerProducedPartitions(
ExecutionVertex vertex,
TaskManagerLocation location,
ExecutionAttemptID attemptId) {
ProducerDescriptor producerDescriptor = ProducerDescriptor.create(location, attemptId);
boolean lazyScheduling = vertex.getExecutionGraph().getScheduleMode().allowLazyDeployment();
Collection<IntermediateResultPartition> partitions = vertex.getProducedPartitions().values();
Collection<CompletableFuture<ResultPartitionDeploymentDescriptor>> partitionRegistrations =
new ArrayList<>(partitions.size());
for (IntermediateResultPartition partition : partitions) {
PartitionDescriptor partitionDescriptor = PartitionDescriptor.from(partition);
int maxParallelism = getPartitionMaxParallelism(partition);
CompletableFuture<? extends ShuffleDescriptor> shuffleDescriptorFuture = vertex
.getExecutionGraph()
.getShuffleMaster()
.registerPartitionWithProducer(partitionDescriptor, producerDescriptor);
final boolean releasePartitionOnConsumption =
vertex.getExecutionGraph().isForcePartitionReleaseOnConsumption()
|| !partitionDescriptor.getPartitionType().isBlocking();
CompletableFuture<ResultPartitionDeploymentDescriptor> partitionRegistration = shuffleDescriptorFuture
.thenApply(shuffleDescriptor -> new ResultPartitionDeploymentDescriptor(
partitionDescriptor,
shuffleDescriptor,
maxParallelism,
lazyScheduling,
releasePartitionOnConsumption
? ShuffleDescriptor.ReleaseType.AUTO
: ShuffleDescriptor.ReleaseType.MANUAL));
partitionRegistrations.add(partitionRegistration);
}
return FutureUtils.combineAll(partitionRegistrations).thenApply(rpdds -> {
Map<IntermediateResultPartitionID, ResultPartitionDeploymentDescriptor> producedPartitions =
new LinkedHashMap<>(partitions.size());
rpdds.forEach(rpdd -> producedPartitions.put(rpdd.getPartitionId(), rpdd));
return producedPartitions;
});
}
private static int getPartitionMaxParallelism(IntermediateResultPartition partition) {
// TODO consumers.isEmpty() only exists for test, currently there has to be exactly one consumer in real jobs!
final List<List<ExecutionEdge>> consumers = partition.getConsumers();
int maxParallelism = KeyGroupRangeAssignment.UPPER_BOUND_MAX_PARALLELISM;
if (!consumers.isEmpty()) {
List<ExecutionEdge> consumer = consumers.get(0);
ExecutionJobVertex consumerVertex = consumer.get(0).getTarget().getJobVertex();
maxParallelism = consumerVertex.getMaxParallelism();
}
return maxParallelism;
}
/**
* Deploys the execution to the previously assigned resource.
*
* @throws JobException if the execution cannot be deployed to the assigned resource
*/
public void deploy() throws JobException {
assertRunningInJobMasterMainThread();
final LogicalSlot slot = assignedResource;
checkNotNull(slot, "In order to deploy the execution we first have to assign a resource via tryAssignResource.");
// Check if the TaskManager died in the meantime
// This only speeds up the response to TaskManagers failing concurrently to deployments.
// The more general check is the rpcTimeout of the deployment call
if (!slot.isAlive()) {
throw new JobException("Target slot (TaskManager) for deployment is no longer alive.");
}
// make sure exactly one deployment call happens from the correct state
// note: the transition from CREATED to DEPLOYING is for testing purposes only
ExecutionState previous = this.state;
if (previous == SCHEDULED || previous == CREATED) {
if (!transitionState(previous, DEPLOYING)) {
// race condition, someone else beat us to the deploying call.
// this should actually not happen and indicates a race somewhere else
throw new IllegalStateException("Cannot deploy task: Concurrent deployment call race.");
}
}
else {
// vertex may have been cancelled, or it was already scheduled
throw new IllegalStateException("The vertex must be in CREATED or SCHEDULED state to be deployed. Found state " + previous);
}
if (this != slot.getPayload()) {
throw new IllegalStateException(
String.format("The execution %s has not been assigned to the assigned slot.", this));
}
try {
// race double check, did we fail/cancel and do we need to release the slot?
if (this.state != DEPLOYING) {
slot.releaseSlot(new FlinkException("Actual state of execution " + this + " (" + state + ") does not match expected state DEPLOYING."));
return;
}
if (LOG.isInfoEnabled()) {
LOG.info(String.format("Deploying %s (attempt #%d) to %s", vertex.getTaskNameWithSubtaskIndex(),
attemptNumber, getAssignedResourceLocation()));
}
final TaskDeploymentDescriptor deployment = TaskDeploymentDescriptorFactory
.fromExecutionVertex(vertex, attemptNumber)
.createDeploymentDescriptor(
slot.getAllocationId(),
slot.getPhysicalSlotNumber(),
taskRestore,
producedPartitions.values());
// null taskRestore to let it be GC'ed
taskRestore = null;
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
final ComponentMainThreadExecutor jobMasterMainThreadExecutor =
vertex.getExecutionGraph().getJobMasterMainThreadExecutor();
// We run the submission in the future executor so that the serialization of large TDDs does not block
// the main thread and sync back to the main thread once submission is completed.
CompletableFuture.supplyAsync(() -> taskManagerGateway.submitTask(deployment, rpcTimeout), executor)
.thenCompose(Function.identity())
.whenCompleteAsync(
(ack, failure) -> {
// only respond to the failure case
if (failure != null) {
if (failure instanceof TimeoutException) {
String taskname = vertex.getTaskNameWithSubtaskIndex() + " (" + attemptId + ')';
markFailed(new Exception(
"Cannot deploy task " + taskname + " - TaskManager (" + getAssignedResourceLocation()
+ ") not responding after a rpcTimeout of " + rpcTimeout, failure));
} else {
markFailed(failure);
}
}
},
jobMasterMainThreadExecutor);
}
catch (Throwable t) {
markFailed(t);
ExceptionUtils.rethrow(t);
}
}
public void cancel() {
// depending on the previous state, we go directly to cancelled (no cancel call necessary)
// -- or to canceling (cancel call needs to be sent to the task manager)
// because of several possibly previous states, we need to again loop until we make a
// successful atomic state transition
assertRunningInJobMasterMainThread();
while (true) {
ExecutionState current = this.state;
if (current == CANCELING || current == CANCELED) {
// already taken care of, no need to cancel again
return;
}
// these two are the common cases where we need to send a cancel call
else if (current == RUNNING || current == DEPLOYING) {
// try to transition to canceling, if successful, send the cancel call
if (startCancelling(NUM_CANCEL_CALL_TRIES)) {
return;
}
// else: fall through the loop
}
else if (current == FINISHED || current == FAILED) {
// nothing to do any more. finished/failed before it could be cancelled.
// in any case, the task is removed from the TaskManager already
return;
}
else if (current == CREATED || current == SCHEDULED) {
// from here, we can directly switch to cancelled, because no task has been deployed
if (cancelAtomically()) {
return;
}
// else: fall through the loop
}
else {
throw new IllegalStateException(current.name());
}
}
}
public CompletableFuture<?> suspend() {
switch(state) {
case RUNNING:
case DEPLOYING:
case CREATED:
case SCHEDULED:
if (!cancelAtomically()) {
throw new IllegalStateException(
String.format("Could not directly go to %s from %s.", CANCELED.name(), state.name()));
}
break;
case CANCELING:
completeCancelling();
break;
case FINISHED:
case FAILED:
case CANCELED:
break;
default:
throw new IllegalStateException(state.name());
}
return releaseFuture;
}
private void scheduleConsumer(ExecutionVertex consumerVertex) {
try {
final ExecutionGraph executionGraph = consumerVertex.getExecutionGraph();
consumerVertex.scheduleForExecution(
executionGraph.getSlotProvider(),
executionGraph.isQueuedSchedulingAllowed(),
LocationPreferenceConstraint.ANY, // there must be at least one known location
Collections.emptySet());
} catch (Throwable t) {
consumerVertex.fail(new IllegalStateException("Could not schedule consumer " +
"vertex " + consumerVertex, t));
}
}
void scheduleOrUpdateConsumers(List<List<ExecutionEdge>> allConsumers) {
assertRunningInJobMasterMainThread();
final int numConsumers = allConsumers.size();
if (numConsumers > 1) {
fail(new IllegalStateException("Currently, only a single consumer group per partition is supported."));
} else if (numConsumers == 0) {
return;
}
for (ExecutionEdge edge : allConsumers.get(0)) {
final ExecutionVertex consumerVertex = edge.getTarget();
final Execution consumer = consumerVertex.getCurrentExecutionAttempt();
final ExecutionState consumerState = consumer.getState();
// ----------------------------------------------------------------
// Consumer is created => try to schedule it and the partition info
// is known during deployment
// ----------------------------------------------------------------
if (consumerState == CREATED) {
// Schedule the consumer vertex if its inputs constraint is satisfied, otherwise skip the scheduling.
// A shortcut of input constraint check is added for InputDependencyConstraint.ANY since
// at least one of the consumer vertex's inputs is consumable here. This is to avoid the
// O(N) complexity introduced by input constraint check for InputDependencyConstraint.ANY,
// as we do not want the default scheduling performance to be affected.
if (consumerVertex.getInputDependencyConstraint() == InputDependencyConstraint.ANY ||
consumerVertex.checkInputDependencyConstraints()) {
scheduleConsumer(consumerVertex);
}
}
// ----------------------------------------------------------------
// Consumer is running => send update message now
// Consumer is deploying => cache the partition info which would be
// sent after switching to running
// ----------------------------------------------------------------
else if (consumerState == DEPLOYING || consumerState == RUNNING) {
final PartitionInfo partitionInfo = createPartitionInfo(edge);
if (consumerState == DEPLOYING) {
consumerVertex.cachePartitionInfo(partitionInfo);
} else {
consumer.sendUpdatePartitionInfoRpcCall(Collections.singleton(partitionInfo));
}
}
}
}
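	// Recap of the dispatch above: CREATED consumers are scheduled (subject to
	// the input dependency constraint), DEPLOYING consumers get the partition
	// info cached for delivery once they switch to RUNNING, RUNNING consumers
	// get an immediate update-partition RPC, and all other states are ignored.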
private static PartitionInfo createPartitionInfo(ExecutionEdge executionEdge) {
IntermediateDataSetID intermediateDataSetID = executionEdge.getSource().getIntermediateResult().getId();
ShuffleDescriptor shuffleDescriptor = getConsumedPartitionShuffleDescriptor(executionEdge, false);
return new PartitionInfo(intermediateDataSetID, shuffleDescriptor);
}
/**
* This method fails the vertex due to an external condition. The task will move to state FAILED.
* If the task was in state RUNNING or DEPLOYING before, it will send a cancel call to the TaskManager.
*
* @param t The exception that caused the task to fail.
*/
@Override
public void fail(Throwable t) {
processFail(t, false);
}
/**
* Request a stack trace sample from the task of this execution.
*
* @param sampleId of the stack trace sample
* @param numSamples the sample should contain
* @param delayBetweenSamples to wait
* @param maxStackTraceDepth of the samples
* @param timeout until the request times out
* @return Future stack trace sample response
*/
public CompletableFuture<StackTraceSampleResponse> requestStackTraceSample(
int sampleId,
int numSamples,
Time delayBetweenSamples,
int maxStackTraceDepth,
Time timeout) {
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
return taskManagerGateway.requestStackTraceSample(
attemptId,
sampleId,
numSamples,
delayBetweenSamples,
maxStackTraceDepth,
timeout);
} else {
return FutureUtils.completedExceptionally(new Exception("The execution has no slot assigned."));
}
}
/**
* Notify the task of this execution about a completed checkpoint.
*
* @param checkpointId of the completed checkpoint
* @param timestamp of the completed checkpoint
*/
public void notifyCheckpointComplete(long checkpointId, long timestamp) {
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
taskManagerGateway.notifyCheckpointComplete(attemptId, getVertex().getJobId(), checkpointId, timestamp);
} else {
LOG.debug("The execution has no slot assigned. This indicates that the execution is " +
"no longer running.");
}
}
/**
* Trigger a new checkpoint on the task of this execution.
*
	 * @param checkpointId of the checkpoint to trigger
* @param timestamp of the checkpoint to trigger
* @param checkpointOptions of the checkpoint to trigger
*/
public void triggerCheckpoint(long checkpointId, long timestamp, CheckpointOptions checkpointOptions) {
triggerCheckpointHelper(checkpointId, timestamp, checkpointOptions, false);
}
/**
	 * Trigger a synchronous savepoint on the task of this execution.
	 *
	 * @param checkpointId of the checkpoint to trigger
* @param timestamp of the checkpoint to trigger
* @param checkpointOptions of the checkpoint to trigger
* @param advanceToEndOfEventTime Flag indicating if the source should inject a {@code MAX_WATERMARK} in the pipeline
* to fire any registered event-time timers
*/
public void triggerSynchronousSavepoint(long checkpointId, long timestamp, CheckpointOptions checkpointOptions, boolean advanceToEndOfEventTime) {
triggerCheckpointHelper(checkpointId, timestamp, checkpointOptions, advanceToEndOfEventTime);
}
private void triggerCheckpointHelper(long checkpointId, long timestamp, CheckpointOptions checkpointOptions, boolean advanceToEndOfEventTime) {
final CheckpointType checkpointType = checkpointOptions.getCheckpointType();
if (advanceToEndOfEventTime && !(checkpointType.isSynchronous() && checkpointType.isSavepoint())) {
throw new IllegalArgumentException("Only synchronous savepoints are allowed to advance the watermark to MAX.");
}
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
taskManagerGateway.triggerCheckpoint(attemptId, getVertex().getJobId(), checkpointId, timestamp, checkpointOptions, advanceToEndOfEventTime);
} else {
LOG.debug("The execution has no slot assigned. This indicates that the execution is no longer running.");
}
}
// --------------------------------------------------------------------------------------------
// Callbacks
// --------------------------------------------------------------------------------------------
/**
* This method marks the task as failed, but will make no attempt to remove task execution from the task manager.
	 * It is intended for cases where the task is known not to be running, or when the TaskManager reports failure
* (in which case it has already removed the task).
*
* @param t The exception that caused the task to fail.
*/
void markFailed(Throwable t) {
processFail(t, true);
}
void markFailed(Throwable t, Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
processFail(t, true, userAccumulators, metrics);
}
@VisibleForTesting
void markFinished() {
markFinished(null, null);
}
void markFinished(Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
assertRunningInJobMasterMainThread();
// this call usually comes during RUNNING, but may also come while still in deploying (very fast tasks!)
while (true) {
ExecutionState current = this.state;
if (current == RUNNING || current == DEPLOYING) {
if (transitionState(current, FINISHED)) {
try {
for (IntermediateResultPartition finishedPartition
: getVertex().finishAllBlockingPartitions()) {
IntermediateResultPartition[] allPartitions = finishedPartition
.getIntermediateResult().getPartitions();
for (IntermediateResultPartition partition : allPartitions) {
scheduleOrUpdateConsumers(partition.getConsumers());
}
}
updateAccumulatorsAndMetrics(userAccumulators, metrics);
releaseAssignedResource(null);
vertex.getExecutionGraph().deregisterExecution(this);
}
finally {
vertex.executionFinished(this);
}
return;
}
}
else if (current == CANCELING) {
// we sent a cancel call, and the task manager finished before it arrived. We
// will never get a CANCELED call back from the job manager
completeCancelling(userAccumulators, metrics);
return;
}
else if (current == CANCELED || current == FAILED) {
if (LOG.isDebugEnabled()) {
LOG.debug("Task FINISHED, but concurrently went to state " + state);
}
return;
}
else {
// this should not happen, we need to fail this
markFailed(new Exception("Vertex received FINISHED message while being in state " + state));
return;
}
}
}
private boolean cancelAtomically() {
if (startCancelling(0)) {
completeCancelling();
return true;
} else {
return false;
}
}
private boolean startCancelling(int numberCancelRetries) {
if (transitionState(state, CANCELING)) {
taskManagerLocationFuture.cancel(false);
sendCancelRpcCall(numberCancelRetries);
return true;
} else {
return false;
}
}
void completeCancelling() {
completeCancelling(null, null);
}
void completeCancelling(Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
// the taskmanagers can themselves cancel tasks without an external trigger, if they find that the
		// network stack is canceled (for example by a failing / canceling receiver or sender).
// this is an artifact of the old network runtime, but for now we need to support task transitions
// from running directly to canceled
while (true) {
ExecutionState current = this.state;
if (current == CANCELED) {
return;
}
else if (current == CANCELING || current == RUNNING || current == DEPLOYING) {
updateAccumulatorsAndMetrics(userAccumulators, metrics);
if (transitionState(current, CANCELED)) {
finishCancellation();
return;
}
// else fall through the loop
}
else {
// failing in the meantime may happen and is no problem.
// anything else is a serious problem !!!
if (current != FAILED) {
String message = String.format("Asynchronous race: Found %s in state %s after successful cancel call.", vertex.getTaskNameWithSubtaskIndex(), state);
LOG.error(message);
vertex.getExecutionGraph().failGlobal(new Exception(message));
}
return;
}
}
}
private void finishCancellation() {
releaseAssignedResource(new FlinkException("Execution " + this + " was cancelled."));
vertex.getExecutionGraph().deregisterExecution(this);
		// release partitions on TM in case the Task finished while we were already CANCELING
stopTrackingAndReleasePartitions();
}
void cachePartitionInfo(PartitionInfo partitionInfo) {
partitionInfos.add(partitionInfo);
}
private void sendPartitionInfos() {
if (!partitionInfos.isEmpty()) {
sendUpdatePartitionInfoRpcCall(new ArrayList<>(partitionInfos));
partitionInfos.clear();
}
}
// --------------------------------------------------------------------------------------------
// Internal Actions
// --------------------------------------------------------------------------------------------
private boolean processFail(Throwable t, boolean isCallback) {
return processFail(t, isCallback, null, null);
}
private boolean processFail(Throwable t, boolean isCallback, Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
		// damn, we failed. This means only that we keep our books and notify our parent ExecutionVertex
// the actual computation on the task manager is cleaned up by the TaskManager that noticed the failure
// we may need to loop multiple times (in the presence of concurrent calls) in order to
// atomically switch to failed
assertRunningInJobMasterMainThread();
while (true) {
ExecutionState current = this.state;
if (current == FAILED) {
			// already failed. It is enough to remember once that we failed (it's sad enough)
return false;
}
if (current == CANCELED || current == FINISHED) {
// we are already aborting or are already aborted or we are already finished
if (LOG.isDebugEnabled()) {
LOG.debug("Ignoring transition of vertex {} to {} while being {}.", getVertexWithAttempt(), FAILED, current);
}
return false;
}
if (current == CANCELING) {
completeCancelling(userAccumulators, metrics);
return false;
}
if (transitionState(current, FAILED, t)) {
// success (in a manner of speaking)
this.failureCause = t;
updateAccumulatorsAndMetrics(userAccumulators, metrics);
releaseAssignedResource(t);
vertex.getExecutionGraph().deregisterExecution(this);
stopTrackingAndReleasePartitions();
if (!isCallback && (current == RUNNING || current == DEPLOYING)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Sending out cancel request, to remove task execution from TaskManager.");
}
try {
if (assignedResource != null) {
sendCancelRpcCall(NUM_CANCEL_CALL_TRIES);
}
} catch (Throwable tt) {
// no reason this should ever happen, but log it to be safe
LOG.error("Error triggering cancel call while marking task {} as failed.", getVertex().getTaskNameWithSubtaskIndex(), tt);
}
}
// leave the loop
return true;
}
}
}
boolean switchToRunning() {
if (transitionState(DEPLOYING, RUNNING)) {
sendPartitionInfos();
return true;
}
else {
// something happened while the call was in progress.
// it can mean:
// - canceling, while deployment was in progress. state is now canceling, or canceled, if the response overtook
// - finishing (execution and finished call overtook the deployment answer, which is possible and happens for fast tasks)
// - failed (execution, failure, and failure message overtook the deployment answer)
ExecutionState currentState = this.state;
if (currentState == FINISHED || currentState == CANCELED) {
// do nothing, the task was really fast (nice)
// or it was canceled really fast
}
else if (currentState == CANCELING || currentState == FAILED) {
if (LOG.isDebugEnabled()) {
// this log statement is guarded because the 'getVertexWithAttempt()' method
// performs string concatenations
LOG.debug("Concurrent canceling/failing of {} while deployment was in progress.", getVertexWithAttempt());
}
sendCancelRpcCall(NUM_CANCEL_CALL_TRIES);
}
else {
String message = String.format("Concurrent unexpected state transition of task %s to %s while deployment was in progress.",
getVertexWithAttempt(), currentState);
if (LOG.isDebugEnabled()) {
LOG.debug(message);
}
// undo the deployment
sendCancelRpcCall(NUM_CANCEL_CALL_TRIES);
// record the failure
markFailed(new Exception(message));
}
return false;
}
}
/**
* This method sends a CancelTask message to the instance of the assigned slot.
*
	 * <p>The sending is retried up to the given number of times (callers typically pass NUM_CANCEL_CALL_TRIES).
*/
private void sendCancelRpcCall(int numberRetries) {
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
final ComponentMainThreadExecutor jobMasterMainThreadExecutor =
getVertex().getExecutionGraph().getJobMasterMainThreadExecutor();
CompletableFuture<Acknowledge> cancelResultFuture = FutureUtils.retry(
() -> taskManagerGateway.cancelTask(attemptId, rpcTimeout),
numberRetries,
jobMasterMainThreadExecutor);
cancelResultFuture.whenComplete(
(ack, failure) -> {
if (failure != null) {
fail(new Exception("Task could not be canceled.", failure));
}
});
}
}
private void startTrackingPartitions(final ResourceID taskExecutorId, final Collection<ResultPartitionDeploymentDescriptor> partitions) {
PartitionTracker partitionTracker = vertex.getExecutionGraph().getPartitionTracker();
for (ResultPartitionDeploymentDescriptor partition : partitions) {
partitionTracker.startTrackingPartition(
taskExecutorId,
partition);
}
}
void stopTrackingAndReleasePartitions() {
LOG.info("Discarding the results produced by task execution {}.", attemptId);
if (producedPartitions != null && producedPartitions.size() > 0) {
final PartitionTracker partitionTracker = getVertex().getExecutionGraph().getPartitionTracker();
final List<ResultPartitionID> producedPartitionIds = producedPartitions.values().stream()
.map(ResultPartitionDeploymentDescriptor::getShuffleDescriptor)
.map(ShuffleDescriptor::getResultPartitionID)
.collect(Collectors.toList());
partitionTracker.stopTrackingAndReleasePartitions(producedPartitionIds);
}
}
/**
* Update the partition infos on the assigned resource.
*
* @param partitionInfos for the remote task
*/
private void sendUpdatePartitionInfoRpcCall(
final Iterable<PartitionInfo> partitionInfos) {
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
final TaskManagerLocation taskManagerLocation = slot.getTaskManagerLocation();
CompletableFuture<Acknowledge> updatePartitionsResultFuture = taskManagerGateway.updatePartitions(attemptId, partitionInfos, rpcTimeout);
updatePartitionsResultFuture.whenCompleteAsync(
(ack, failure) -> {
// fail if there was a failure
if (failure != null) {
fail(new IllegalStateException("Update task on TaskManager " + taskManagerLocation +
" failed due to:", failure));
}
}, getVertex().getExecutionGraph().getJobMasterMainThreadExecutor());
}
}
/**
* Releases the assigned resource and completes the release future
* once the assigned resource has been successfully released.
*
* @param cause for the resource release, null if none
*/
private void releaseAssignedResource(@Nullable Throwable cause) {
assertRunningInJobMasterMainThread();
final LogicalSlot slot = assignedResource;
if (slot != null) {
ComponentMainThreadExecutor jobMasterMainThreadExecutor =
getVertex().getExecutionGraph().getJobMasterMainThreadExecutor();
slot.releaseSlot(cause)
.whenComplete((Object ignored, Throwable throwable) -> {
jobMasterMainThreadExecutor.assertRunningInMainThread();
if (throwable != null) {
releaseFuture.completeExceptionally(throwable);
} else {
releaseFuture.complete(null);
}
});
} else {
// no assigned resource --> we can directly complete the release future
releaseFuture.complete(null);
}
}
// --------------------------------------------------------------------------------------------
// Miscellaneous
// --------------------------------------------------------------------------------------------
/**
* Calculates the preferred locations based on the location preference constraint.
*
* @param locationPreferenceConstraint constraint for the location preference
	 * @return Future containing the collection of preferred locations. This future might not be completed if not
	 *         all inputs have been assigned a resource.
*/
@VisibleForTesting
public CompletableFuture<Collection<TaskManagerLocation>> calculatePreferredLocations(LocationPreferenceConstraint locationPreferenceConstraint) {
final Collection<CompletableFuture<TaskManagerLocation>> preferredLocationFutures = getVertex().getPreferredLocations();
final CompletableFuture<Collection<TaskManagerLocation>> preferredLocationsFuture;
switch(locationPreferenceConstraint) {
case ALL:
preferredLocationsFuture = FutureUtils.combineAll(preferredLocationFutures);
break;
case ANY:
final ArrayList<TaskManagerLocation> completedTaskManagerLocations = new ArrayList<>(preferredLocationFutures.size());
for (CompletableFuture<TaskManagerLocation> preferredLocationFuture : preferredLocationFutures) {
if (preferredLocationFuture.isDone() && !preferredLocationFuture.isCompletedExceptionally()) {
final TaskManagerLocation taskManagerLocation = preferredLocationFuture.getNow(null);
if (taskManagerLocation == null) {
throw new FlinkRuntimeException("TaskManagerLocationFuture was completed with null. This indicates a programming bug.");
}
completedTaskManagerLocations.add(taskManagerLocation);
}
}
preferredLocationsFuture = CompletableFuture.completedFuture(completedTaskManagerLocations);
break;
default:
throw new RuntimeException("Unknown LocationPreferenceConstraint " + locationPreferenceConstraint + '.');
}
return preferredLocationsFuture;
}
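	// Note on the constraint handling above: with ALL, the returned future
	// completes only once every input location future is done; with ANY, it
	// completes immediately with whichever locations are already known at
	// call time (possibly an empty collection).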
private boolean transitionState(ExecutionState currentState, ExecutionState targetState) {
return transitionState(currentState, targetState, null);
}
private boolean transitionState(ExecutionState currentState, ExecutionState targetState, Throwable error) {
// sanity check
if (currentState.isTerminal()) {
throw new IllegalStateException("Cannot leave terminal state " + currentState + " to transition to " + targetState + '.');
}
if (STATE_UPDATER.compareAndSet(this, currentState, targetState)) {
markTimestamp(targetState);
if (error == null) {
LOG.info("{} ({}) switched from {} to {}.", getVertex().getTaskNameWithSubtaskIndex(), getAttemptId(), currentState, targetState);
} else {
LOG.info("{} ({}) switched from {} to {}.", getVertex().getTaskNameWithSubtaskIndex(), getAttemptId(), currentState, targetState, error);
}
if (targetState.isTerminal()) {
// complete the terminal state future
terminalStateFuture.complete(targetState);
}
// make sure that the state transition completes normally.
		// potential errors (in listeners) may not affect the main logic
try {
vertex.notifyStateTransition(this, targetState, error);
}
catch (Throwable t) {
LOG.error("Error while notifying execution graph of execution state transition.", t);
}
return true;
} else {
return false;
}
}
private void markTimestamp(ExecutionState state) {
markTimestamp(state, System.currentTimeMillis());
}
private void markTimestamp(ExecutionState state, long timestamp) {
this.stateTimestamps[state.ordinal()] = timestamp;
}
public String getVertexWithAttempt() {
return vertex.getTaskNameWithSubtaskIndex() + " - execution #" + attemptNumber;
}
// ------------------------------------------------------------------------
// Accumulators
// ------------------------------------------------------------------------
/**
* Update accumulators (discarded when the Execution has already been terminated).
* @param userAccumulators the user accumulators
*/
public void setAccumulators(Map<String, Accumulator<?, ?>> userAccumulators) {
synchronized (accumulatorLock) {
if (!state.isTerminal()) {
this.userAccumulators = userAccumulators;
}
}
}
public Map<String, Accumulator<?, ?>> getUserAccumulators() {
return userAccumulators;
}
@Override
public StringifiedAccumulatorResult[] getUserAccumulatorsStringified() {
Map<String, OptionalFailure<Accumulator<?, ?>>> accumulators =
userAccumulators == null ?
null :
userAccumulators.entrySet()
.stream()
.collect(Collectors.toMap(Map.Entry::getKey, entry -> OptionalFailure.of(entry.getValue())));
return StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulators);
}
@Override
public int getParallelSubtaskIndex() {
return getVertex().getParallelSubtaskIndex();
}
@Override
public IOMetrics getIOMetrics() {
return ioMetrics;
}
private void updateAccumulatorsAndMetrics(Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
if (userAccumulators != null) {
synchronized (accumulatorLock) {
this.userAccumulators = userAccumulators;
}
}
if (metrics != null) {
this.ioMetrics = metrics;
}
}
// ------------------------------------------------------------------------
// Standard utilities
// ------------------------------------------------------------------------
@Override
public String toString() {
final LogicalSlot slot = assignedResource;
return String.format("Attempt #%d (%s) @ %s - [%s]", attemptNumber, vertex.getTaskNameWithSubtaskIndex(),
(slot == null ? "(unassigned)" : slot), state);
}
@Override
public ArchivedExecution archive() {
return new ArchivedExecution(this);
}
private void assertRunningInJobMasterMainThread() {
vertex.getExecutionGraph().assertRunningInJobMasterMainThread();
}
}
| shaoxuan-wang/flink | flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java | Java | apache-2.0 | 59,232 |
/************************************************************************************
* XtApp.h : header file
*
* CXtApp Definition header, An Application framework for common use
* (Xtrovert Application Frameworks).
*
* AUTHOR : Sean Feng <[email protected]>
* DATE : Aug. 7, 2012
* Copyright (c) 2009-?. All Rights Reserved.
*
* This code may be used in compiled form in any way you desire. This
* file may be redistributed unmodified by any means PROVIDING it is
 * not sold for profit without the author's written consent, and
 * provided that this notice and the author's name and all copyright
 * notices remain intact.
*
* An email letting me know how you are using it would be nice as well.
*
* This file is provided "as is" with no expressed or implied warranty.
* The author accepts no liability for any damage/loss of business that
* this product may cause.
*
************************************************************************************/
#ifndef __XT_APPLICATION_H__
#define __XT_APPLICATION_H__
#include "XtThread.h"
/*
Application Parameter:
-buildinfo(bi) Compiler, Platform(Win/Linux, 32/64bits), Built date.
-start/stop/restart
-reload
*/
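/*
 Illustrative launch lines for the directives above (the binary name
 "xtapp" is hypothetical):
 xtapp --buildinfo Print compiler, platform (Win/Linux, 32/64bits), built date.
 xtapp -start Start the instance.
 xtapp -restart Stop, then start again.
 xtapp -reload Re-read configuration without a full restart.
*/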
//////////////////////////////////////////////////////////////////////////
// CLASS CXtApp
//////////////////////////////////////////////////////////////////////////
/* Terminate flag */
enum {
XTAPP_TF_STOP=0,
XTAPP_TF_RUNNING,
XTAPP_TF_RESTART,
XTAPP_TF_END
};
/* Brief directive prefix: _T('-') */
const TCHAR XT_STRCMD_DIRECTIVE_BRIEF = _T('-');
/* Complete directive prefix: _T("--") [CPLT means Complete] */
const TCHAR XT_STRCMD_DIRECTIVE_CPLT[] = _T("--");
/* Slash directive prefix: _T('/'), compatible with Windows */
const TCHAR XT_STRCMD_DIRECTIVE_SLASH = _T('/');
class CXtApp : public CXtThread
{
public:
CXtApp(void);
virtual ~CXtApp(void);
// System Environment initialization/destruction.
int InitApplication( int argc, TCHAR *argv[] );
int ExitApplication(void);
// Application logics initialization/destruction.
int InitInstance(void);
int ExitInstance(void); // return app exit code
int RestartInstance(void); // handle restart by Restart Manager
int Run(void);
/* Administrator Mode */
/*int OptCmdAdmin(void);*/
/*int CmdNetState(void);*/
/*int CmdNetConnect( TCHAR *szAddress );*/
/*int CmdNetRestart(void);*/
/*int CmdNetPing( TCHAR *szAddress );*/
static int m_nTermFlag;
static CXtApp *m_pThisApp;
#if defined(_DEBUG)
void SetLabel( const TCHAR *szLabel );
TCHAR m_szLabel[_MAX_STR_LEN_32_];
#endif
protected:
virtual void Reset(void);
// virtual BOOL OnIdle( LONG lCount ); // return TRUE if more idle processing
/*****************************************************************
* InitApplication() is implemented with the following methods.
* ExitApplication() is implemented with the following methods.
******************************************************************/
/* React to a shell-issued command line directive. */
virtual int ProcessShellCommand( int argc, TCHAR *argv[] );
virtual BOOL GetShellCommand( int argc, TCHAR* argv[], const TCHAR **cszOption, const TCHAR **cszParam );
/* Decide whether process runs under Daemon Mode. */
virtual void SetupDaemonMode(void); /* Setup : m_bDaemon = TRUE/FALSE */
/* Do something extra in derived-class. */
virtual int OnInitApp(void);
virtual int OnExitApp(void);
/*****************************************************************
* InitApplication()/ExitApplication() implementation END
******************************************************************/
/*****************************************************************
* InitInstance() is implemented with the following methods.
* ExitInstance() is implemented with the following methods.
******************************************************************/
int Daemon(void);
virtual int OnInitInstance(void);
virtual int OnExitInstance(void);
/*****************************************************************
* InitInstance()/ExitInstance() implementation END
******************************************************************/
/*****************************************************************
* Run() is implemented with the following methods.
******************************************************************/
virtual int AppProc(void);
/*****************************************************************
* Run() implementation END
******************************************************************/
/* Methods */
int GetCwd( TCHAR *szDir ); /* Get current working directory. */
int SetCwd( const TCHAR *szDir ); /* Set current working directory. */
int GetExeDir( TCHAR *szDir ); /* Get directory where exe-file lies in. */
const TCHAR* GetCmdLineString(void); /* Get command line string, that how to start this program. */
static void GotTerminate( int sig );
/*virtual int WaitThreads(void);*/
/* This process is running under daemon mode or not? */
BOOL m_bDaemon; /* SetupDaemonMode() setup this member. */
BOOL m_bRestart;
private:
};
#endif /*__XT_APPLICATION_H__*/
| tempbottle/xtrovert | xtrovert/app/inc/XtApp.h | C | apache-2.0 | 5,143 |
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<link rel="SHORTCUT ICON" href="../../../../../img/clover.ico" />
<link rel="stylesheet" href="../../../../../aui/css/aui.min.css" media="all"/>
<link rel="stylesheet" href="../../../../../aui/css/aui-experimental.min.css" media="all"/>
<!--[if IE 9]><link rel="stylesheet" href="../../../../../aui/css/aui-ie9.min.css" media="all"/><![endif]-->
<style type="text/css" media="all">
@import url('../../../../../style.css');
@import url('../../../../../tree.css');
</style>
<script src="../../../../../jquery-1.8.3.min.js" type="text/javascript"></script>
<script src="../../../../../aui/js/aui.min.js" type="text/javascript"></script>
<script src="../../../../../aui/js/aui-experimental.min.js" type="text/javascript"></script>
<script src="../../../../../aui/js/aui-soy.min.js" type="text/javascript"></script>
<script src="../../../../../package-nodes-tree.js" type="text/javascript"></script>
<script src="../../../../../clover-tree.js" type="text/javascript"></script>
<script src="../../../../../clover.js" type="text/javascript"></script>
<script src="../../../../../clover-descriptions.js" type="text/javascript"></script>
<script src="../../../../../cloud.js" type="text/javascript"></script>
<title>ABA Route Transit Number Validator 1.0.1-SNAPSHOT</title>
</head>
<body>
<div id="page">
<header id="header" role="banner">
<nav class="aui-header aui-dropdown2-trigger-group" role="navigation">
<div class="aui-header-inner">
<div class="aui-header-primary">
<h1 id="logo" class="aui-header-logo aui-header-logo-clover">
<a href="http://openclover.org" title="Visit OpenClover home page"><span class="aui-header-logo-device">OpenClover</span></a>
</h1>
</div>
<div class="aui-header-secondary">
<ul class="aui-nav">
<li id="system-help-menu">
<a class="aui-nav-link" title="Open online documentation" target="_blank"
href="http://openclover.org/documentation">
<span class="aui-icon aui-icon-small aui-iconfont-help"> Help</span>
</a>
</li>
</ul>
</div>
</div>
</nav>
</header>
<div class="aui-page-panel">
<div class="aui-page-panel-inner">
<div class="aui-page-panel-nav aui-page-panel-nav-clover">
<div class="aui-page-header-inner" style="margin-bottom: 20px;">
<div class="aui-page-header-image">
<a href="http://cardatechnologies.com" target="_top">
<div class="aui-avatar aui-avatar-large aui-avatar-project">
<div class="aui-avatar-inner">
<img src="../../../../../img/clover_logo_large.png" alt="Clover icon"/>
</div>
</div>
</a>
</div>
<div class="aui-page-header-main" >
<h1>
<a href="http://cardatechnologies.com" target="_top">
ABA Route Transit Number Validator 1.0.1-SNAPSHOT
</a>
</h1>
</div>
</div>
<nav class="aui-navgroup aui-navgroup-vertical">
<div class="aui-navgroup-inner">
<ul class="aui-nav">
<li class="">
<a href="../../../../../dashboard.html">Project overview</a>
</li>
</ul>
<div class="aui-nav-heading packages-nav-heading">
<strong>Packages</strong>
</div>
<div class="aui-nav project-packages">
<form method="get" action="#" class="aui package-filter-container">
<input type="text" autocomplete="off" class="package-filter text"
placeholder="Type to filter packages..." name="package-filter" id="package-filter"
title="Start typing package name (or part of the name) to search through the tree. Use arrow keys and the Enter key to navigate."/>
</form>
<p class="package-filter-no-results-message hidden">
<small>No results found.</small>
</p>
<div class="packages-tree-wrapper" data-root-relative="../../../../../" data-package-name="com.cardatechnologies.utils.validators.abaroutevalidator">
<div class="packages-tree-container"></div>
<div class="clover-packages-lozenges"></div>
</div>
</div>
</div>
</nav> </div>
<section class="aui-page-panel-content">
<div class="aui-page-panel-content-clover">
<div class="aui-page-header-main"><ol class="aui-nav aui-nav-breadcrumbs">
<li><a href="../../../../../dashboard.html"> Project Clover database Sat Aug 7 2021 12:29:33 MDT</a></li>
<li><a href="test-pkg-summary.html">Package com.cardatechnologies.utils.validators.abaroutevalidator</a></li>
<li><a href="test-Test_AbaRouteValidator_07.html">Class Test_AbaRouteValidator_07</a></li>
</ol></div>
<h1 class="aui-h2-clover">
Test testAbaNumberCheck_13433_good
</h1>
<table class="aui">
<thead>
<tr>
<th>Test</th>
<th><label title="The test result. Either a Pass, Fail or Error.">Status</label></th>
<th><label title="When the test execution was started">Start time</label></th>
<th><label title="The total time in seconds taken to run this test.">Time (seconds)</label></th>
<th><label title="A failure or error message if the test is not successful.">Message</label></th>
</tr>
</thead>
<tbody>
<tr>
<td>
<a href="../../../../../com/cardatechnologies/utils/validators/abaroutevalidator/Test_AbaRouteValidator_07.html?line=23656#src-23656" >testAbaNumberCheck_13433_good</a>
</td>
<td>
<span class="sortValue">1</span><span class="aui-lozenge aui-lozenge-success">PASS</span>
</td>
<td>
7 Aug 12:36:43
</td>
<td>
0.0 </td>
<td>
<div></div>
<div class="errorMessage"></div>
</td>
</tr>
</tbody>
</table>
<div> </div>
<table class="aui aui-table-sortable">
<thead>
<tr>
<th style="white-space:nowrap;"><label title="A class that was directly hit by this test.">Target Class</label></th>
<th colspan="4"><label title="The percentage of coverage contributed by each single test.">Coverage contributed by</label> testAbaNumberCheck_13433_good</th>
</tr>
</thead>
<tbody>
<tr>
<td>
<span class="sortValue">com.cardatechnologies.utils.validators.abaroutevalidator.AbaRouteValidator</span>
  <a href="../../../../../com/cardatechnologies/utils/validators/abaroutevalidator/AbaRouteValidator.html?id=31840#AbaRouteValidator" title="AbaRouteValidator" name="sl-47">com.cardatechnologies.utils.validators.abaroutevalidator.AbaRouteValidator</a>
</td>
<td>
<span class="sortValue">0.7352941</span>73.5%
</td>
<td class="align-middle" style="width: 100%" colspan="3">
<div>
<div title="73.5% Covered" style="min-width:40px;" class="barNegative contribBarNegative contribBarNegative"><div class="barPositive contribBarPositive contribBarPositive" style="width:73.5%"></div></div></div> </td>
</tr>
</tbody>
</table>
</div> <!-- class="aui-page-panel-content-clover" -->
<footer id="footer" role="contentinfo">
<section class="footer-body">
<ul>
<li>
Report generated by <a target="_new" href="http://openclover.org">OpenClover</a> v 4.4.1
on Sat Aug 7 2021 12:49:26 MDT using coverage data from Sat Aug 7 2021 12:47:23 MDT.
</li>
</ul>
<ul>
<li>OpenClover is free and open-source software. </li>
</ul>
</section>
</footer> </section> <!-- class="aui-page-panel-content" -->
</div> <!-- class="aui-page-panel-inner" -->
</div> <!-- class="aui-page-panel" -->
</div> <!-- id="page" -->
</body>
</html> | dcarda/aba.route.validator | target13/site/clover/com/cardatechnologies/utils/validators/abaroutevalidator/Test_AbaRouteValidator_07_testAbaNumberCheck_13433_good_okg.html | HTML | apache-2.0 | 9,181 |
package water;
import java.io.*;
import java.lang.reflect.Array;
import java.net.*;
import java.nio.*;
import java.nio.channels.*;
import java.util.ArrayList;
import java.util.Random;
import water.network.SocketChannelUtils;
import water.util.Log;
import water.util.StringUtils;
import water.util.TwoDimTable;
/** A ByteBuffer backed mixed Input/Output streaming class, using Iced serialization.
*
* Reads/writes empty/fill the ByteBuffer as needed. When it is empty/full it
* we go to the ByteChannel for more/less. Because DirectByteBuffers are
* expensive to make, we keep a few pooled.
*
* When talking to a remote H2O node, switches between UDP and TCP transport
* protocols depending on the message size. The TypeMap is not included, and
* is assumed to exist on the remote H2O node.
*
* Supports direct NIO FileChannel read/write to disk, used during user-mode
* swapping. The TypeMap is not included on write, and is assumed to be the
* current map on read.
*
 * Supports read/write from byte[] - and this defeats the purpose of a
* Streaming protocol, but is frequently handy for small structures. The
* TypeMap is not included, and is assumed to be the current map on read.
*
* Supports read/write from a standard Stream, which by default assumes it is
* NOT going in and out of the same Cloud, so the TypeMap IS included. The
* serialized object can only be read back into the same minor version of H2O.
*
* @author <a href="mailto:[email protected]"></a>
*/
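/*
 * A minimal write-then-read round-trip through a plain byte[], using only
 * constructors and methods declared below (illustrative sketch, not part of
 * the class itself):
 *
 *   AutoBuffer w = new AutoBuffer();        // ever-expanding byte[] writer
 *   w.put1(42);
 *   byte[] bytes = w.buf();                 // grab the written bytes
 *   AutoBuffer r = new AutoBuffer(bytes);   // fixed byte[] reader
 *   int x = r.get1U();                      // == 42
 */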
public final class AutoBuffer {
// Maximum size of an array we allow to allocate (the value is designed
// to mimic the behavior of OpenJDK libraries)
private static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
// The direct ByteBuffer for schlorping data about.
// Set to null to indicate the AutoBuffer is closed.
ByteBuffer _bb;
public String sourceName = "???";
public boolean isClosed() { return _bb == null ; }
// The ByteChannel for moving data in or out. Could be a SocketChannel (for
// a TCP connection) or a FileChannel (spill-to-disk) or a DatagramChannel
// (for a UDP connection). Null on closed AutoBuffers. Null on initial
// remote-writing AutoBuffers which are still deciding UDP vs TCP. Not-null
// for open AutoBuffers doing file i/o or reading any TCP/UDP or having
// written at least one buffer to TCP/UDP.
private Channel _chan;
// A Stream for moving data in. Null unless this AutoBuffer is
// stream-based, in which case _chan field is null. This path supports
  // persistence: reading and writing objects from different H2O cluster
// instances (but exactly the same H2O revision). The only required
// similarity is same-classes-same-fields; changes here will probably
// silently crash. If the fields are named the same but the semantics
// differ, then again the behavior is probably silent crash.
private InputStream _is;
private short[] _typeMap; // Mapping from input stream map to current map, or null
// If we need a SocketChannel, raise the priority so we get the I/O over
// with. Do not want to have some TCP socket open, blocking the TCP channel
// and then have the thread stalled out. If we raise the priority - be sure
// to lower it again. Note this is for TCP channels ONLY, and only because
// we are blocking another Node with I/O.
private int _oldPrior = -1;
// Where to send or receive data via TCP or UDP (choice made as we discover
// how big the message is); used to lazily create a Channel. If NULL, then
// _chan should be a pre-existing Channel, such as a FileChannel.
final H2ONode _h2o;
// TRUE for read-mode. FALSE for write-mode. Can be flipped for rapid turnaround.
private boolean _read;
// TRUE if this AutoBuffer has never advanced past the first "page" of data.
// The UDP-flavor, port# and task fields are only valid until we read over
// them when flipping the ByteBuffer to the next chunk of data. Used in
// asserts all over the place.
private boolean _firstPage;
// Total size written out from 'new' to 'close'. Only updated when actually
// reading or writing data, or after close(). For profiling only.
int _size;
//int _zeros, _arys;
// More profiling: start->close msec, plus nano's spent in blocking I/O
// calls. The difference between (close-start) and i/o msec is the time the
// i/o thread spends doing other stuff (e.g. allocating Java objects or
// (de)serializing).
long _time_start_ms, _time_close_ms, _time_io_ns;
// I/O persistence flavor: Value.ICE, NFS, HDFS, S3, TCP. Used to record I/O time.
final byte _persist;
// The assumed max UDP packetsize
static final int MTU = 1500-8/*UDP packet header size*/;
// Enable this to test random TCP fails on open or write
static final Random RANDOM_TCP_DROP = null; //new Random();
static final java.nio.charset.Charset UTF_8 = java.nio.charset.Charset.forName("UTF-8");
/** Incoming UDP request. Make a read-mode AutoBuffer from the open Channel,
* figure the originating H2ONode from the first few bytes read. */
AutoBuffer( DatagramChannel sock ) throws IOException {
_chan = null;
_bb = BBP_SML.make(); // Get a small / UDP-sized ByteBuffer
_read = true; // Reading by default
_firstPage = true;
// Read a packet; can get H2ONode from 'sad'?
Inet4Address addr = null;
SocketAddress sad = sock.receive(_bb);
if( sad instanceof InetSocketAddress ) {
InetAddress address = ((InetSocketAddress) sad).getAddress();
if( address instanceof Inet4Address ) {
addr = (Inet4Address) address;
}
}
_size = _bb.position();
_bb.flip(); // Set limit=amount read, and position==0
if( addr == null ) throw new RuntimeException("Unhandled socket type: " + sad);
// Read Inet from socket, port from the stream, figure out H2ONode
_h2o = H2ONode.intern(addr, getPort());
_firstPage = true;
assert _h2o != null;
    _persist = 0; // No persistence
}
/** Incoming TCP request. Make a read-mode AutoBuffer from the open Channel,
* figure the originating H2ONode from the first few bytes read.
*
   * remoteAddress set to null means that the communication is originating from a non-h2o node; a non-null value
   * represents the case where the communication is coming from an h2o node.
* */
AutoBuffer( ByteChannel sock, InetAddress remoteAddress ) throws IOException {
_chan = sock;
raisePriority(); // Make TCP priority high
_bb = BBP_BIG.make(); // Get a big / TPC-sized ByteBuffer
_bb.flip();
_read = true; // Reading by default
_firstPage = true;
// Read Inet from socket, port from the stream, figure out H2ONode
if(remoteAddress!=null) {
_h2o = H2ONode.intern(remoteAddress, getPort());
}else{
      // In case the communication originates from a non-h2o node, we set the _h2o field to null.
      // It is done for 2 reasons:
      //  - H2ONode.intern creates a new thread, and if there are a lot of connections
      //    from non-h2o environments, it could end up with a too-many-open-files exception.
      //  - H2ONode.intern also reads the port (getPort()) and additional information which we do not send
      //    in communication originating from non-h2o nodes.
_h2o = null;
}
_firstPage = true; // Yes, must reset this.
_time_start_ms = System.currentTimeMillis();
_persist = Value.TCP;
}
/** Make an AutoBuffer to write to an H2ONode. Requests for full buffer will
* open a TCP socket and roll through writing to the target. Smaller
* requests will send via UDP. Small requests get ordered by priority, so
* that e.g. NACK and ACKACK messages have priority over most anything else.
* This helps in UDP floods to shut down flooding senders. */
private byte _msg_priority;
AutoBuffer( H2ONode h2o, byte priority ) {
    // If the message goes out via UDP, we write into a DBB up front - because
    // we plan on sending it out via a Datagram socket send call.  If it goes
    // via batched TCP, we write into a HBB up front, because this will be
    // copied again into a large outgoing buffer.
_bb = H2O.ARGS.useUDP // Actually use UDP?
? BBP_SML.make() // Make DirectByteBuffers to start with
: ByteBuffer.wrap(new byte[16]).order(ByteOrder.nativeOrder());
_chan = null; // Channel made lazily only if we write alot
_h2o = h2o;
_read = false; // Writing by default
_firstPage = true; // Filling first page
assert _h2o != null;
_time_start_ms = System.currentTimeMillis();
_persist = Value.TCP;
_msg_priority = priority;
}
/** Spill-to/from-disk request. */
public AutoBuffer( FileChannel fc, boolean read, byte persist ) {
_bb = BBP_BIG.make(); // Get a big / TPC-sized ByteBuffer
_chan = fc; // Write to read/write
_h2o = null; // File Channels never have an _h2o
_read = read; // Mostly assert reading vs writing
if( read ) _bb.flip();
_time_start_ms = System.currentTimeMillis();
_persist = persist; // One of Value.ICE, NFS, S3, HDFS
}
/** Read from UDP multicast. Same as the byte[]-read variant, except there is an H2O. */
AutoBuffer( DatagramPacket pack ) {
_size = pack.getLength();
_bb = ByteBuffer.wrap(pack.getData(), 0, pack.getLength()).order(ByteOrder.nativeOrder());
_bb.position(0);
_read = true;
_firstPage = true;
_chan = null;
_h2o = H2ONode.intern(pack.getAddress(), getPort());
    _persist = 0; // No persistence
}
/** Read from a UDP_TCP buffer; could be in the middle of a large buffer */
AutoBuffer( H2ONode h2o, byte[] buf, int off, int len ) {
assert buf != null : "null fed to ByteBuffer.wrap";
_h2o = h2o;
_bb = ByteBuffer.wrap(buf,off,len).order(ByteOrder.nativeOrder());
_chan = null;
_read = true;
_firstPage = true;
    _persist = 0; // No persistence
_size = len;
}
/** Read from a fixed byte[]; should not be closed. */
public AutoBuffer( byte[] buf ) { this(null,buf,0, buf.length); }
/** Write to an ever-expanding byte[]. Instead of calling {@link #close()},
* call {@link #buf()} to retrieve the final byte[]. */
public AutoBuffer( ) {
_bb = ByteBuffer.wrap(new byte[16]).order(ByteOrder.nativeOrder());
_chan = null;
_h2o = null;
_read = false;
_firstPage = true;
    _persist = 0; // No persistence
}
/** Write to a known sized byte[]. Instead of calling close(), call
* {@link #bufClose()} to retrieve the final byte[]. */
public AutoBuffer( int len ) {
_bb = ByteBuffer.wrap(MemoryManager.malloc1(len)).order(ByteOrder.nativeOrder());
_chan = null;
_h2o = null;
_read = false;
_firstPage = true;
    _persist = 0; // No persistence
}
/** Write to a persistent Stream, including all TypeMap info to allow later
* reloading (by the same exact rev of H2O). */
public AutoBuffer( OutputStream os, boolean persist ) {
_bb = ByteBuffer.wrap(MemoryManager.malloc1(BBP_BIG._size)).order(ByteOrder.nativeOrder());
_read = false;
_chan = Channels.newChannel(os);
_h2o = null;
_firstPage = true;
_persist = 0;
if( persist ) put1(0x1C).put1(0xED).putStr(H2O.ABV.projectVersion()).putAStr(TypeMap.CLAZZES);
else put1(0);
}
/** Read from a persistent Stream (including all TypeMap info) into same
* exact rev of H2O). */
public AutoBuffer( InputStream is ) {
_chan = null;
_h2o = null;
_firstPage = true;
_persist = 0;
_read = true;
_bb = ByteBuffer.wrap(MemoryManager.malloc1(BBP_BIG._size)).order(ByteOrder.nativeOrder());
_bb.flip();
_is = is;
int b = get1U();
if( b==0 ) return; // No persistence info
int magic = get1U();
if( b!=0x1C || magic != 0xED ) throw new IllegalArgumentException("Missing magic number 0x1CED at stream start");
String version = getStr();
if( !version.equals(H2O.ABV.projectVersion()) )
throw new IllegalArgumentException("Found version "+version+", but running version "+H2O.ABV.projectVersion());
String[] typeMap = getAStr();
_typeMap = new short[typeMap.length];
for( int i=0; i<typeMap.length; i++ )
_typeMap[i] = (short)(typeMap[i]==null ? 0 : TypeMap.onIce(typeMap[i]));
}
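  // On-stream persistence layout consumed above (written by the OutputStream
  // constructor): either a single 0x00 byte meaning "no persistence info",
  // or [0x1C][0xED][project version string][String[] type map], which is
  // then remapped onto the current TypeMap.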
@Override public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("[AB ").append(_read ? "read " : "write ");
sb.append(_firstPage?"first ":"2nd ").append(_h2o);
sb.append(" ").append(Value.nameOfPersist(_persist));
if( _bb != null ) sb.append(" 0 <= ").append(_bb.position()).append(" <= ").append(_bb.limit());
if( _bb != null ) sb.append(" <= ").append(_bb.capacity());
return sb.append("]").toString();
}
// Fetch a DBB from an object pool... they are fairly expensive to make
// because a native call is required to get the backing memory. I've
// included BB count tracking code to help track leaks. As of 12/17/2012 the
// leaks are under control, but figure this may happen again so keeping these
// counters around.
//
// We use 2 pool sizes: lots of small UDP packet-sized buffers and fewer
// larger TCP-sized buffers.
private static final boolean DEBUG = Boolean.getBoolean("h2o.find-ByteBuffer-leaks");
private static long HWM=0;
static class BBPool {
long _made, _cached, _freed;
long _numer, _denom, _goal=4*H2O.NUMCPUS, _lastGoal;
final ArrayList<ByteBuffer> _bbs = new ArrayList<>();
final int _size; // Big or small size of ByteBuffers
BBPool( int sz) { _size=sz; }
private ByteBuffer stats( ByteBuffer bb ) {
if( !DEBUG ) return bb;
if( ((_made+_cached)&255)!=255 ) return bb; // Filter printing to 1 in 256
long now = System.currentTimeMillis();
if( now < HWM ) return bb;
HWM = now+1000;
water.util.SB sb = new water.util.SB();
sb.p("BB").p(this==BBP_BIG?1:0).p(" made=").p(_made).p(" -freed=").p(_freed).p(", cache hit=").p(_cached).p(" ratio=").p(_numer/_denom).p(", goal=").p(_goal).p(" cache size=").p(_bbs.size()).nl();
for( int i=0; i<H2O.MAX_PRIORITY; i++ ) {
int x = H2O.getWrkQueueSize(i);
if( x > 0 ) sb.p('Q').p(i).p('=').p(x).p(' ');
}
Log.warn(sb.nl().toString());
return bb;
}
ByteBuffer make() {
while( true ) { // Repeat loop for DBB OutOfMemory errors
ByteBuffer bb=null;
synchronized(_bbs) {
int sz = _bbs.size();
if( sz > 0 ) { bb = _bbs.remove(sz-1); _cached++; _numer++; }
}
if( bb != null ) return stats(bb);
// Cache empty; go get one from C/Native memory
try {
bb = ByteBuffer.allocateDirect(_size).order(ByteOrder.nativeOrder());
synchronized(this) { _made++; _denom++; _goal = Math.max(_goal,_made-_freed); _lastGoal=System.nanoTime(); } // Goal was too low, raise it
return stats(bb);
} catch( OutOfMemoryError oome ) {
// java.lang.OutOfMemoryError: Direct buffer memory
if( !"Direct buffer memory".equals(oome.getMessage()) ) throw oome;
System.out.println("OOM DBB - Sleeping & retrying");
try { Thread.sleep(100); } catch( InterruptedException ignore ) { }
}
}
}
void free(ByteBuffer bb) {
// Heuristic: keep the ratio of BB's made to cache-hits at a fixed level.
// Free to GC if ratio is high, free to internal cache if low.
long ratio = _numer/(_denom+1);
synchronized(_bbs) {
if( ratio < 100 || _bbs.size() < _goal ) { // low hit/miss ratio or below goal
bb.clear(); // Clear-before-add
_bbs.add(bb);
} else _freed++; // Toss the extras (above goal & ratio)
long now = System.nanoTime();
if( now-_lastGoal > 1000000000L ) { // Once/sec, drop goal by 10%
_lastGoal = now;
if( ratio > 110 ) // If ratio is really high, lower goal
_goal=Math.max(4*H2O.NUMCPUS,(long)(_goal*0.99));
// Once/sec, lower numer/denom... means more recent activity outweighs really old stuff
long denom = (long) (0.99 * _denom); // Proposed reduction
if( denom > 10 ) { // Keep a little precision
_numer = (long) (0.99 * _numer); // Keep ratio between made & cached the same
_denom = denom; // ... by lowering both by 10%
}
}
}
}
static int FREE( ByteBuffer bb ) {
if(bb.isDirect())
(bb.capacity()==BBP_BIG._size ? BBP_BIG : BBP_SML).free(bb);
return 0; // Flow coding
}
}
static BBPool BBP_SML = new BBPool( 2*1024); // Bytebuffer "common small size", for UDP
static BBPool BBP_BIG = new BBPool(64*1024); // Bytebuffer "common big size", for TCP
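  // A minimal borrow/return round-trip for the pools above (hypothetical
  // helper for illustration only; nothing in this class calls it):
  private static void examplePoolRoundTrip() {
    ByteBuffer bb = BBP_SML.make();   // cache hit, or a fresh allocateDirect
    try {
      bb.putInt(42);                  // ... fill and drain the buffer ...
    } finally {
      BBPool.FREE(bb);                // recycle into the pool matching its capacity
    }
  }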
public static int TCP_BUF_SIZ = BBP_BIG._size;
private int bbFree() {
if(_bb != null && _bb.isDirect())
BBPool.FREE(_bb);
_bb = null;
return 0; // Flow-coding
}
// You thought TCP was a reliable protocol, right? WRONG! Fails 100% of the
// time under heavy network load. Connection-reset-by-peer & connection
// timeouts abound, even after a socket open and after a 1st successful
// ByteBuffer write. It *appears* that the reader is unaware that a writer
// was told "go ahead and write" by the TCP stack, so all these fails are
// only on the writer-side.
public static class AutoBufferException extends RuntimeException {
public final IOException _ioe;
AutoBufferException( IOException ioe ) { _ioe = ioe; }
}
// For reads, just assert all was read and close and release resources.
// (release ByteBuffer back to the common pool). For writes, force any final
// bytes out. If the write is to an H2ONode and is short, send via UDP.
// AutoBuffer close calls order; i.e. a reader close() will block until the
// writer does a close().
public final int close() {
//if( _size > 2048 ) System.out.println("Z="+_zeros+" / "+_size+", A="+_arys);
if( isClosed() ) return 0; // Already closed
assert _h2o != null || _chan != null || _is != null; // Byte-array backed should not be closed
try {
if( _chan == null ) { // No channel?
if( _read ) {
if( _is != null ) _is.close();
return 0;
} else { // Write
// For small-packet write, send via UDP. Since nothing is sent until
// now, this close() call trivially orders - since the reader will not
// even start (much less close()) until this packet is sent.
if( _bb.position() < MTU) return udpSend();
// oops - Big Write, switch to TCP and finish out there
}
}
// Force AutoBuffer 'close' calls to order; i.e. block readers until
// writers do a 'close' - by writing 1 more byte in the close-call which
// the reader will have to wait for.
if( hasTCP()) { // TCP connection?
try {
if( _read ) { // Reader?
int x = get1U(); // Read 1 more byte
assert x == 0xab : "AB.close instead of 0xab sentinel got "+x+", "+this;
assert _chan != null; // chan set by incoming reader, since we KNOW it is a TCP
// Write the reader-handshake-byte.
SocketChannelUtils.underlyingSocketChannel(_chan).socket().getOutputStream().write(0xcd);
// do not close actually reader socket; recycle it in TCPReader thread
} else { // Writer?
put1(0xab); // Write one-more byte ; might set _chan from null to not-null
sendPartial(); // Finish partial writes; might set _chan from null to not-null
assert _chan != null; // _chan is set not-null now!
// Read the writer-handshake-byte.
int x = SocketChannelUtils.underlyingSocketChannel(_chan).socket().getInputStream().read();
// either TCP con was dropped or other side closed connection without reading/confirming (e.g. task was cancelled).
if( x == -1 ) throw new IOException("Other side closed connection before handshake byte read");
assert x == 0xcd : "Handshake; writer expected a 0xcd from reader but got "+x;
}
} catch( IOException ioe ) {
try { _chan.close(); } catch( IOException ignore ) {} // Silently close
_chan = null; // No channel now, since i/o error
throw ioe; // Rethrow after close
} finally {
if( !_read ) _h2o.freeTCPSocket((ByteChannel) _chan); // Recycle writable TCP channel
restorePriority(); // And if we raised priority, lower it back
}
} else { // FileChannel
if( !_read ) sendPartial(); // Finish partial file-system writes
_chan.close();
_chan = null; // Closed file channel
}
} catch( IOException e ) { // Dunno how to handle so crash-n-burn
throw new AutoBufferException(e);
} finally {
bbFree();
_time_close_ms = System.currentTimeMillis();
// TimeLine.record_IOclose(this,_persist); // Profile AutoBuffer connections
assert isClosed();
}
return 0;
}
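  // The close-ordering handshake above, in short: the writer appends a 0xab
  // sentinel and then blocks reading one confirmation byte, while the reader
  // consumes the sentinel and answers with 0xcd.  Hence a reader's close()
  // cannot return before the writer has reached its own close().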
// Need a sock for a big read or write operation.
// See if we got one already, else open a new socket.
private void tcpOpen() throws IOException {
assert _firstPage && _bb.limit() >= 1+2+4; // At least something written
assert _chan == null;
// assert _bb.position()==0;
_chan = _h2o.getTCPSocket();
raisePriority();
}
// Just close the channel here without reading anything. Without the task
// object at hand we do not know what (how many bytes) should we read from
// the channel. And since the other side will try to read confirmation from
// us before closing the channel, we can not read till the end. So we just
// close the channel and let the other side to deal with it and figure out
// the task has been cancelled (still sending ack ack back).
void drainClose() {
if( isClosed() ) return; // Already closed
final Channel chan = _chan; // Read before closing
assert _h2o != null || chan != null; // Byte-array backed should not be closed
if( chan != null ) { // Channel assumed sick from prior IOException
try { chan.close(); } catch( IOException ignore ) {} // Silently close
_chan = null; // No channel now!
if( !_read && SocketChannelUtils.isSocketChannel(chan)) _h2o.freeTCPSocket((ByteChannel) chan); // Recycle writable TCP channel
}
restorePriority(); // And if we raised priority, lower it back
bbFree();
_time_close_ms = System.currentTimeMillis();
// TimeLine.record_IOclose(this,_persist); // Profile AutoBuffer connections
assert isClosed();
}
// True if we opened a TCP channel, or will open one to close-and-send
boolean hasTCP() { assert !isClosed(); return SocketChannelUtils.isSocketChannel(_chan) || (_h2o!=null && _bb.position() >= MTU); }
// Size in bytes sent, after a close()
int size() { return _size; }
//int zeros() { return _zeros; }
public int position () { return _bb.position(); }
public AutoBuffer position(int p) {_bb.position(p); return this;}
/** Skip over some bytes in the byte buffer. Caller is responsible for not
* reading off end of the bytebuffer; generally this is easy for
* array-backed autobuffers and difficult for i/o-backed bytebuffers. */
public void skip(int skip) { _bb.position(_bb.position()+skip); }
// Return byte[] from a writable AutoBuffer
public final byte[] buf() {
assert _h2o==null && _chan==null && !_read && !_bb.isDirect();
return MemoryManager.arrayCopyOfRange(_bb.array(), _bb.arrayOffset(), _bb.position());
}
public final byte[] bufClose() {
byte[] res = _bb.array();
bbFree();
return res;
}
// For TCP sockets ONLY, raise the thread priority. We assume we are
// blocking other Nodes with our network I/O, so try to get the I/O
// over with.
private void raisePriority() {
if(_oldPrior == -1){
assert SocketChannelUtils.isSocketChannel(_chan);
_oldPrior = Thread.currentThread().getPriority();
Thread.currentThread().setPriority(Thread.MAX_PRIORITY-1);
}
}
private void restorePriority() {
if( _oldPrior == -1 ) return;
Thread.currentThread().setPriority(_oldPrior);
_oldPrior = -1;
}
  // Send via UDP socket. Unlike e.g. TCP sockets, we only need one for sending,
  // so we keep a global one. Also, we do not close it when done, and we do
// not connect it up-front to a target - but send the entire packet right now.
private int udpSend() throws IOException {
assert _chan == null;
TimeLine.record_send(this,false);
_size = _bb.position();
assert _size < AutoBuffer.BBP_SML._size;
_bb.flip(); // Flip for sending
if( _h2o==H2O.SELF ) { // SELF-send is the multi-cast signal
water.init.NetworkInit.multicast(_bb, _msg_priority);
} else { // Else single-cast send
if(H2O.ARGS.useUDP) // Send via UDP directly
water.init.NetworkInit.CLOUD_DGRAM.send(_bb, _h2o._key);
else // Send via bulk TCP
_h2o.sendMessage(_bb, _msg_priority);
}
return 0; // Flow-coding
}
// Flip to write-mode
AutoBuffer clearForWriting(byte priority) {
assert _read;
_read = false;
_msg_priority = priority;
_bb.clear();
_firstPage = true;
return this;
}
// Flip to read-mode
public AutoBuffer flipForReading() {
assert !_read;
_read = true;
_bb.flip();
_firstPage = true;
return this;
}
/** Ensure the buffer has space for sz more bytes */
private ByteBuffer getSp( int sz ) { return sz > _bb.remaining() ? getImpl(sz) : _bb; }
/** Ensure buffer has at least sz bytes in it.
* - Also, set position just past this limit for future reading. */
private ByteBuffer getSz(int sz) {
assert _firstPage : "getSz() is only valid for early UDP bytes";
if( sz > _bb.limit() ) getImpl(sz);
_bb.position(sz);
return _bb;
}
private ByteBuffer getImpl( int sz ) {
assert _read : "Reading from a buffer in write mode";
_bb.compact(); // Move remaining unread bytes to start of buffer; prep for reading
    // It's got to fit or we asked for too much
assert _bb.position()+sz <= _bb.capacity() : "("+_bb.position()+"+"+sz+" <= "+_bb.capacity()+")";
long ns = System.nanoTime();
    while( _bb.position() < sz ) { // Read until we get enough
try {
int res = readAnInt(); // Read more
// Readers are supposed to be strongly typed and read the exact expected bytes.
// However, if a TCP connection fails mid-read we'll get a short-read.
// This is indistinguishable from a mis-alignment between the writer and reader!
if( res <= 0 )
throw new AutoBufferException(new EOFException("Reading "+sz+" bytes, AB="+this));
if( _is != null ) _bb.position(_bb.position()+res); // Advance BB for Streams manually
_size += res; // What we read
} catch( IOException e ) { // Dunno how to handle so crash-n-burn
        // Reset-channel message (this is the Windows WSAECONNRESET wording)
if( e.getMessage().equals("An existing connection was forcibly closed by the remote host") )
throw new AutoBufferException(e);
        // Aborted-channel message (the Windows WSAECONNABORTED wording)
if( e.getMessage().equals("An established connection was aborted by the software in your host machine") )
throw new AutoBufferException(e);
throw Log.throwErr(e);
}
}
_time_io_ns += (System.nanoTime()-ns);
_bb.flip(); // Prep for handing out bytes
//for( int i=0; i < _bb.limit(); i++ ) if( _bb.get(i)==0 ) _zeros++;
_firstPage = false; // First page of data is gone gone gone
return _bb;
}
private int readAnInt() throws IOException {
if (_is == null) return ((ReadableByteChannel) _chan).read(_bb);
final byte[] array = _bb.array();
final int position = _bb.position();
final int remaining = _bb.remaining();
try {
return _is.read(array, position, remaining);
} catch (IOException ioe) {
throw new IOException("Failed reading " + remaining + " bytes into buffer[" + array.length + "] at " + position + " from " + sourceName + " " + _is, ioe);
}
}
/** Put as needed to keep from overflowing the ByteBuffer. */
private ByteBuffer putSp( int sz ) {
assert !_read;
if (sz > _bb.remaining()) {
if ((_h2o == null && _chan == null) || (_bb.hasArray() && _bb.capacity() < BBP_BIG._size))
expandByteBuffer(sz);
else
sendPartial();
assert sz <= _bb.remaining();
}
return _bb;
}
// Do something with partial results, because the ByteBuffer is full.
// If we are doing I/O, ship the bytes we have now and flip the ByteBuffer.
private ByteBuffer sendPartial() {
// Doing I/O with the full ByteBuffer - ship partial results
_size += _bb.position();
if( _chan == null )
TimeLine.record_send(this, true);
_bb.flip(); // Prep for writing.
try {
if( _chan == null )
tcpOpen(); // This is a big operation. Open a TCP socket as-needed.
//for( int i=0; i < _bb.limit(); i++ ) if( _bb.get(i)==0 ) _zeros++;
long ns = System.nanoTime();
while( _bb.hasRemaining() ) {
((WritableByteChannel) _chan).write(_bb);
if( RANDOM_TCP_DROP != null && SocketChannelUtils.isSocketChannel(_chan) && RANDOM_TCP_DROP.nextInt(100) == 0 )
throw new IOException("Random TCP Write Fail");
}
_time_io_ns += (System.nanoTime()-ns);
} catch( IOException e ) { // Some kind of TCP fail?
// Change to an unchecked exception (so we don't have to annotate every
// frick'n put1/put2/put4/read/write call). Retry & recovery happens at
// a higher level. AutoBuffers are used for many things including e.g.
// disk i/o & UDP writes; this exception only happens on a failed TCP
// write - and we don't want to make the other AutoBuffer users have to
// declare (and then ignore) this exception.
throw new AutoBufferException(e);
}
_firstPage = false;
_bb.clear();
return _bb;
}
// Called when the byte buffer doesn't have enough room
// If buffer is array backed, and the needed room is small,
// increase the size of the backing array,
// otherwise dump into a large direct buffer
private ByteBuffer expandByteBuffer(int sizeHint) {
final long needed = (long) sizeHint - _bb.remaining() + _bb.capacity(); // Max needed is 2G
if ((_h2o==null && _chan == null) || (_bb.hasArray() && needed < MTU)) {
if (needed > MAX_ARRAY_SIZE) {
throw new IllegalArgumentException("Cannot allocate more than 2GB array: sizeHint="+sizeHint+", "
+ "needed="+needed
+ ", bb.remaining()=" + _bb.remaining() + ", bb.capacity()="+_bb.capacity());
}
byte[] ary = _bb.array();
      // Just get twice what is currently needed, but not more than the max array size (2G).
// Be careful not to overflow because of integer math!
int newLen = (int) Math.min(1L << (water.util.MathUtils.log2(needed)+1), MAX_ARRAY_SIZE);
int oldpos = _bb.position();
_bb = ByteBuffer.wrap(MemoryManager.arrayCopyOfRange(ary,0,newLen),oldpos,newLen-oldpos)
.order(ByteOrder.nativeOrder());
} else if (_bb.capacity() != BBP_BIG._size) { //avoid expanding existing BBP items
int oldPos = _bb.position();
_bb.flip();
_bb = BBP_BIG.make().put(_bb);
_bb.position(oldPos);
}
return _bb;
}
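  // Growth-policy examples for expandByteBuffer (a sketch; numbers assume an
  // array-backed buffer and MathUtils.log2 being a floor-log2):
  //   needed = 100            -> newLen = 128  (next power of two above needed)
  //   needed = 128            -> newLen = 256  (an exact power of two still doubles)
  //   needed > MAX_ARRAY_SIZE -> IllegalArgumentException (no >2GB arrays)
  // Large or non-array-backed buffers migrate into a BBP_BIG direct buffer instead.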
@SuppressWarnings("unused") public String getStr(int off, int len) {
return new String(_bb.array(), _bb.arrayOffset()+off, len, UTF_8);
}
// -----------------------------------------------
// Utility functions to get various Java primitives
@SuppressWarnings("unused") public boolean getZ() { return get1()!=0; }
@SuppressWarnings("unused") public byte get1 () { return getSp(1).get (); }
@SuppressWarnings("unused") public int get1U() { return get1() & 0xFF; }
@SuppressWarnings("unused") public char get2 () { return getSp(2).getChar (); }
@SuppressWarnings("unused") public short get2s () { return getSp(2).getShort (); }
@SuppressWarnings("unused") public int get3 () { getSp(3); return get1U() | get1U() << 8 | get1U() << 16; }
@SuppressWarnings("unused") public int get4 () { return getSp(4).getInt (); }
@SuppressWarnings("unused") public float get4f() { return getSp(4).getFloat (); }
@SuppressWarnings("unused") public long get8 () { return getSp(8).getLong (); }
@SuppressWarnings("unused") public double get8d() { return getSp(8).getDouble(); }
int get1U(int off) { return _bb.get (off)&0xFF; }
int get4 (int off) { return _bb.getInt (off); }
long get8 (int off) { return _bb.getLong(off); }
@SuppressWarnings("unused") public AutoBuffer putZ (boolean b){ return put1(b?1:0); }
@SuppressWarnings("unused") public AutoBuffer put1 ( int b) { assert b >= -128 && b <= 255 : ""+b+" is not a byte";
putSp(1).put((byte)b); return this; }
@SuppressWarnings("unused") public AutoBuffer put2 ( char c) { putSp(2).putChar (c); return this; }
@SuppressWarnings("unused") public AutoBuffer put2 ( short s) { putSp(2).putShort (s); return this; }
@SuppressWarnings("unused") public AutoBuffer put2s ( short s) { return put2(s); }
@SuppressWarnings("unused") public AutoBuffer put3( int x ) { assert (-1<<24) <= x && x < (1<<24);
return put1((x)&0xFF).put1((x >> 8)&0xFF).put1(x >> 16); }
@SuppressWarnings("unused") public AutoBuffer put4 ( int i) { putSp(4).putInt (i); return this; }
@SuppressWarnings("unused") public AutoBuffer put4f( float f) { putSp(4).putFloat (f); return this; }
@SuppressWarnings("unused") public AutoBuffer put8 ( long l) { putSp(8).putLong (l); return this; }
@SuppressWarnings("unused") public AutoBuffer put8d(double d) { putSp(8).putDouble(d); return this; }
public AutoBuffer put(Freezable f) {
if( f == null ) return putInt(TypeMap.NULL);
assert f.frozenType() > 0 : "No TypeMap for "+f.getClass().getName();
putInt(f.frozenType());
return f.write(this);
}
public <T extends Freezable> T get() {
int id = getInt();
if( id == TypeMap.NULL ) return null;
if( _is!=null ) id = _typeMap[id];
return (T)TypeMap.newFreezable(id).read(this);
}
public <T extends Freezable> T get(Class<T> tc) {
int id = getInt();
if( id == TypeMap.NULL ) return null;
if( _is!=null ) id = _typeMap[id];
assert tc.isInstance(TypeMap.theFreezable(id)):tc.getName() + " != " + TypeMap.theFreezable(id).getClass().getName() + ", id = " + id;
return (T)TypeMap.newFreezable(id).read(this);
}
// Write Key's target IFF the Key is not null; target can be null.
public AutoBuffer putKey(Key k) {
if( k==null ) return this; // Key is null ==> write nothing
Keyed kd = DKV.getGet(k);
put(kd);
return kd == null ? this : kd.writeAll_impl(this);
}
public Keyed getKey(Key k, Futures fs) {
return k==null ? null : getKey(fs); // Key is null ==> read nothing
}
public Keyed getKey(Futures fs) {
Keyed kd = get(Keyed.class);
if( kd == null ) return null;
DKV.put(kd,fs);
return kd.readAll_impl(this,fs);
}
// Put a (compressed) integer. Specifically values in the range -1 to ~250
// will take 1 byte, values near a Short will take 1+2 bytes, values near an
// Int will take 1+4 bytes, and bigger values 1+8 bytes. This compression is
// optimized for small integers (including -1 which is often used as a "array
// is null" flag when passing the array length).
public AutoBuffer putInt(int x) {
    if( 0 <= (x+1) && (x+1) <= 253 ) return put1(x+1);
if( Short.MIN_VALUE <= x && x <= Short.MAX_VALUE ) return put1(255).put2((short)x);
return put1(254).put4(x);
}
// Get a (compressed) integer. See above for the compression strategy and reasoning.
int getInt( ) {
int x = get1U();
if( x <= 253 ) return x-1;
if( x==255 ) return (short)get2();
assert x==254;
return get4();
}
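  // Worked examples of the compressed-int encoding (byte values follow directly
  // from putInt/getInt above):
  //   putInt(-1)    -> 0x00                 (1 byte; getInt: 0x00 - 1 = -1)
  //   putInt(0)     -> 0x01                 (1 byte)
  //   putInt(252)   -> 0xFD                 (largest 1-byte value)
  //   putInt(1000)  -> 0xFF + 2-byte short  (3 bytes total)
  //   putInt(1<<20) -> 0xFE + 4-byte int    (5 bytes total)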
// Put a zero-compressed array. Compression is:
// If null : putInt(-1)
// Else
// putInt(# of leading nulls)
// putInt(# of non-nulls)
// If # of non-nulls is > 0, putInt( # of trailing nulls)
long putZA( Object[] A ) {
if( A==null ) { putInt(-1); return 0; }
int x=0; for( ; x<A.length; x++ ) if( A[x ]!=null ) break;
int y=A.length; for( ; y>x; y-- ) if( A[y-1]!=null ) break;
putInt(x); // Leading zeros to skip
putInt(y-x); // Mixed non-zero guts in middle
    if( y > x )            // If any non-nulls in the middle
putInt(A.length-y); // Trailing zeros
return ((long)x<<32)|(y-x); // Return both leading zeros, and middle non-zeros
}
// Get the lengths of a zero-compressed array.
// Returns -1 if null.
// Returns a long of (leading zeros | middle non-zeros).
// If there are non-zeros, caller has to read the trailing zero-length.
long getZA( ) {
int x=getInt(); // Length of leading zeros
if( x == -1 ) return -1; // or a null
int nz=getInt(); // Non-zero in the middle
return ((long)x<<32)|(long)nz; // Return both ints
}
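  // Worked example of the zero-compressed framing (sketch):
  //   A = { null, null, a, b, null }  // 2 leading nulls, 2 non-nulls, 1 trailing
  //   putZA(A) writes putInt(2), putInt(2), putInt(1) and returns (2L<<32)|2;
  //   the caller then serializes the middle elements a and b itself.
  //   A == null writes putInt(-1), which getZA() reports as -1 ("null array").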
  // TODO: untested...
@SuppressWarnings("unused")
public AutoBuffer putAEnum(Enum[] enums) {
//_arys++;
long xy = putZA(enums);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putEnum(enums[i]);
return this;
}
@SuppressWarnings("unused")
public <E extends Enum> E[] getAEnum(E[] values) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
E[] ts = (E[]) Array.newInstance(values.getClass().getComponentType(), x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = getEnum(values);
return ts;
}
@SuppressWarnings("unused")
public AutoBuffer putA(Freezable[] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) put(fs[i]);
return this;
}
public AutoBuffer putAA(Freezable[][] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA(fs[i]);
return this;
}
@SuppressWarnings("unused") public AutoBuffer putAAA(Freezable[][][] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAA(fs[i]);
return this;
}
public <T extends Freezable> T[] getA(Class<T> tc) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
T[] ts = (T[]) Array.newInstance(tc, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = get(tc);
return ts;
}
public <T extends Freezable> T[][] getAA(Class<T> tc) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
Class<T[]> tcA = (Class<T[]>) Array.newInstance(tc, 0).getClass();
T[][] ts = (T[][]) Array.newInstance(tcA, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = getA(tc);
return ts;
}
@SuppressWarnings("unused") public <T extends Freezable> T[][][] getAAA(Class<T> tc) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
Class<T[] > tcA = (Class<T[] >) Array.newInstance(tc , 0).getClass();
Class<T[][]> tcAA = (Class<T[][]>) Array.newInstance(tcA, 0).getClass();
T[][][] ts = (T[][][]) Array.newInstance(tcAA, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = getAA(tc);
return ts;
}
public AutoBuffer putAStr(String[] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putStr(fs[i]);
return this;
}
public String[] getAStr() {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
String[] ts = new String[x+y+z];
for( int i = x; i < x+y; ++i ) ts[i] = getStr();
return ts;
}
@SuppressWarnings("unused") public AutoBuffer putAAStr(String[][] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAStr(fs[i]);
return this;
}
@SuppressWarnings("unused") public String[][] getAAStr() {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
String[][] ts = new String[x+y+z][];
for( int i = x; i < x+y; ++i ) ts[i] = getAStr();
return ts;
}
// Read the smaller of _bb.remaining() and len into buf.
// Return bytes read, which could be zero.
int read( byte[] buf, int off, int len ) {
int sz = Math.min(_bb.remaining(),len);
_bb.get(buf,off,sz);
return sz;
}
// -----------------------------------------------
// Utility functions to handle common UDP packet tasks.
// Get the 1st control byte
int getCtrl( ) { return getSz(1).get(0)&0xFF; }
// Get the port in next 2 bytes
int getPort( ) { return getSz(1+2).getChar(1); }
// Get the task# in the next 4 bytes
int getTask( ) { return getSz(1+2+4).getInt(1+2); }
// Get the flag in the next 1 byte
int getFlag( ) { return getSz(1+2+4+1).get(1+2+4); }
// Set the ctrl, port, task. Ready to write more bytes afterwards
AutoBuffer putUdp (UDP.udp type) {
assert _bb.position() == 0;
putSp(_bb.position()+1+2);
_bb.put ((byte)type.ordinal());
_bb.putChar((char)H2O.H2O_PORT ); // Outgoing port is always the sender's (me) port
return this;
}
AutoBuffer putTask(UDP.udp type, int tasknum) {
return putUdp(type).put4(tasknum);
}
AutoBuffer putTask(int ctrl, int tasknum) {
assert _bb.position() == 0;
putSp(_bb.position()+1+2+4);
_bb.put((byte)ctrl).putChar((char)H2O.H2O_PORT).putInt(tasknum);
return this;
}
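  // Wire layout of the UDP header written above (offsets follow from the puts):
  //   byte  0   : ctrl (UDP.udp ordinal)
  //   bytes 1-2 : sender's port (char)
  //   bytes 3-6 : task number (int)  -- putTask variants only
  //   byte  7   : flag               -- read back via getFlag()
  // The getCtrl/getPort/getTask/getFlag helpers read these same fixed offsets.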
// -----------------------------------------------
// Utility functions to read & write arrays
public boolean[] getAZ() {
int len = getInt();
if (len == -1) return null;
boolean[] r = new boolean[len];
for (int i=0;i<len;++i) r[i] = getZ();
return r;
}
public byte[] getA1( ) {
//_arys++;
int len = getInt();
return len == -1 ? null : getA1(len);
}
public byte[] getA1( int len ) {
byte[] buf = MemoryManager.malloc1(len);
int sofar = 0;
while( sofar < len ) {
int more = Math.min(_bb.remaining(), len - sofar);
_bb.get(buf, sofar, more);
sofar += more;
if( sofar < len ) getSp(Math.min(_bb.capacity(), len-sofar));
}
return buf;
}
public short[] getA2( ) {
//_arys++;
int len = getInt(); if( len == -1 ) return null;
short[] buf = MemoryManager.malloc2(len);
int sofar = 0;
while( sofar < buf.length ) {
ShortBuffer as = _bb.asShortBuffer();
int more = Math.min(as.remaining(), len - sofar);
as.get(buf, sofar, more);
sofar += more;
_bb.position(_bb.position() + as.position()*2);
if( sofar < len ) getSp(Math.min(_bb.capacity()-1, (len-sofar)*2));
}
return buf;
}
public int[] getA4( ) {
//_arys++;
int len = getInt(); if( len == -1 ) return null;
int[] buf = MemoryManager.malloc4(len);
int sofar = 0;
while( sofar < buf.length ) {
IntBuffer as = _bb.asIntBuffer();
int more = Math.min(as.remaining(), len - sofar);
as.get(buf, sofar, more);
sofar += more;
_bb.position(_bb.position() + as.position()*4);
if( sofar < len ) getSp(Math.min(_bb.capacity()-3, (len-sofar)*4));
}
return buf;
}
public float[] getA4f( ) {
//_arys++;
int len = getInt(); if( len == -1 ) return null;
float[] buf = MemoryManager.malloc4f(len);
int sofar = 0;
while( sofar < buf.length ) {
FloatBuffer as = _bb.asFloatBuffer();
int more = Math.min(as.remaining(), len - sofar);
as.get(buf, sofar, more);
sofar += more;
_bb.position(_bb.position() + as.position()*4);
if( sofar < len ) getSp(Math.min(_bb.capacity()-3, (len-sofar)*4));
}
return buf;
}
public long[] getA8( ) {
//_arys++;
// Get the lengths of lead & trailing zero sections, and the non-zero
// middle section.
int x = getInt(); if( x == -1 ) return null;
int y = getInt(); // Non-zero in the middle
int z = y==0 ? 0 : getInt();// Trailing zeros
long[] buf = MemoryManager.malloc8(x+y+z);
switch( get1U() ) { // 1,2,4 or 8 for how the middle section is passed
case 1: for( int i=x; i<x+y; i++ ) buf[i] = get1U(); return buf;
case 2: for( int i=x; i<x+y; i++ ) buf[i] = (short)get2(); return buf;
case 4: for( int i=x; i<x+y; i++ ) buf[i] = get4(); return buf;
case 8: break;
default: throw H2O.fail();
}
int sofar = x;
while( sofar < x+y ) {
LongBuffer as = _bb.asLongBuffer();
int more = Math.min(as.remaining(), x+y - sofar);
as.get(buf, sofar, more);
sofar += more;
_bb.position(_bb.position() + as.position()*8);
if( sofar < x+y ) getSp(Math.min(_bb.capacity()-7, (x+y-sofar)*8));
}
return buf;
}
public double[] getA8d( ) {
//_arys++;
int len = getInt(); if( len == -1 ) return null;
double[] buf = MemoryManager.malloc8d(len);
int sofar = 0;
while( sofar < len ) {
DoubleBuffer as = _bb.asDoubleBuffer();
int more = Math.min(as.remaining(), len - sofar);
as.get(buf, sofar, more);
sofar += more;
_bb.position(_bb.position() + as.position()*8);
if( sofar < len ) getSp(Math.min(_bb.capacity()-7, (len-sofar)*8));
}
return buf;
}
@SuppressWarnings("unused")
public byte[][] getAA1( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
byte[][] ary = new byte[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA1();
return ary;
}
@SuppressWarnings("unused")
public short[][] getAA2( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
short[][] ary = new short[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA2();
return ary;
}
public int[][] getAA4( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
int[][] ary = new int[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA4();
return ary;
}
@SuppressWarnings("unused") public float[][] getAA4f( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
float[][] ary = new float[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA4f();
return ary;
}
public long[][] getAA8( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
long[][] ary = new long[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA8();
return ary;
}
@SuppressWarnings("unused") public double[][] getAA8d( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
double[][] ary = new double[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA8d();
return ary;
}
@SuppressWarnings("unused") public int[][][] getAAA4( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
int[][][] ary = new int[x+y+z][][];
for( int i=x; i<x+y; i++ ) ary[i] = getAA4();
return ary;
}
@SuppressWarnings("unused") public long[][][] getAAA8( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
long[][][] ary = new long[x+y+z][][];
for( int i=x; i<x+y; i++ ) ary[i] = getAA8();
return ary;
}
public double[][][] getAAA8d( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
double[][][] ary = new double[x+y+z][][];
for( int i=x; i<x+y; i++ ) ary[i] = getAA8d();
return ary;
}
public String getStr( ) {
int len = getInt();
return len == -1 ? null : new String(getA1(len), UTF_8);
}
public <E extends Enum> E getEnum(E[] values ) {
int idx = get1();
return idx == -1 ? null : values[idx];
}
public AutoBuffer putAZ( boolean[] ary ) {
if( ary == null ) return putInt(-1);
putInt(ary.length);
for (boolean anAry : ary) putZ(anAry);
return this;
}
public AutoBuffer putA1( byte[] ary ) {
//_arys++;
if( ary == null ) return putInt(-1);
putInt(ary.length);
return putA1(ary,ary.length);
}
public AutoBuffer putA1( byte[] ary, int length ) { return putA1(ary,0,length); }
public AutoBuffer putA1( byte[] ary, int sofar, int length ) {
if (length - sofar > _bb.remaining()) expandByteBuffer(length-sofar);
while( sofar < length ) {
int len = Math.min(length - sofar, _bb.remaining());
_bb.put(ary, sofar, len);
sofar += len;
if( sofar < length ) sendPartial();
}
return this;
}
AutoBuffer putA2( short[] ary ) {
//_arys++;
if( ary == null ) return putInt(-1);
putInt(ary.length);
if (ary.length*2 > _bb.remaining()) expandByteBuffer(ary.length*2);
int sofar = 0;
while( sofar < ary.length ) {
ShortBuffer sb = _bb.asShortBuffer();
int len = Math.min(ary.length - sofar, sb.remaining());
sb.put(ary, sofar, len);
sofar += len;
_bb.position(_bb.position() + sb.position()*2);
if( sofar < ary.length ) sendPartial();
}
return this;
}
public AutoBuffer putA4( int[] ary ) {
//_arys++;
if( ary == null ) return putInt(-1);
putInt(ary.length);
// Note: based on Brandon commit this should improve performance during parse (7d950d622ee3037555ecbab0e39404f8f0917652)
if (ary.length*4 > _bb.remaining()) {
expandByteBuffer(ary.length*4); // Try to expand BB buffer to fit input array
}
int sofar = 0;
while( sofar < ary.length ) {
IntBuffer ib = _bb.asIntBuffer();
int len = Math.min(ary.length - sofar, ib.remaining());
ib.put(ary, sofar, len);
sofar += len;
_bb.position(_bb.position() + ib.position()*4);
if( sofar < ary.length ) sendPartial();
}
return this;
}
public AutoBuffer putA8( long[] ary ) {
//_arys++;
if( ary == null ) return putInt(-1);
// Trim leading & trailing zeros. Pass along the length of leading &
// trailing zero sections, and the non-zero section in the middle.
int x=0; for( ; x<ary.length; x++ ) if( ary[x ]!=0 ) break;
int y=ary.length; for( ; y>x; y-- ) if( ary[y-1]!=0 ) break;
int nzlen = y-x;
putInt(x);
putInt(nzlen);
    if( nzlen > 0 )         // If any non-zeros in the middle
putInt(ary.length-y); // Trailing zeros
// Size trim the NZ section: pass as bytes or shorts if possible.
long min=Long.MAX_VALUE, max=Long.MIN_VALUE;
for( int i=x; i<y; i++ ) { if( ary[i]<min ) min=ary[i]; if( ary[i]>max ) max=ary[i]; }
if( 0 <= min && max < 256 ) { // Ship as unsigned bytes
put1(1); for( int i=x; i<y; i++ ) put1((int)ary[i]);
return this;
}
if( Short.MIN_VALUE <= min && max < Short.MAX_VALUE ) { // Ship as shorts
put1(2); for( int i=x; i<y; i++ ) put2((short)ary[i]);
return this;
}
if( Integer.MIN_VALUE <= min && max < Integer.MAX_VALUE ) { // Ship as ints
put1(4); for( int i=x; i<y; i++ ) put4((int)ary[i]);
return this;
}
put1(8); // Ship as full longs
int sofar = x;
if ((y-sofar)*8 > _bb.remaining()) expandByteBuffer(ary.length*8);
while( sofar < y ) {
LongBuffer lb = _bb.asLongBuffer();
int len = Math.min(y - sofar, lb.remaining());
lb.put(ary, sofar, len);
sofar += len;
_bb.position(_bb.position() + lb.position() * 8);
if( sofar < y ) sendPartial();
}
return this;
}
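  // Worked example of the long[] size-trimming above (sketch):
  //   ary = { 0, 0, 7, 300, 0 }
  //   -> putInt(2); putInt(2); putInt(1);        // zero-compression framing
  //   -> put1(2);                                // tag: middle fits in shorts
  //   -> put2((short)7); put2((short)300);       // trimmed payload
  // Middles fitting 0..255 ship as bytes (tag 1), int-range values as tag 4,
  // and everything else as full longs under tag 8.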
public AutoBuffer putA4f( float[] ary ) {
//_arys++;
if( ary == null ) return putInt(-1);
putInt(ary.length);
if (ary.length*4 > _bb.remaining()) expandByteBuffer(ary.length*4);
int sofar = 0;
while( sofar < ary.length ) {
FloatBuffer fb = _bb.asFloatBuffer();
int len = Math.min(ary.length - sofar, fb.remaining());
fb.put(ary, sofar, len);
sofar += len;
_bb.position(_bb.position() + fb.position()*4);
if( sofar < ary.length ) sendPartial();
}
return this;
}
public AutoBuffer putA8d( double[] ary ) {
//_arys++;
if( ary == null ) return putInt(-1);
putInt(ary.length);
if (ary.length*8 > _bb.remaining()) expandByteBuffer(ary.length*8);
int sofar = 0;
while( sofar < ary.length ) {
DoubleBuffer db = _bb.asDoubleBuffer();
int len = Math.min(ary.length - sofar, db.remaining());
db.put(ary, sofar, len);
sofar += len;
_bb.position(_bb.position() + db.position()*8);
if( sofar < ary.length ) sendPartial();
}
return this;
}
public AutoBuffer putAA1( byte[][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA1(ary[i]);
return this;
}
@SuppressWarnings("unused") AutoBuffer putAA2( short[][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA2(ary[i]);
return this;
}
public AutoBuffer putAA4( int[][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA4(ary[i]);
return this;
}
@SuppressWarnings("unused")
public AutoBuffer putAA4f( float[][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA4f(ary[i]);
return this;
}
public AutoBuffer putAA8( long[][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA8(ary[i]);
return this;
}
@SuppressWarnings("unused") public AutoBuffer putAA8d( double[][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA8d(ary[i]);
return this;
}
public AutoBuffer putAAA4( int[][][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAA4(ary[i]);
return this;
}
public AutoBuffer putAAA8( long[][][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAA8(ary[i]);
return this;
}
public AutoBuffer putAAA8d( double[][][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAA8d(ary[i]);
return this;
}
// Put a String as bytes (not chars!)
public AutoBuffer putStr( String s ) {
if( s==null ) return putInt(-1);
return putA1(StringUtils.bytesOf(s));
}
@SuppressWarnings("unused") public AutoBuffer putEnum( Enum x ) {
return put1(x==null ? -1 : x.ordinal());
}
public static byte[] javaSerializeWritePojo(Object o) {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
      out.writeObject(o);
    } catch (IOException e) {
      throw Log.throwErr(e);
    }
    return bos.toByteArray(); // After close, so all buffered object data is flushed
}
public static Object javaSerializeReadPojo(byte [] bytes) {
try {
      final ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes));
      return ois.readObject();
} catch (IOException e) {
String className = nameOfClass(bytes);
throw Log.throwErr(new RuntimeException("Failed to deserialize " + className, e));
} catch (ClassNotFoundException e) {
throw Log.throwErr(e);
}
}
static String nameOfClass(byte[] bytes) {
if (bytes == null) return "(null)";
if (bytes.length < 11) return "(no name)";
int nameSize = Math.min(40, Math.max(3, bytes[7]));
return new String(bytes, 8, Math.min(nameSize, bytes.length - 8));
}
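  // Why bytes[7] above: a plain java.io serialization stream starts with
  //   bytes 0-3 : stream magic 0xACED and version 0x0005
  //   byte  4   : TC_OBJECT (0x73)
  //   byte  5   : TC_CLASSDESC (0x72)
  //   bytes 6-7 : 2-byte class-name length (bytes[7] is the low byte)
  //   bytes 8.. : the class name itself
  // so this is a best-effort peek at the name, clamped to 3..40 characters.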
// ==========================================================================
// Java Serializable objects
// Note: These are heck-a-lot more expensive than their Freezable equivalents.
@SuppressWarnings("unused") public AutoBuffer putSer( Object obj ) {
if (obj == null) return putA1(null);
return putA1(javaSerializeWritePojo(obj));
}
@SuppressWarnings("unused") public AutoBuffer putASer(Object[] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putSer(fs[i]);
return this;
}
@SuppressWarnings("unused") public AutoBuffer putAASer(Object[][] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putASer(fs[i]);
return this;
}
@SuppressWarnings("unused") public AutoBuffer putAAASer(Object[][][] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAASer(fs[i]);
return this;
}
@SuppressWarnings("unused") public Object getSer() {
byte[] ba = getA1();
return ba == null ? null : javaSerializeReadPojo(ba);
}
@SuppressWarnings("unused") public <T> T getSer(Class<T> tc) {
return (T)getSer();
}
@SuppressWarnings("unused") public <T> T[] getASer(Class<T> tc) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
T[] ts = (T[]) Array.newInstance(tc, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = getSer(tc);
return ts;
}
@SuppressWarnings("unused") public <T> T[][] getAASer(Class<T> tc) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
T[][] ts = (T[][]) Array.newInstance(tc, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = getASer(tc);
return ts;
}
@SuppressWarnings("unused") public <T> T[][][] getAAASer(Class<T> tc) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
T[][][] ts = (T[][][]) Array.newInstance(tc, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = getAASer(tc);
return ts;
}
// ==========================================================================
// JSON AutoBuffer printers
public AutoBuffer putJNULL( ) { return put1('n').put1('u').put1('l').put1('l'); }
// Escaped JSON string
private AutoBuffer putJStr( String s ) {
byte[] b = StringUtils.bytesOf(s);
int off=0;
for( int i=0; i<b.length; i++ ) {
if( b[i] == '\\' || b[i] == '"') { // Double up backslashes, escape quotes
putA1(b,off,i); // Everything so far (no backslashes)
put1('\\'); // The extra backslash
off=i; // Advance the "so far" variable
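        // Deliberately no continue: off stays at i, so the escaped character
        // itself is emitted by a later putA1 call.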
}
// Handle remaining special cases in JSON
// if( b[i] == '/' ) { putA1(b,off,i); put1('\\'); put1('/'); off=i+1; continue;}
if( b[i] == '\b' ) { putA1(b,off,i); put1('\\'); put1('b'); off=i+1; continue;}
if( b[i] == '\f' ) { putA1(b,off,i); put1('\\'); put1('f'); off=i+1; continue;}
if( b[i] == '\n' ) { putA1(b,off,i); put1('\\'); put1('n'); off=i+1; continue;}
if( b[i] == '\r' ) { putA1(b,off,i); put1('\\'); put1('r'); off=i+1; continue;}
if( b[i] == '\t' ) { putA1(b,off,i); put1('\\'); put1('t'); off=i+1; continue;}
// ASCII Control characters
if( b[i] == 127 ) { putA1(b,off,i); put1('\\'); put1('u'); put1('0'); put1('0'); put1('7'); put1('f'); off=i+1; continue;}
if( b[i] >= 0 && b[i] < 32 ) {
String hexStr = Integer.toHexString(b[i]);
putA1(b, off, i); put1('\\'); put1('u');
for (int j = 0; j < 4 - hexStr.length(); j++) put1('0');
        for (int j = 0; j < hexStr.length(); j++) put1(hexStr.charAt(j)); // Hex digits most-significant first
off=i+1;
}
}
return putA1(b,off,b.length);
}
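  // Escaping examples for putJStr (sketch):
  //   putJStr("say \"hi\"\n") emits: say \"hi\"\n   (quote and newline escaped)
  //   DEL (127) and control characters without a short escape become \u00XX.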
public AutoBuffer putJSONStrUnquoted ( String s ) { return s==null ? putJNULL() : putJStr(s); }
public AutoBuffer putJSONStrUnquoted ( String name, String s ) { return s==null ? putJSONStr(name).put1(':').putJNULL() : putJSONStr(name).put1(':').putJStr(s); }
public AutoBuffer putJSONName( String s ) { return put1('"').putJStr(s).put1('"'); }
public AutoBuffer putJSONStr ( String s ) { return s==null ? putJNULL() : putJSONName(s); }
public AutoBuffer putJSONAStr(String[] ss) {
if( ss == null ) return putJNULL();
put1('[');
for( int i=0; i<ss.length; i++ ) {
if( i>0 ) put1(',');
putJSONStr(ss[i]);
}
return put1(']');
}
private AutoBuffer putJSONAAStr( String[][] sss) {
if( sss == null ) return putJNULL();
put1('[');
for( int i=0; i<sss.length; i++ ) {
if( i>0 ) put1(',');
putJSONAStr(sss[i]);
}
return put1(']');
}
@SuppressWarnings("unused") public AutoBuffer putJSONStr (String name, String s ) { return putJSONStr(name).put1(':').putJSONStr(s); }
@SuppressWarnings("unused") public AutoBuffer putJSONAStr (String name, String[] ss ) { return putJSONStr(name).put1(':').putJSONAStr(ss); }
@SuppressWarnings("unused") public AutoBuffer putJSONAAStr(String name, String[][]sss) { return putJSONStr(name).put1(':').putJSONAAStr(sss); }
@SuppressWarnings("unused") public AutoBuffer putJSONSer (String name, Object o ) { return putJSONStr(name).put1(':').putJNULL(); }
@SuppressWarnings("unused") public AutoBuffer putJSONASer (String name, Object[] oo ) { return putJSONStr(name).put1(':').putJNULL(); }
@SuppressWarnings("unused") public AutoBuffer putJSONAASer (String name, Object[][] ooo ) { return putJSONStr(name).put1(':').putJNULL(); }
@SuppressWarnings("unused") public AutoBuffer putJSONAAASer(String name, Object[][][] oooo) { return putJSONStr(name).put1(':').putJNULL(); }
public AutoBuffer putJSONAZ( String name, boolean[] f) { return putJSONStr(name).put1(':').putJSONAZ(f); }
public AutoBuffer putJSON(Freezable ice) { return ice == null ? putJNULL() : ice.writeJSON(this); }
public AutoBuffer putJSONA( Freezable fs[] ) {
if( fs == null ) return putJNULL();
put1('[');
for( int i=0; i<fs.length; i++ ) {
if( i>0 ) put1(',');
putJSON(fs[i]);
}
return put1(']');
}
public AutoBuffer putJSONAA( Freezable fs[][]) {
if( fs == null ) return putJNULL();
put1('[');
for( int i=0; i<fs.length; i++ ) {
if( i>0 ) put1(',');
putJSONA(fs[i]);
}
return put1(']');
}
public AutoBuffer putJSONAAA( Freezable fs[][][]) {
if( fs == null ) return putJNULL();
put1('[');
for( int i=0; i<fs.length; i++ ) {
if( i>0 ) put1(',');
putJSONAA(fs[i]);
}
return put1(']');
}
@SuppressWarnings("unused") public AutoBuffer putJSON ( String name, Freezable f ) { return putJSONStr(name).put1(':').putJSON (f); }
public AutoBuffer putJSONA ( String name, Freezable f[] ) { return putJSONStr(name).put1(':').putJSONA (f); }
@SuppressWarnings("unused") public AutoBuffer putJSONAA( String name, Freezable f[][]){ return putJSONStr(name).put1(':').putJSONAA(f); }
@SuppressWarnings("unused") public AutoBuffer putJSONAAA( String name, Freezable f[][][]){ return putJSONStr(name).put1(':').putJSONAAA(f); }
@SuppressWarnings("unused") public AutoBuffer putJSONZ( String name, boolean value ) { return putJSONStr(name).put1(':').putJStr("" + value); }
private AutoBuffer putJSONAZ(boolean [] b) {
if (b == null) return putJNULL();
put1('[');
for( int i = 0; i < b.length; ++i) {
if (i > 0) put1(',');
putJStr(""+b[i]);
}
return put1(']');
}
// Most simple integers
private AutoBuffer putJInt( int i ) {
byte b[] = StringUtils.toBytes(i);
return putA1(b,b.length);
}
public AutoBuffer putJSON1( byte b ) { return putJInt(b); }
public AutoBuffer putJSONA1( byte ary[] ) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSON1(ary[i]);
}
return put1(']');
}
private AutoBuffer putJSONAA1(byte ary[][]) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSONA1(ary[i]);
}
return put1(']');
}
@SuppressWarnings("unused") public AutoBuffer putJSON1 (String name, byte b ) { return putJSONStr(name).put1(':').putJSON1(b); }
@SuppressWarnings("unused") public AutoBuffer putJSONA1 (String name, byte b[] ) { return putJSONStr(name).put1(':').putJSONA1(b); }
@SuppressWarnings("unused") public AutoBuffer putJSONAA1(String name, byte b[][]) { return putJSONStr(name).put1(':').putJSONAA1(b); }
public AutoBuffer putJSONAEnum(String name, Enum[] enums) {
return putJSONStr(name).put1(':').putJSONAEnum(enums);
}
public AutoBuffer putJSONAEnum( Enum[] enums ) {
if( enums == null ) return putJNULL();
put1('[');
for( int i=0; i<enums.length; i++ ) {
if( i>0 ) put1(',');
putJSONEnum(enums[i]);
}
return put1(']');
}
AutoBuffer putJSON2( char c ) { return putJSON4(c); }
AutoBuffer putJSON2( String name, char c ) { return putJSONStr(name).put1(':').putJSON2(c); }
AutoBuffer putJSON2( short c ) { return putJSON4(c); }
AutoBuffer putJSON2( String name, short c ) { return putJSONStr(name).put1(':').putJSON2(c); }
public AutoBuffer putJSONA2( String name, short ary[] ) { return putJSONStr(name).put1(':').putJSONA2(ary); }
AutoBuffer putJSONA2( short ary[] ) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSON2(ary[i]);
}
return put1(']');
}
AutoBuffer putJSON8 ( long l ) { return putJStr(Long.toString(l)); }
AutoBuffer putJSONA8( long ary[] ) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSON8(ary[i]);
}
return put1(']');
}
AutoBuffer putJSONAA8( long ary[][] ) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSONA8(ary[i]);
}
return put1(']');
}
AutoBuffer putJSONAAA8( long ary[][][] ) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSONAA8(ary[i]);
}
return put1(']');
}
AutoBuffer putJSONEnum( Enum e ) {
return e==null ? putJNULL() : put1('"').putJStr(e.toString()).put1('"');
}
public AutoBuffer putJSON8 ( String name, long l ) { return putJSONStr(name).put1(':').putJSON8(l); }
public AutoBuffer putJSONEnum( String name, Enum e ) { return putJSONStr(name).put1(':').putJSONEnum(e); }
public AutoBuffer putJSONA8( String name, long ary[] ) { return putJSONStr(name).put1(':').putJSONA8(ary); }
public AutoBuffer putJSONAA8( String name, long ary[][] ) { return putJSONStr(name).put1(':').putJSONAA8(ary); }
public AutoBuffer putJSONAAA8( String name, long ary[][][] ) { return putJSONStr(name).put1(':').putJSONAAA8(ary); }
public AutoBuffer putJSON4(int i) { return putJStr(Integer.toString(i)); }
AutoBuffer putJSONA4( int[] a) {
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSON4(a[i]);
}
return put1(']');
}
AutoBuffer putJSONAA4( int[][] a ) {
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSONA4(a[i]);
}
return put1(']');
}
AutoBuffer putJSONAAA4( int[][][] a ) {
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSONAA4(a[i]);
}
return put1(']');
}
public AutoBuffer putJSON4 ( String name, int i ) { return putJSONStr(name).put1(':').putJSON4(i); }
public AutoBuffer putJSONA4( String name, int[] a) { return putJSONStr(name).put1(':').putJSONA4(a); }
public AutoBuffer putJSONAA4( String name, int[][] a ) { return putJSONStr(name).put1(':').putJSONAA4(a); }
public AutoBuffer putJSONAAA4( String name, int[][][] a ) { return putJSONStr(name).put1(':').putJSONAAA4(a); }
AutoBuffer putJSON4f ( float f ) { return f==Float.POSITIVE_INFINITY?putJSONStr(JSON_POS_INF):(f==Float.NEGATIVE_INFINITY?putJSONStr(JSON_NEG_INF):(Float.isNaN(f)?putJSONStr(JSON_NAN):putJStr(Float .toString(f)))); }
public AutoBuffer putJSON4f ( String name, float f ) { return putJSONStr(name).put1(':').putJSON4f(f); }
AutoBuffer putJSONA4f( float[] a ) {
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSON4f(a[i]);
}
return put1(']');
}
public AutoBuffer putJSONA4f(String name, float[] a) {
putJSONStr(name).put1(':');
return putJSONA4f(a);
}
AutoBuffer putJSONAA4f(String name, float[][] a) {
putJSONStr(name).put1(':');
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSONA4f(a[i]);
}
return put1(']');
}
AutoBuffer putJSON8d( double d ) {
if (TwoDimTable.isEmpty(d)) return putJNULL();
return d==Double.POSITIVE_INFINITY?putJSONStr(JSON_POS_INF):(d==Double.NEGATIVE_INFINITY?putJSONStr(JSON_NEG_INF):(Double.isNaN(d)?putJSONStr(JSON_NAN):putJStr(Double.toString(d))));
}
public AutoBuffer putJSON8d( String name, double d ) { return putJSONStr(name).put1(':').putJSON8d(d); }
public AutoBuffer putJSONA8d( String name, double[] a ) {
return putJSONStr(name).put1(':').putJSONA8d(a);
}
public AutoBuffer putJSONAA8d( String name, double[][] a) {
return putJSONStr(name).put1(':').putJSONAA8d(a);
}
public AutoBuffer putJSONAAA8d( String name, double[][][] a) { return putJSONStr(name).put1(':').putJSONAAA8d(a); }
public AutoBuffer putJSONA8d( double[] a ) {
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSON8d(a[i]);
}
return put1(']');
}
public AutoBuffer putJSONAA8d( double[][] a ) {
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSONA8d(a[i]);
}
return put1(']');
}
AutoBuffer putJSONAAA8d( double ary[][][] ) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSONAA8d(ary[i]);
}
return put1(']');
}
static final String JSON_NAN = "NaN";
static final String JSON_POS_INF = "Infinity";
static final String JSON_NEG_INF = "-Infinity";
}
| mathemage/h2o-3 | h2o-core/src/main/java/water/AutoBuffer.java | Java | apache-2.0 | 74,621 |
"use strict";
import chai from "chai";
import chaiAsPromised from "chai-as-promised";
import sinon from "sinon";
import BusinessElementsClient from "../src";
import uuid from "uuid";
import * as requests from "../src/requests";
chai.use(chaiAsPromised);
chai.should();
chai.config.includeStack = true;
const FAKE_SERVER_URL = "http://api.fake-server";
/** @test {Attribute} */
describe("Attribute", () => {
let sandbox, client, attributeId, attribute;
beforeEach(() => {
sandbox = sinon.sandbox.create();
client = new BusinessElementsClient(FAKE_SERVER_URL);
attributeId = uuid.v4();
attribute = client.tenant("example.com").attributes().attribute(attributeId);
});
afterEach(() => {
sandbox.restore();
});
/** @test {Attribute#get} */
describe("#get()", () => {
const data = {id: attributeId};
beforeEach(() => {
sandbox.stub(client, "execute").returns(Promise.resolve(data));
});
it("should get capture", () => {
attribute.get();
sinon.assert.calledWithMatch(client.execute, {
path: `/attributes/${attributeId}`
});
});
it("should return attribute data", () => {
return attribute.get().should.become(data);
});
});
/** @test {Attribute#edit} */
describe("#edit()", () => {
const response = {status: "Ok"};
const schema = {
"type": "object",
"properties": {
"type": {
"title": "type",
"type": "string"
}
}
};
beforeEach(() => {
sandbox.stub(client, "execute").returns(Promise.resolve(response));
sandbox.spy(requests, "updateAttribute");
});
it("should edit the attribute", () => {
attribute.edit(schema, {});
sinon.assert.calledWithMatch(requests.updateAttribute, attributeId, schema);
});
it("should return success", () => {
return attribute.edit(schema, {}).should.eventually.become(response);
});
});
/** @test {Attribute#remove} */
describe("#remove()", () => {
const response = {status: "Ok"};
beforeEach(() => {
sandbox.stub(client, "execute").returns(Promise.resolve(response));
sandbox.spy(requests, "deleteAttribute");
});
it("should delete the attribute", () => {
attribute.remove({});
sinon.assert.calledWithMatch(requests.deleteAttribute, attributeId);
});
it("should return success", () => {
return attribute.remove({}).should.eventually.become(response);
});
});
});
| Product-Foundry/business-elements-client-js | test/attribute_test.js | JavaScript | apache-2.0 | 2,495 |
/*
* Copyright (c) 2010-2013 Evolveum
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.evolveum.midpoint.model.impl.lens;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import javax.xml.namespace.QName;
import com.evolveum.midpoint.prism.*;
import com.evolveum.midpoint.schema.DeltaConvertor;
import com.evolveum.midpoint.schema.result.OperationResult;
import com.evolveum.midpoint.util.exception.*;
import com.evolveum.midpoint.xml.ns._public.model.model_context_3.LensProjectionContextType;
import org.apache.commons.lang.StringUtils;
import org.jvnet.jaxb2_commons.lang.Validate;
import com.evolveum.midpoint.common.crypto.CryptoUtil;
import com.evolveum.midpoint.common.refinery.RefinedObjectClassDefinition;
import com.evolveum.midpoint.common.refinery.RefinedResourceSchema;
import com.evolveum.midpoint.common.refinery.ResourceShadowDiscriminator;
import com.evolveum.midpoint.model.api.context.ModelProjectionContext;
import com.evolveum.midpoint.model.api.context.SynchronizationPolicyDecision;
import com.evolveum.midpoint.prism.delta.ChangeType;
import com.evolveum.midpoint.prism.delta.DeltaSetTriple;
import com.evolveum.midpoint.prism.delta.ObjectDelta;
import com.evolveum.midpoint.prism.delta.PrismValueDeltaSetTriple;
import com.evolveum.midpoint.prism.delta.ReferenceDelta;
import com.evolveum.midpoint.prism.path.ItemPath;
import com.evolveum.midpoint.schema.processor.ResourceAttribute;
import com.evolveum.midpoint.schema.processor.ResourceSchema;
import com.evolveum.midpoint.schema.util.MiscSchemaUtil;
import com.evolveum.midpoint.schema.util.ShadowUtil;
import com.evolveum.midpoint.schema.util.ResourceTypeUtil;
import com.evolveum.midpoint.schema.util.SchemaDebugUtil;
import com.evolveum.midpoint.util.Cloner;
import com.evolveum.midpoint.util.DebugUtil;
import com.evolveum.midpoint.xml.ns._public.common.common_3.AssignmentPolicyEnforcementType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.FocusType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.LayerType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ObjectType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.OperationResultStatusType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.OperationResultType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ProjectionPolicyType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ResourceObjectTypeDefinitionType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ResourceObjectTypeDependencyType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ResourceType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ShadowAssociationType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ShadowDiscriminatorType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ShadowKindType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ShadowType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.SynchronizationSituationType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ValuePolicyType;
/**
* @author semancik
*
*/
public class LensProjectionContext extends LensElementContext<ShadowType> implements ModelProjectionContext {
private ObjectDelta<ShadowType> syncDelta;
/**
* If set to true: absolute state of this projection was detected by the synchronization.
* This is mostly for debugging and visibility. It is not used by projection logic.
*/
private boolean syncAbsoluteTrigger = false;
/**
* The wave in which this resource should be processed. Initial value of -1 means "undetermined".
*/
private int wave = -1;
/**
* Indicates that the wave computation is still in progress.
*/
private transient boolean waveIncomplete = false;
/**
* Definition of account type.
*/
private ResourceShadowDiscriminator resourceShadowDiscriminator;
private boolean fullShadow = false;
/**
* True if the account is "legal" (assigned to the user). It may be false for accounts that are either
* found to be illegal by live sync, were unassigned from user, etc.
* If set to null the situation is not yet known. Null is a typical value when the context is constructed.
*/
private boolean isAssigned;
/**
* True if the account should be part of the synchronization. E.g. outbound expression should be applied to it.
*/
private boolean isActive;
/**
     * True if there is a valid assignment for this projection and/or the policy allows such a projection to exist.
*/
private Boolean isLegal = null;
private Boolean isLegalOld = null;
private boolean isExists;
/**
     * Decision regarding the account. It indicates what the engine has DECIDED TO DO with the context.
     * If set to null, no decision was made yet. Null is also a typical value when the context is created.
*/
private SynchronizationPolicyDecision synchronizationPolicyDecision;
/**
* True if we want to reconcile account in this context.
*/
private boolean doReconciliation;
/**
* Synchronization situation as it was originally detected by the synchronization code (SynchronizationService).
* This is mostly for debug purposes. Projector and clockwork do not need to care about this.
* The synchronization intent is used instead.
*/
private SynchronizationSituationType synchronizationSituationDetected = null;
/**
* Synchronization situation which was the result of synchronization reaction (projector and clockwork run).
* This is mostly for debug purposes. Projector and clockwork do not care about this (except for setting it).
* The synchronization decision is used instead.
*/
private SynchronizationSituationType synchronizationSituationResolved = null;
/**
* Delta set triple for accounts. Specifies which accounts should be added, removed or stay as they are.
     * It tells almost nothing about attributes directly, although the information about attributes is inside
     * each account construction (in the form of a ValueConstruction that contains attribute delta triples).
*
* Intermediary computation result. It is stored to allow re-computing of account constructions during
* iterative computations.
*/
private transient PrismValueDeltaSetTriple<PrismPropertyValue<Construction>> constructionDeltaSetTriple;
private transient Construction outboundConstruction;
private transient Collection<ResourceObjectTypeDependencyType> dependencies = null;
private transient Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> squeezedAttributes;
private transient Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismContainerValue<ShadowAssociationType>>>> squeezedAssociations;
private ValuePolicyType accountPasswordPolicy;
/**
* Resource that hosts this projection.
*/
transient private ResourceType resource;
LensProjectionContext(LensContext<? extends ObjectType> lensContext, ResourceShadowDiscriminator resourceAccountType) {
super(ShadowType.class, lensContext);
this.resourceShadowDiscriminator = resourceAccountType;
this.isAssigned = false;
}
public ObjectDelta<ShadowType> getSyncDelta() {
return syncDelta;
}
public void setSyncDelta(ObjectDelta<ShadowType> syncDelta) {
this.syncDelta = syncDelta;
}
public boolean isSyncAbsoluteTrigger() {
return syncAbsoluteTrigger;
}
public void setSyncAbsoluteTrigger(boolean syncAbsoluteTrigger) {
this.syncAbsoluteTrigger = syncAbsoluteTrigger;
}
public int getWave() {
return wave;
}
public void setWave(int wave) {
this.wave = wave;
}
public boolean isWaveIncomplete() {
return waveIncomplete;
}
public void setWaveIncomplete(boolean waveIncomplete) {
this.waveIncomplete = waveIncomplete;
}
public boolean isDoReconciliation() {
return doReconciliation;
}
public void setDoReconciliation(boolean doReconciliation) {
this.doReconciliation = doReconciliation;
}
public ResourceShadowDiscriminator getResourceShadowDiscriminator() {
return resourceShadowDiscriminator;
}
public void setResourceShadowDiscriminator(ResourceShadowDiscriminator resourceShadowDiscriminator) {
this.resourceShadowDiscriminator = resourceShadowDiscriminator;
}
public boolean compareResourceShadowDiscriminator(ResourceShadowDiscriminator rsd, boolean compareOrder) {
Validate.notNull(rsd.getResourceOid());
if (resourceShadowDiscriminator == null) {
            // This may be a valid case, e.g. in case of broken contexts or if a context is just loading
return false;
}
if (!rsd.getResourceOid().equals(resourceShadowDiscriminator.getResourceOid())) {
return false;
}
if (!rsd.getKind().equals(resourceShadowDiscriminator.getKind())) {
return false;
}
if (rsd.isThombstone() != resourceShadowDiscriminator.isThombstone()) {
return false;
}
if (rsd.getIntent() == null) {
try {
if (!getRefinedAccountDefinition().isDefaultInAKind()) {
return false;
}
} catch (SchemaException e) {
throw new SystemException("Internal error: "+e.getMessage(), e);
}
} else if (!rsd.getIntent().equals(resourceShadowDiscriminator.getIntent())) {
return false;
}
if (compareOrder && rsd.getOrder() != resourceShadowDiscriminator.getOrder()) {
return false;
}
return true;
}
public boolean isThombstone() {
if (resourceShadowDiscriminator == null) {
return false;
}
return resourceShadowDiscriminator.isThombstone();
}
public void addAccountSyncDelta(ObjectDelta<ShadowType> delta) throws SchemaException {
if (syncDelta == null) {
syncDelta = delta;
} else {
syncDelta.merge(delta);
}
}
public boolean isAdd() {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.ADD) {
return true;
} else if (synchronizationPolicyDecision != null){
return false;
}
return super.isAdd();
}
public boolean isModify() {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.KEEP) {
return true;
} else if (synchronizationPolicyDecision != null){
return false;
}
return super.isModify();
}
public boolean isDelete() {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.DELETE) {
return true;
} else if (synchronizationPolicyDecision != null){
return false;
}
if (syncDelta != null && syncDelta.isDelete()) {
return true;
}
return super.isDelete();
}
public ResourceType getResource() {
return resource;
}
public void setResource(ResourceType resource) {
this.resource = resource;
}
public boolean isAssigned() {
return isAssigned;
}
public void setAssigned(boolean isAssigned) {
this.isAssigned = isAssigned;
}
public boolean isActive() {
return isActive;
}
public void setActive(boolean isActive) {
this.isActive = isActive;
}
public Boolean isLegal() {
return isLegal;
}
public void setLegal(Boolean isLegal) {
this.isLegal = isLegal;
}
public Boolean isLegalOld() {
return isLegalOld;
}
public void setLegalOld(Boolean isLegalOld) {
this.isLegalOld = isLegalOld;
}
public boolean isExists() {
return isExists;
}
public void setExists(boolean exists) {
this.isExists = exists;
}
public SynchronizationPolicyDecision getSynchronizationPolicyDecision() {
return synchronizationPolicyDecision;
}
public void setSynchronizationPolicyDecision(SynchronizationPolicyDecision policyDecision) {
this.synchronizationPolicyDecision = policyDecision;
}
public SynchronizationSituationType getSynchronizationSituationDetected() {
return synchronizationSituationDetected;
}
public void setSynchronizationSituationDetected(
SynchronizationSituationType synchronizationSituationDetected) {
this.synchronizationSituationDetected = synchronizationSituationDetected;
}
public SynchronizationSituationType getSynchronizationSituationResolved() {
return synchronizationSituationResolved;
}
public void setSynchronizationSituationResolved(
SynchronizationSituationType synchronizationSituationResolved) {
this.synchronizationSituationResolved = synchronizationSituationResolved;
}
public boolean isFullShadow() {
return fullShadow;
}
/**
* Returns true if full shadow is available, either loaded or in a create delta.
*/
public boolean hasFullShadow() {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.ADD) {
return true;
}
return isFullShadow();
}
public void setFullShadow(boolean fullShadow) {
this.fullShadow = fullShadow;
}
public ShadowKindType getKind() {
ResourceShadowDiscriminator discr = getResourceShadowDiscriminator();
if (discr != null) {
return discr.getKind();
}
if (getObjectOld()!=null) {
return getObjectOld().asObjectable().getKind();
}
if (getObjectCurrent()!=null) {
return getObjectCurrent().asObjectable().getKind();
}
if (getObjectNew()!=null) {
return getObjectNew().asObjectable().getKind();
}
return ShadowKindType.ACCOUNT;
}
public PrismValueDeltaSetTriple<PrismPropertyValue<Construction>> getConstructionDeltaSetTriple() {
return constructionDeltaSetTriple;
}
public void setConstructionDeltaSetTriple(
PrismValueDeltaSetTriple<PrismPropertyValue<Construction>> constructionDeltaSetTriple) {
this.constructionDeltaSetTriple = constructionDeltaSetTriple;
}
public Construction getOutboundConstruction() {
return outboundConstruction;
}
public void setOutboundConstruction(Construction outboundConstruction) {
this.outboundConstruction = outboundConstruction;
}
public Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> getSqueezedAttributes() {
return squeezedAttributes;
}
public void setSqueezedAttributes(Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> squeezedAttributes) {
this.squeezedAttributes = squeezedAttributes;
}
public Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismContainerValue<ShadowAssociationType>>>> getSqueezedAssociations() {
return squeezedAssociations;
}
public void setSqueezedAssociations(
Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismContainerValue<ShadowAssociationType>>>> squeezedAssociations) {
this.squeezedAssociations = squeezedAssociations;
}
public ResourceObjectTypeDefinitionType getResourceObjectTypeDefinitionType() {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.BROKEN) {
return null;
}
ResourceObjectTypeDefinitionType def = ResourceTypeUtil.getResourceObjectTypeDefinitionType(
resource, getResourceShadowDiscriminator().getKind(), resourceShadowDiscriminator.getIntent());
return def;
}
private ResourceSchema getResourceSchema() throws SchemaException {
return RefinedResourceSchema.getResourceSchema(resource, getNotNullPrismContext());
}
public RefinedResourceSchema getRefinedResourceSchema() throws SchemaException {
if (resource == null) {
return null;
}
return RefinedResourceSchema.getRefinedSchema(resource, LayerType.MODEL, getNotNullPrismContext());
}
public RefinedObjectClassDefinition getRefinedAccountDefinition() throws SchemaException {
RefinedResourceSchema refinedSchema = getRefinedResourceSchema();
if (refinedSchema == null) {
return null;
}
return refinedSchema.getRefinedDefinition(getResourceShadowDiscriminator().getKind(), getResourceShadowDiscriminator().getIntent());
}
public Collection<ResourceObjectTypeDependencyType> getDependencies() {
if (dependencies == null) {
ResourceObjectTypeDefinitionType resourceAccountTypeDefinitionType = getResourceObjectTypeDefinitionType();
if (resourceAccountTypeDefinitionType == null) {
// No dependencies. But we cannot set null as that means "unknown". So let's set empty collection instead.
dependencies = new ArrayList<ResourceObjectTypeDependencyType>();
} else {
dependencies = resourceAccountTypeDefinitionType.getDependency();
}
}
return dependencies;
}
public ValuePolicyType getAccountPasswordPolicy() {
return accountPasswordPolicy;
}
public void setAccountPasswordPolicy(ValuePolicyType accountPasswordPolicy) {
this.accountPasswordPolicy = accountPasswordPolicy;
}
public ValuePolicyType getEffectivePasswordPolicy() {
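// Lookup order: the account-level policy wins, then the focus org's policy, then the global policy.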
if (accountPasswordPolicy != null) {
return accountPasswordPolicy;
}
if (getLensContext().getFocusContext().getOrgPasswordPolicy() != null){
return getLensContext().getFocusContext().getOrgPasswordPolicy();
}
return getLensContext().getGlobalPasswordPolicy();
}
public AssignmentPolicyEnforcementType getAssignmentPolicyEnforcementType() {
// TODO: per-resource assignment enforcement
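// Resource-level projection settings take precedence; if the resource defines none,
// fall back to the context-wide synchronization settings.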
ResourceType resource = getResource();
ProjectionPolicyType globalAccountSynchronizationSettings = null;
if (resource != null){
globalAccountSynchronizationSettings = resource.getProjection();
}
if (globalAccountSynchronizationSettings == null) {
globalAccountSynchronizationSettings = getLensContext().getAccountSynchronizationSettings();
}
AssignmentPolicyEnforcementType globalAssignmentPolicyEnforcement = MiscSchemaUtil.getAssignmentPolicyEnforcementType(globalAccountSynchronizationSettings);
return globalAssignmentPolicyEnforcement;
}
public boolean isLegalize(){
ResourceType resource = getResource();
ProjectionPolicyType globalAccountSynchronizationSettings = null;
if (resource != null){
globalAccountSynchronizationSettings = resource.getProjection();
}
if (globalAccountSynchronizationSettings == null) {
globalAccountSynchronizationSettings = getLensContext().getAccountSynchronizationSettings();
}
if (globalAccountSynchronizationSettings == null){
return false;
}
if (globalAccountSynchronizationSettings.isLegalize() == null){
return false;
}
return globalAccountSynchronizationSettings.isLegalize();
}
/**
* Recomputes the new state of the account (accountNew) by applying the deltas to the old state (accountOld).
* Assumes that the old account is already set (or is null if it does not exist).
*/
public void recompute() throws SchemaException {
ObjectDelta<ShadowType> accDelta = getDelta();
PrismObject<ShadowType> base = getObjectCurrent();
if (base == null) {
base = getObjectOld();
}
ObjectDelta<ShadowType> syncDelta = getSyncDelta();
if (base == null && syncDelta != null
&& ChangeType.ADD.equals(syncDelta.getChangeType())) {
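// There is no old/current object, but the sync delta is an ADD: reconstruct the
// base state from the object carried inside the ADD delta.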
PrismObject<ShadowType> objectToAdd = syncDelta.getObjectToAdd();
if (objectToAdd != null) {
PrismObjectDefinition<ShadowType> objectDefinition = objectToAdd.getDefinition();
// TODO: remove constructor, use some factory method instead
base = new PrismObject<ShadowType>(objectToAdd.getElementName(), objectDefinition, getNotNullPrismContext());
base = syncDelta.computeChangedObject(base);
}
}
if (accDelta == null) {
// No change
setObjectNew(base);
return;
}
if (base == null && accDelta.isModify()) {
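// A MODIFY delta with no base object: apply it to a blank shadow so the attribute
// deltas still yield a meaningful new state.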
RefinedObjectClassDefinition rAccountDef = getRefinedAccountDefinition();
if (rAccountDef != null) {
base = (PrismObject<ShadowType>) rAccountDef.createBlankShadow();
}
}
setObjectNew(accDelta.computeChangedObject(base));
}
public void clearIntermediateResults() {
constructionDeltaSetTriple = null;
outboundConstruction = null;
squeezedAttributes = null;
}
/**
* Distribute the resource that's in the context into all the prism objects (old, new) and deltas.
* The resourceRef will then contain not just the OID but also the full resource object. This may optimize handling
* of the objects in upper layers (e.g. GUI).
*/
public void distributeResource() {
ResourceType resourceType = getResource();
if (resourceType == null) {
return;
}
PrismObject<ResourceType> resource = resourceType.asPrismObject();
distributeResourceObject(getObjectOld(), resource);
distributeResourceObject(getObjectCurrent(), resource);
distributeResourceObject(getObjectNew(), resource);
distributeResourceDelta(getPrimaryDelta(), resource);
distributeResourceDelta(getSecondaryDelta(), resource);
}
private void distributeResourceObject(PrismObject<ShadowType> object, PrismObject<ResourceType> resource) {
if (object == null) {
return;
}
PrismReference resourceRef = object.findReference(ShadowType.F_RESOURCE_REF);
if (resourceRef != null) {
distributeResourceValues(resourceRef.getValues(), resource);
}
}
private void distributeResourceValue(PrismReferenceValue resourceRefVal, PrismObject<ResourceType> resource) {
if (resourceRefVal != null) {
resourceRefVal.setObject(resource);
}
}
private void distributeResourceDelta(ObjectDelta<ShadowType> delta, PrismObject<ResourceType> resource) {
if (delta == null) {
return;
}
if (delta.isAdd()) {
distributeResourceObject(delta.getObjectToAdd(), resource);
} else if (delta.isModify()) {
ReferenceDelta referenceDelta = delta.findReferenceModification(ShadowType.F_RESOURCE_REF);
if (referenceDelta != null) {
distributeResourceValues(referenceDelta.getValuesToAdd(), resource);
distributeResourceValues(referenceDelta.getValuesToDelete(), resource);
distributeResourceValues(referenceDelta.getValuesToReplace(), resource);
}
} // Nothing to do for DELETE delta
}
private void distributeResourceValues(Collection<PrismReferenceValue> values, PrismObject<ResourceType> resource) {
if (values == null) {
return;
}
for(PrismReferenceValue pval: values) {
distributeResourceValue(pval, resource);
}
}
/**
* Returns delta suitable for execution. The primary and secondary deltas may not make complete sense all by themselves.
* E.g. they may both be MODIFY deltas even in cases where the account should be created. The deltas begin to make sense
* only when combined with the sync decision. This method provides the deltas all combined and ready for execution.
*/
public ObjectDelta<ShadowType> getExecutableDelta() throws SchemaException {
SynchronizationPolicyDecision policyDecision = getSynchronizationPolicyDecision();
ObjectDelta<ShadowType> origDelta = getDelta();
if (policyDecision == SynchronizationPolicyDecision.ADD) {
if (origDelta == null || origDelta.isModify()) {
// We need to convert modify delta to ADD
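// Illustrative example (not in the original code): a MODIFY delta that sets
// attributes/uid=jack, combined with the ADD decision, becomes an ADD delta whose
// blank shadow is merged with that modification, so the object to add already
// carries uid=jack.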
ObjectDelta<ShadowType> addDelta = new ObjectDelta<ShadowType>(getObjectTypeClass(),
ChangeType.ADD, getPrismContext());
RefinedObjectClassDefinition rAccount = getRefinedAccountDefinition();
if (rAccount == null) {
throw new IllegalStateException("Definition for account type " + getResourceShadowDiscriminator()
+ " not found in the context, but it should be there");
}
PrismObject<ShadowType> newAccount = (PrismObject<ShadowType>) rAccount.createBlankShadow();
addDelta.setObjectToAdd(newAccount);
if (origDelta != null) {
addDelta.merge(origDelta);
}
return addDelta;
}
} else if (policyDecision == SynchronizationPolicyDecision.KEEP) {
// Any delta is OK
} else if (policyDecision == SynchronizationPolicyDecision.DELETE) {
ObjectDelta<ShadowType> deleteDelta = new ObjectDelta<ShadowType>(getObjectTypeClass(),
ChangeType.DELETE, getPrismContext());
String oid = getOid();
if (oid == null) {
throw new IllegalStateException(
"Internal error: account context OID is null during attempt to create delete secondary delta; context="
+this);
}
deleteDelta.setOid(oid);
return deleteDelta;
} else {
// This is either UNLINK or null, both are in fact the same as KEEP
// Any delta is OK
}
return origDelta;
}
public void checkConsistence() {
checkConsistence(null, true, false);
}
public void checkConsistence(String contextDesc, boolean fresh, boolean force) {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.IGNORE) {
// Do not check these; they may be quite wild.
return;
}
super.checkConsistence(contextDesc);
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.BROKEN) {
return;
}
if (fresh && !force) {
if (resource == null) {
throw new IllegalStateException("Null resource in "+this + (contextDesc == null ? "" : " in " +contextDesc));
}
if (resourceShadowDiscriminator == null) {
throw new IllegalStateException("Null resource account type in "+this + (contextDesc == null ? "" : " in " +contextDesc));
}
}
if (syncDelta != null) {
try {
syncDelta.checkConsistence(true, true, true);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException(e.getMessage()+"; in "+getElementDesc()+" sync delta in "+this + (contextDesc == null ? "" : " in " +contextDesc), e);
} catch (IllegalStateException e) {
throw new IllegalStateException(e.getMessage()+"; in "+getElementDesc()+" sync delta in "+this + (contextDesc == null ? "" : " in " +contextDesc), e);
}
}
}
protected boolean isRequireSecondardyDeltaOid() {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.ADD ||
synchronizationPolicyDecision == SynchronizationPolicyDecision.BROKEN ||
synchronizationPolicyDecision == SynchronizationPolicyDecision.IGNORE) {
return false;
}
if (getResourceShadowDiscriminator() != null && getResourceShadowDiscriminator().getOrder() > 0) {
// These may not have the OID yet
return false;
}
return super.isRequireSecondardyDeltaOid();
}
@Override
public void cleanup() {
super.cleanup();
synchronizationPolicyDecision = null;
// isLegal = null;
// isLegalOld = null;
isAssigned = false;
isActive = false;
}
@Override
public void normalize() {
super.normalize();
if (syncDelta != null) {
syncDelta.normalize();
}
}
@Override
public void reset() {
super.reset();
wave = -1;
fullShadow = false;
isAssigned = false;
isActive = false;
synchronizationPolicyDecision = null;
constructionDeltaSetTriple = null;
outboundConstruction = null;
dependencies = null;
squeezedAttributes = null;
accountPasswordPolicy = null;
}
@Override
public void adopt(PrismContext prismContext) throws SchemaException {
super.adopt(prismContext);
if (syncDelta != null) {
prismContext.adopt(syncDelta);
}
}
@Override
public LensProjectionContext clone(LensContext<? extends ObjectType> lensContext) {
LensProjectionContext clone = new LensProjectionContext(lensContext, resourceShadowDiscriminator);
copyValues(clone, lensContext);
return clone;
}
protected void copyValues(LensProjectionContext clone, LensContext<? extends ObjectType> lensContext) {
super.copyValues(clone, lensContext);
// do NOT clone transient values such as accountConstructionDeltaSetTriple
// these are not meant to be cloned and they are also not directly cloneable
clone.dependencies = this.dependencies;
clone.doReconciliation = this.doReconciliation;
clone.fullShadow = this.fullShadow;
clone.isAssigned = this.isAssigned;
clone.outboundConstruction = this.outboundConstruction;
clone.synchronizationPolicyDecision = this.synchronizationPolicyDecision;
clone.resource = this.resource;
clone.resourceShadowDiscriminator = this.resourceShadowDiscriminator;
clone.squeezedAttributes = cloneSqueezedAttributes();
if (this.syncDelta != null) {
clone.syncDelta = this.syncDelta.clone();
}
clone.wave = this.wave;
}
private Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> cloneSqueezedAttributes() {
if (squeezedAttributes == null) {
return null;
}
Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> clonedMap
= new HashMap<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>>();
Cloner<ItemValueWithOrigin<PrismPropertyValue<?>>> cloner = new Cloner<ItemValueWithOrigin<PrismPropertyValue<?>>>() {
@Override
public ItemValueWithOrigin<PrismPropertyValue<?>> clone(ItemValueWithOrigin<PrismPropertyValue<?>> original) {
return original.clone();
}
};
for (Entry<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> entry: squeezedAttributes.entrySet()) {
clonedMap.put(entry.getKey(), entry.getValue().clone(cloner));
}
return clonedMap;
}
/**
* Returns true if the projection has any value for the specified attribute.
*/
public boolean hasValueForAttribute(QName attributeName) throws SchemaException {
ItemPath attrPath = new ItemPath(ShadowType.F_ATTRIBUTES, attributeName);
if (getObjectNew() != null) {
PrismProperty<?> attrNew = getObjectNew().findProperty(attrPath);
if (attrNew != null && !attrNew.isEmpty()) {
return true;
}
}
return false;
}
private boolean hasValueForAttribute(QName attributeName, Collection<PrismPropertyValue<Construction>> acPpvSet) {
if (acPpvSet == null) {
return false;
}
for (PrismPropertyValue<Construction> acPpv: acPpvSet) {
Construction ac = acPpv.getValue();
if (ac.hasValueForAttribute(attributeName)) {
return true;
}
}
return false;
}
public AccountOperation getOperation() {
if (isAdd()) {
return AccountOperation.ADD;
}
if (isDelete()) {
return AccountOperation.DELETE;
}
return AccountOperation.MODIFY;
}
@Override
public void checkEncrypted() {
super.checkEncrypted();
if (syncDelta != null) {
CryptoUtil.checkEncrypted(syncDelta);
}
}
public String getHumanReadableName() {
StringBuilder sb = new StringBuilder();
sb.append("account(");
String humanReadableAccountIdentifier = getHumanReadableIdentifier();
if (StringUtils.isEmpty(humanReadableAccountIdentifier)) {
sb.append("no ID");
} else {
sb.append("ID ");
sb.append(humanReadableAccountIdentifier);
}
ResourceShadowDiscriminator discr = getResourceShadowDiscriminator();
if (discr != null) {
sb.append(", type '");
sb.append(discr.getIntent());
sb.append("', ");
if (discr.getOrder() != 0) {
sb.append("order ").append(discr.getOrder()).append(", ");
}
} else {
sb.append(" (no discriminator) ");
}
sb.append(getResource());
sb.append(")");
return sb.toString();
}
private String getHumanReadableIdentifier() {
PrismObject<ShadowType> object = getObjectNew();
if (object == null) {
object = getObjectOld();
}
if (object == null) {
object = getObjectCurrent();
}
if (object == null) {
return null;
}
if (object.canRepresent(ShadowType.class)) {
PrismObject<ShadowType> shadow = (PrismObject<ShadowType>)object;
Collection<ResourceAttribute<?>> identifiers = ShadowUtil.getIdentifiers(shadow);
if (identifiers == null) {
return null;
}
StringBuilder sb = new StringBuilder();
Iterator<ResourceAttribute<?>> iterator = identifiers.iterator();
while (iterator.hasNext()) {
ResourceAttribute<?> id = iterator.next();
sb.append(id.toHumanReadableString());
if (iterator.hasNext()) {
sb.append(",");
}
}
return sb.toString();
} else {
return object.toString();
}
}
@Override
public String debugDump() {
return debugDump(0);
}
@Override
public String debugDump(int indent) {
return debugDump(indent, true);
}
public String debugDump(int indent, boolean showTriples) {
StringBuilder sb = new StringBuilder();
SchemaDebugUtil.indentDebugDump(sb, indent);
sb.append("PROJECTION ");
sb.append(getObjectTypeClass() == null ? "null" : getObjectTypeClass().getSimpleName());
sb.append(" ");
sb.append(getResourceShadowDiscriminator());
if (resource != null) {
sb.append(" : ");
sb.append(resource.getName().getOrig());
}
sb.append("\n");
SchemaDebugUtil.indentDebugDump(sb, indent + 1);
sb.append("OID: ").append(getOid());
sb.append(", wave ").append(wave);
if (fullShadow) {
sb.append(", full");
} else {
sb.append(", shadow");
}
sb.append(", exists=").append(isExists);
sb.append(", assigned=").append(isAssigned);
sb.append(", active=").append(isActive);
sb.append(", legal=").append(isLegalOld).append("->").append(isLegal);
sb.append(", recon=").append(doReconciliation);
sb.append(", syncIntent=").append(getSynchronizationIntent());
sb.append(", decision=").append(synchronizationPolicyDecision);
if (!isFresh()) {
sb.append(", NOT FRESH");
}
if (resourceShadowDiscriminator != null && resourceShadowDiscriminator.isThombstone()) {
sb.append(", THOMBSTONE");
}
if (syncAbsoluteTrigger) {
sb.append(", SYNC TRIGGER");
}
if (getIteration() != 0) {
sb.append(", iteration=").append(getIteration()).append(" (").append(getIterationToken()).append(")");
}
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("old"), getObjectOld(), indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("current"), getObjectCurrent(), indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("new"), getObjectNew(), indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("primary delta"), getPrimaryDelta(), indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("secondary delta"), getSecondaryDelta(), indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("sync delta"), getSyncDelta(), indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("executed deltas"), getExecutedDeltas(), indent+1);
if (showTriples) {
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("constructionDeltaSetTriple"), constructionDeltaSetTriple, indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("outbound account construction"), outboundConstruction, indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("squeezed attributes"), squeezedAttributes, indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("squeezed associations"), squeezedAssociations, indent + 1);
// This is just a debug thing
// sb.append("\n");
// DebugUtil.indentDebugDump(sb, indent);
// sb.append("ACCOUNT dependencies\n");
// sb.append(DebugUtil.debugDump(dependencies, indent + 1));
}
return sb.toString();
}
@Override
protected String getElementDefaultDesc() {
return "projection";
}
@Override
public String toString() {
return "LensProjectionContext(" + (getObjectTypeClass() == null ? "null" : getObjectTypeClass().getSimpleName()) + ":" + getOid() +
( resource == null ? "" : " on " + resource ) + ")";
}
/**
* Return a human readable name of the projection object suitable for logs.
*/
public String toHumanReadableString() {
if (resourceShadowDiscriminator == null) {
return "(null" + resource + ")";
}
if (resource != null) {
return "("+getKindValue(resourceShadowDiscriminator.getKind()) + " ("+resourceShadowDiscriminator.getIntent()+") on " + resource + ")";
} else {
return "("+getKindValue(resourceShadowDiscriminator.getKind()) + " ("+resourceShadowDiscriminator.getIntent()+") on " + resourceShadowDiscriminator.getResourceOid() + ")";
}
}
public String getHumanReadableKind() {
if (resourceShadowDiscriminator == null) {
return "resource object";
}
return getKindValue(resourceShadowDiscriminator.getKind());
}
private String getKindValue(ShadowKindType kind) {
if (kind == null) {
return "null";
}
return kind.value();
}
@Override
protected String getElementDesc() {
if (resourceShadowDiscriminator == null) {
return "shadow";
}
return getKindValue(resourceShadowDiscriminator.getKind());
}
public void addToPrismContainer(PrismContainer<LensProjectionContextType> lensProjectionContextTypeContainer) throws SchemaException {
LensProjectionContextType lensProjectionContextType = lensProjectionContextTypeContainer.createNewValue().asContainerable();
super.storeIntoLensElementContextType(lensProjectionContextType);
lensProjectionContextType.setSyncDelta(syncDelta != null ? DeltaConvertor.toObjectDeltaType(syncDelta) : null);
lensProjectionContextType.setWave(wave);
lensProjectionContextType.setResourceShadowDiscriminator(resourceShadowDiscriminator != null ?
resourceShadowDiscriminator.toResourceShadowDiscriminatorType() : null);
lensProjectionContextType.setFullShadow(fullShadow);
lensProjectionContextType.setIsAssigned(isAssigned);
lensProjectionContextType.setIsActive(isActive);
lensProjectionContextType.setIsLegal(isLegal);
lensProjectionContextType.setIsLegalOld(isLegalOld);
lensProjectionContextType.setIsExists(isExists);
lensProjectionContextType.setSynchronizationPolicyDecision(synchronizationPolicyDecision != null ? synchronizationPolicyDecision.toSynchronizationPolicyDecisionType() : null);
lensProjectionContextType.setDoReconciliation(doReconciliation);
lensProjectionContextType.setSynchronizationSituationDetected(synchronizationSituationDetected);
lensProjectionContextType.setSynchronizationSituationResolved(synchronizationSituationResolved);
lensProjectionContextType.setAccountPasswordPolicy(accountPasswordPolicy);
lensProjectionContextType.setSyncAbsoluteTrigger(syncAbsoluteTrigger);
}
public static LensProjectionContext fromLensProjectionContextType(LensProjectionContextType projectionContextType, LensContext lensContext, OperationResult result) throws SchemaException, ConfigurationException, ObjectNotFoundException, CommunicationException {
String objectTypeClassString = projectionContextType.getObjectTypeClass();
if (StringUtils.isEmpty(objectTypeClassString)) {
throw new SystemException("Object type class is undefined in LensProjectionContextType");
}
ResourceShadowDiscriminator resourceShadowDiscriminator = ResourceShadowDiscriminator.fromResourceShadowDiscriminatorType(projectionContextType.getResourceShadowDiscriminator());
LensProjectionContext projectionContext = new LensProjectionContext(lensContext, resourceShadowDiscriminator);
projectionContext.retrieveFromLensElementContextType(projectionContextType, result);
if (projectionContextType.getSyncDelta() != null) {
projectionContext.syncDelta = DeltaConvertor.createObjectDelta(projectionContextType.getSyncDelta(), lensContext.getPrismContext());
} else {
projectionContext.syncDelta = null;
}
projectionContext.wave = projectionContextType.getWave() != null ? projectionContextType.getWave() : 0;
projectionContext.fullShadow = projectionContextType.isFullShadow() != null ? projectionContextType.isFullShadow() : false;
projectionContext.isAssigned = projectionContextType.isIsAssigned() != null ? projectionContextType.isIsAssigned() : false;
projectionContext.isActive = projectionContextType.isIsActive() != null ? projectionContextType.isIsActive() : false;
projectionContext.isLegal = projectionContextType.isIsLegal();
projectionContext.isExists = projectionContextType.isIsExists() != null ? projectionContextType.isIsExists() : false;
projectionContext.synchronizationPolicyDecision = SynchronizationPolicyDecision.fromSynchronizationPolicyDecisionType(projectionContextType.getSynchronizationPolicyDecision());
projectionContext.doReconciliation = projectionContextType.isDoReconciliation() != null ? projectionContextType.isDoReconciliation() : false;
projectionContext.synchronizationSituationDetected = projectionContextType.getSynchronizationSituationDetected();
projectionContext.synchronizationSituationResolved = projectionContextType.getSynchronizationSituationResolved();
projectionContext.accountPasswordPolicy = projectionContextType.getAccountPasswordPolicy();
projectionContext.syncAbsoluteTrigger = projectionContextType.isSyncAbsoluteTrigger();
return projectionContext;
}
// Determines whether the full shadow is present, based on the operation result obtained from provisioning.
public void determineFullShadowFlag(OperationResultType fetchResult) {
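// A partial or fatal fetch error means provisioning likely could not return the complete
// shadow, so it must not be treated as full.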
if (fetchResult != null
&& (fetchResult.getStatus() == OperationResultStatusType.PARTIAL_ERROR
|| fetchResult.getStatus() == OperationResultStatusType.FATAL_ERROR)) { // todo what about other kinds of status? [e.g. in-progress]
setFullShadow(false);
} else {
setFullShadow(true);
}
}
}
| sabriarabacioglu/engerek | model/model-impl/src/main/java/com/evolveum/midpoint/model/impl/lens/LensProjectionContext.java | Java | apache-2.0 | 42,728 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.sdk.io.kafka;
import static org.apache.beam.sdk.metrics.MetricResultsMatchers.attemptedMetricsResult;
import static org.apache.beam.sdk.transforms.display.DisplayDataMatchers.hasDisplayItem;
import static org.hamcrest.Matchers.hasItem;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import javax.annotation.Nullable;
import org.apache.beam.sdk.Pipeline.PipelineExecutionException;
import org.apache.beam.sdk.PipelineResult;
import org.apache.beam.sdk.coders.BigEndianIntegerCoder;
import org.apache.beam.sdk.coders.BigEndianLongCoder;
import org.apache.beam.sdk.coders.CoderRegistry;
import org.apache.beam.sdk.coders.InstantCoder;
import org.apache.beam.sdk.coders.StringUtf8Coder;
import org.apache.beam.sdk.coders.VarLongCoder;
import org.apache.beam.sdk.io.Read;
import org.apache.beam.sdk.io.UnboundedSource;
import org.apache.beam.sdk.io.UnboundedSource.UnboundedReader;
import org.apache.beam.sdk.io.kafka.serialization.InstantDeserializer;
import org.apache.beam.sdk.metrics.GaugeResult;
import org.apache.beam.sdk.metrics.MetricName;
import org.apache.beam.sdk.metrics.MetricNameFilter;
import org.apache.beam.sdk.metrics.MetricQueryResults;
import org.apache.beam.sdk.metrics.MetricResult;
import org.apache.beam.sdk.metrics.MetricsFilter;
import org.apache.beam.sdk.metrics.SinkMetrics;
import org.apache.beam.sdk.metrics.SourceMetrics;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.testing.PAssert;
import org.apache.beam.sdk.testing.TestPipeline;
import org.apache.beam.sdk.transforms.Count;
import org.apache.beam.sdk.transforms.Distinct;
import org.apache.beam.sdk.transforms.DoFn;
import org.apache.beam.sdk.transforms.Flatten;
import org.apache.beam.sdk.transforms.Max;
import org.apache.beam.sdk.transforms.Min;
import org.apache.beam.sdk.transforms.ParDo;
import org.apache.beam.sdk.transforms.SerializableFunction;
import org.apache.beam.sdk.transforms.Values;
import org.apache.beam.sdk.transforms.display.DisplayData;
import org.apache.beam.sdk.util.CoderUtils;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.PCollectionList;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.utils.Utils;
import org.hamcrest.collection.IsIterableContainingInAnyOrder;
import org.hamcrest.collection.IsIterableWithSize;
import org.joda.time.Instant;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Tests of {@link KafkaIO}.
* Run with 'mvn test -Dkafka.clients.version=0.10.1.1',
* or 'mvn test -Dkafka.clients.version=0.9.0.1' for either Kafka client version.
*/
@RunWith(JUnit4.class)
public class KafkaIOTest {
private static final Logger LOG = LoggerFactory.getLogger(KafkaIOTest.class);
/*
* The tests below borrow code and structure from CountingSourceTest. In addition, they verify
* that the reader interleaves records from multiple partitions.
*
* Other tests to consider:
* - test KafkaRecordCoder
*/
@Rule
public final transient TestPipeline p = TestPipeline.create();
@Rule
public ExpectedException thrown = ExpectedException.none();
// Creates a mock consumer populated with records distributed among the given topics, each with
// the given number of partitions. Records are assigned in round-robin order among the partitions.
private static MockConsumer<byte[], byte[]> mkMockConsumer(
List<String> topics, int partitionsPerTopic, int numElements,
OffsetResetStrategy offsetResetStrategy) {
final List<TopicPartition> partitions = new ArrayList<>();
final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> records = new HashMap<>();
Map<String, List<PartitionInfo>> partitionMap = new HashMap<>();
for (String topic : topics) {
List<PartitionInfo> partIds = new ArrayList<>(partitionsPerTopic);
for (int i = 0; i < partitionsPerTopic; i++) {
TopicPartition tp = new TopicPartition(topic, i);
partitions.add(tp);
partIds.add(new PartitionInfo(topic, i, null, null, null));
records.put(tp, new ArrayList<ConsumerRecord<byte[], byte[]>>());
}
partitionMap.put(topic, partIds);
}
int numPartitions = partitions.size();
final long[] offsets = new long[numPartitions];
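// offsets[i] is the next offset to hand out in partition i. Combined with the round-robin
// assignment below, record i lands in partition (i % numPartitions) at offset (i / numPartitions).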
for (int i = 0; i < numElements; i++) {
int pIdx = i % numPartitions;
TopicPartition tp = partitions.get(pIdx);
records.get(tp).add(
new ConsumerRecord<>(
tp.topic(),
tp.partition(),
offsets[pIdx]++,
ByteBuffer.wrap(new byte[4]).putInt(i).array(), // key is 4 byte record id
ByteBuffer.wrap(new byte[8]).putLong(i).array())); // value is 8 byte record id
}
// This is updated when reader assigns partitions.
final AtomicReference<List<TopicPartition>> assignedPartitions =
new AtomicReference<>(Collections.<TopicPartition>emptyList());
final MockConsumer<byte[], byte[]> consumer =
new MockConsumer<byte[], byte[]>(offsetResetStrategy) {
// Override assign() in order to set offset limits and to save the assigned partitions.
// The keyword '@Override' is omitted here so that this works with both Kafka client 0.9 and 0.10:
// 1. SpEL can find this method whether the parameter is declared as List or Collection;
// 2. List extends Collection, so super.assign() resolves to either assign(List)
//    or assign(Collection).
public void assign(final List<TopicPartition> assigned) {
super.assign(assigned);
assignedPartitions.set(ImmutableList.copyOf(assigned));
for (TopicPartition tp : assigned) {
updateBeginningOffsets(ImmutableMap.of(tp, 0L));
updateEndOffsets(ImmutableMap.of(tp, (long) records.get(tp).size()));
}
}
// Override offsetsForTimes() in order to look up the offsets by timestamp.
// The keyword '@Override' is omitted here because Kafka client versions before 0.10.1.0 do not
// have this method.
// It should return Map<TopicPartition, OffsetAndTimestamp>, but versions before 0.10.1.0 do
// not have the OffsetAndTimestamp class, so we return a raw type and use reflection
// here.
@SuppressWarnings("unchecked")
public Map offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch) {
HashMap<TopicPartition, Object> result = new HashMap<>();
try {
Class<?> cls = Class.forName("org.apache.kafka.clients.consumer.OffsetAndTimestamp");
// OffsetAndTimestamp(long offset, long timestamp)
Constructor constructor = cls.getDeclaredConstructor(long.class, long.class);
// In test scope, timestamp == offset.
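// Illustrative example: with 50 records in a partition, searching for timestamp 25 yields
// OffsetAndTimestamp(25, 25); the two constructor arguments below are interchangeable precisely
// because timestamp == offset here. Searching at or past the partition end leaves 'offset' null,
// which makes the reflective construction fail and surfaces as a runtime error to the caller.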
for (Map.Entry<TopicPartition, Long> entry : timestampsToSearch.entrySet()) {
long maxOffset = offsets[partitions.indexOf(entry.getKey())];
Long offset = entry.getValue();
if (offset >= maxOffset) {
offset = null;
}
result.put(
entry.getKey(), constructor.newInstance(entry.getValue(), offset));
}
return result;
} catch (ClassNotFoundException | IllegalAccessException
| InstantiationException | NoSuchMethodException | InvocationTargetException e) {
throw new RuntimeException(e);
}
}
};
for (String topic : topics) {
consumer.updatePartitions(topic, partitionMap.get(topic));
}
// MockConsumer does not maintain any relationship between partition seek position and the
// records added. e.g. if we add 10 records to a partition and then seek to end of the
// partition, MockConsumer is still going to return the 10 records in next poll. It is
// our responsibility to make sure currently enqueued records sync with partition offsets.
// The following task will be called inside each invocation to MockConsumer.poll().
// We enqueue only the records with the offset >= partition's current position.
Runnable recordEnqueueTask = new Runnable() {
@Override
public void run() {
// add all the records with offset >= current partition position.
for (TopicPartition tp : assignedPartitions.get()) {
long curPos = consumer.position(tp);
for (ConsumerRecord<byte[], byte[]> r : records.get(tp)) {
if (r.offset() >= curPos) {
consumer.addRecord(r);
}
}
}
consumer.schedulePollTask(this);
}
};
consumer.schedulePollTask(recordEnqueueTask);
return consumer;
}
private static class ConsumerFactoryFn
implements SerializableFunction<Map<String, Object>, Consumer<byte[], byte[]>> {
private final List<String> topics;
private final int partitionsPerTopic;
private final int numElements;
private final OffsetResetStrategy offsetResetStrategy;
public ConsumerFactoryFn(List<String> topics,
int partitionsPerTopic,
int numElements,
OffsetResetStrategy offsetResetStrategy) {
this.topics = topics;
this.partitionsPerTopic = partitionsPerTopic;
this.numElements = numElements;
this.offsetResetStrategy = offsetResetStrategy;
}
@Override
public Consumer<byte[], byte[]> apply(Map<String, Object> config) {
return mkMockConsumer(topics, partitionsPerTopic, numElements, offsetResetStrategy);
}
}
private static KafkaIO.Read<Integer, Long> mkKafkaReadTransform(
int numElements,
@Nullable SerializableFunction<KV<Integer, Long>, Instant> timestampFn) {
return mkKafkaReadTransform(numElements, numElements, timestampFn);
}
/**
* Creates a consumer with two topics, with 10 partitions each.
* The numElements records are assigned round-robin across all 20 partitions.
*/
private static KafkaIO.Read<Integer, Long> mkKafkaReadTransform(
int numElements,
int maxNumRecords,
@Nullable SerializableFunction<KV<Integer, Long>, Instant> timestampFn) {
List<String> topics = ImmutableList.of("topic_a", "topic_b");
KafkaIO.Read<Integer, Long> reader = KafkaIO.<Integer, Long>read()
.withBootstrapServers("myServer1:9092,myServer2:9092")
.withTopics(topics)
.withConsumerFactoryFn(new ConsumerFactoryFn(
topics, 10, numElements, OffsetResetStrategy.EARLIEST)) // 20 partitions
.withKeyDeserializer(IntegerDeserializer.class)
.withValueDeserializer(LongDeserializer.class)
.withMaxNumRecords(maxNumRecords);
if (timestampFn != null) {
return reader.withTimestampFn(timestampFn);
} else {
return reader;
}
}
private static class AssertMultipleOf implements SerializableFunction<Iterable<Long>, Void> {
private final int num;
public AssertMultipleOf(int num) {
this.num = num;
}
@Override
public Void apply(Iterable<Long> values) {
for (Long v : values) {
assertEquals(0, v % num);
}
return null;
}
}
public static void addCountingAsserts(PCollection<Long> input, long numElements) {
// Count == numElements
// Unique count == numElements
// Min == 0
// Max == numElements-1
addCountingAsserts(input, numElements, numElements, 0L, numElements - 1);
}
public static void addCountingAsserts(
PCollection<Long> input, long count, long uniqueCount, long min, long max) {
PAssert
.thatSingleton(input.apply("Count", Count.<Long>globally()))
.isEqualTo(count);
PAssert
.thatSingleton(input.apply(Distinct.<Long>create())
.apply("UniqueCount", Count.<Long>globally()))
.isEqualTo(uniqueCount);
PAssert
.thatSingleton(input.apply("Min", Min.<Long>globally()))
.isEqualTo(min);
PAssert
.thatSingleton(input.apply("Max", Max.<Long>globally()))
.isEqualTo(max);
}
@Test
public void testUnboundedSource() {
int numElements = 1000;
PCollection<Long> input = p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply(Values.<Long>create());
addCountingAsserts(input, numElements);
p.run();
}
@Test
public void testUnreachableKafkaBrokers() {
// Expect an exception when the Kafka brokers are not reachable on the workers.
// We specify partitions explicitly so that splitting does not involve server interaction.
// Set the request timeout to 10ms so that the test does not take long.
thrown.expect(Exception.class);
thrown.expectMessage("Reader-0: Timeout while initializing partition 'test-0'");
int numElements = 1000;
PCollection<Long> input = p
.apply(KafkaIO.<Integer, Long>read()
.withBootstrapServers("8.8.8.8:9092") // Google public DNS ip.
.withTopicPartitions(ImmutableList.of(new TopicPartition("test", 0)))
.withKeyDeserializer(IntegerDeserializer.class)
.withValueDeserializer(LongDeserializer.class)
.updateConsumerProperties(ImmutableMap.<String, Object>of(
ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 10,
ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 5,
ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 8,
ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 8))
.withMaxNumRecords(10)
.withoutMetadata())
.apply(Values.<Long>create());
addCountingAsserts(input, numElements);
p.run();
}
@Test
public void testUnboundedSourceWithSingleTopic() {
// same as testUnboundedSource, but with single topic
int numElements = 1000;
String topic = "my_topic";
KafkaIO.Read<Integer, Long> reader = KafkaIO.<Integer, Long>read()
.withBootstrapServers("none")
.withTopic("my_topic")
.withConsumerFactoryFn(new ConsumerFactoryFn(
ImmutableList.of(topic), 10, numElements, OffsetResetStrategy.EARLIEST))
.withMaxNumRecords(numElements)
.withKeyDeserializer(IntegerDeserializer.class)
.withValueDeserializer(LongDeserializer.class);
PCollection<Long> input = p
.apply(reader.withoutMetadata())
.apply(Values.<Long>create());
addCountingAsserts(input, numElements);
p.run();
}
@Test
public void testUnboundedSourceWithExplicitPartitions() {
int numElements = 1000;
List<String> topics = ImmutableList.of("test");
KafkaIO.Read<byte[], Long> reader = KafkaIO.<byte[], Long>read()
.withBootstrapServers("none")
.withTopicPartitions(ImmutableList.of(new TopicPartition("test", 5)))
.withConsumerFactoryFn(new ConsumerFactoryFn(
topics, 10, numElements, OffsetResetStrategy.EARLIEST)) // 10 partitions
.withKeyDeserializer(ByteArrayDeserializer.class)
.withValueDeserializer(LongDeserializer.class)
.withMaxNumRecords(numElements / 10);
PCollection<Long> input = p
.apply(reader.withoutMetadata())
.apply(Values.<Long>create());
// assert that every element is a multiple of 5.
PAssert
.that(input)
.satisfies(new AssertMultipleOf(5));
PAssert
.thatSingleton(input.apply(Count.<Long>globally()))
.isEqualTo(numElements / 10L);
p.run();
}
private static class ElementValueDiff extends DoFn<Long, Long> {
@ProcessElement
public void processElement(ProcessContext c) throws Exception {
c.output(c.element() - c.timestamp().getMillis());
}
}
@Test
public void testUnboundedSourceTimestamps() {
int numElements = 1000;
PCollection<Long> input = p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()).withoutMetadata())
.apply(Values.<Long>create());
addCountingAsserts(input, numElements);
PCollection<Long> diffs = input
.apply("TimestampDiff", ParDo.of(new ElementValueDiff()))
.apply("DistinctTimestamps", Distinct.<Long>create());
// This assert also confirms that diffs only has one unique value.
PAssert.thatSingleton(diffs).isEqualTo(0L);
p.run();
}
private static class RemoveKafkaMetadata<K, V> extends DoFn<KafkaRecord<K, V>, KV<K, V>> {
@ProcessElement
public void processElement(ProcessContext ctx) throws Exception {
ctx.output(ctx.element().getKV());
}
}
@Test
public void testUnboundedSourceSplits() throws Exception {
int numElements = 1000;
int numSplits = 10;
// Coders must be specified explicitly here due to the way the transform
// is used in the test.
UnboundedSource<KafkaRecord<Integer, Long>, ?> initial =
mkKafkaReadTransform(numElements, null)
.withKeyDeserializerAndCoder(IntegerDeserializer.class, BigEndianIntegerCoder.of())
.withValueDeserializerAndCoder(LongDeserializer.class, BigEndianLongCoder.of())
.makeSource();
List<? extends UnboundedSource<KafkaRecord<Integer, Long>, ?>> splits =
initial.split(numSplits, p.getOptions());
assertEquals("Expected exact splitting", numSplits, splits.size());
long elementsPerSplit = numElements / numSplits;
assertEquals("Expected even splits", numElements, elementsPerSplit * numSplits);
PCollectionList<Long> pcollections = PCollectionList.empty(p);
for (int i = 0; i < splits.size(); ++i) {
pcollections = pcollections.and(
p.apply("split" + i, Read.from(splits.get(i)).withMaxNumRecords(elementsPerSplit))
.apply("Remove Metadata " + i, ParDo.of(new RemoveKafkaMetadata<Integer, Long>()))
.apply("collection " + i, Values.<Long>create()));
}
PCollection<Long> input = pcollections.apply(Flatten.<Long>pCollections());
addCountingAsserts(input, numElements);
p.run();
}
/**
* A timestamp function that uses the given value as the timestamp.
*/
private static class ValueAsTimestampFn
implements SerializableFunction<KV<Integer, Long>, Instant> {
@Override
public Instant apply(KV<Integer, Long> input) {
return new Instant(input.getValue());
}
}
// Kafka records are read in a separate thread inside the reader. As a result, advance() might not
// see a record yet, even from the mock consumer, especially for the first record.
// This is a helper method to loop until we read a record.
private static void advanceOnce(UnboundedReader<?> reader, boolean isStarted) throws IOException {
if (!isStarted && reader.start()) {
return;
}
while (!reader.advance()) {
// Very rarely will there be more than one attempt.
// In case of a bug we might end up looping forever, and the test will fail with a timeout.
// Avoid hard cpu spinning in case of a test failure.
try {
Thread.sleep(1);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
}
@Test
public void testUnboundedSourceCheckpointMark() throws Exception {
int numElements = 85; // 85 to make sure some partitions have more records than others.
// create a single split:
UnboundedSource<KafkaRecord<Integer, Long>, KafkaCheckpointMark> source =
mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.makeSource()
.split(1, PipelineOptionsFactory.create())
.get(0);
UnboundedReader<KafkaRecord<Integer, Long>> reader = source.createReader(null, null);
final int numToSkip = 20; // one from each partition.
// advance numToSkip elements
for (int i = 0; i < numToSkip; ++i) {
advanceOnce(reader, i > 0);
}
// Confirm that we get the expected element in sequence before checkpointing.
assertEquals(numToSkip - 1, (long) reader.getCurrent().getKV().getValue());
assertEquals(numToSkip - 1, reader.getCurrentTimestamp().getMillis());
// Checkpoint and restart, and confirm that the source continues correctly.
KafkaCheckpointMark mark = CoderUtils.clone(
source.getCheckpointMarkCoder(), (KafkaCheckpointMark) reader.getCheckpointMark());
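// Cloning through the coder round-trips the checkpoint mark through encode/decode, mimicking
// how a runner would persist and restore it.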
reader = source.createReader(null, mark);
// Confirm that we get the next elements in sequence.
// This also confirms that the reader interleaves records from all the partitions.
for (int i = numToSkip; i < numElements; i++) {
advanceOnce(reader, i > numToSkip);
assertEquals(i, (long) reader.getCurrent().getKV().getValue());
assertEquals(i, reader.getCurrentTimestamp().getMillis());
}
}
@Test
public void testUnboundedSourceCheckpointMarkWithEmptyPartitions() throws Exception {
// Similar to testUnboundedSourceCheckpointMark(), but verifies that the source resumes
// properly from empty partitions, without missing messages added since the checkpoint.
// Initialize consumer with fewer elements than number of partitions so that some are empty.
int initialNumElements = 5;
UnboundedSource<KafkaRecord<Integer, Long>, KafkaCheckpointMark> source =
mkKafkaReadTransform(initialNumElements, new ValueAsTimestampFn())
.makeSource()
.split(1, PipelineOptionsFactory.create())
.get(0);
UnboundedReader<KafkaRecord<Integer, Long>> reader = source.createReader(null, null);
for (int l = 0; l < initialNumElements; ++l) {
advanceOnce(reader, l > 0);
}
// Checkpoint and restart, and confirm that the source continues correctly.
KafkaCheckpointMark mark = CoderUtils.clone(
source.getCheckpointMarkCoder(), (KafkaCheckpointMark) reader.getCheckpointMark());
// Create another source with a MockConsumer using OffsetResetStrategy.LATEST. This ensures that
// the reader has to explicitly seek to the first offset for partitions that were empty.
int numElements = 100; // all the 20 partitions will have elements
List<String> topics = ImmutableList.of("topic_a", "topic_b");
source = KafkaIO.<Integer, Long>read()
.withBootstrapServers("none")
.withTopics(topics)
.withConsumerFactoryFn(new ConsumerFactoryFn(
topics, 10, numElements, OffsetResetStrategy.LATEST))
.withKeyDeserializer(IntegerDeserializer.class)
.withValueDeserializer(LongDeserializer.class)
.withMaxNumRecords(numElements)
.withTimestampFn(new ValueAsTimestampFn())
.makeSource()
.split(1, PipelineOptionsFactory.create())
.get(0);
reader = source.createReader(null, mark);
// Verify in any order. As the partitions are unevenly read, the returned records are not in a
// simple order. Note that testUnboundedSourceCheckpointMark() verifies round-robin order.
List<Long> expected = new ArrayList<>();
List<Long> actual = new ArrayList<>();
for (long i = initialNumElements; i < numElements; i++) {
advanceOnce(reader, i > initialNumElements);
expected.add(i);
actual.add(reader.getCurrent().getKV().getValue());
}
assertThat(actual, IsIterableContainingInAnyOrder.containsInAnyOrder(expected.toArray()));
}
@Test
public void testUnboundedSourceMetrics() {
int numElements = 1000;
String readStep = "readFromKafka";
p.apply(readStep,
mkKafkaReadTransform(numElements, new ValueAsTimestampFn()).withoutMetadata());
PipelineResult result = p.run();
String splitId = "0";
MetricName elementsRead = SourceMetrics.elementsRead().getName();
MetricName elementsReadBySplit = SourceMetrics.elementsReadBySplit(splitId).getName();
MetricName bytesRead = SourceMetrics.bytesRead().getName();
MetricName bytesReadBySplit = SourceMetrics.bytesReadBySplit(splitId).getName();
MetricName backlogElementsOfSplit = SourceMetrics.backlogElementsOfSplit(splitId).getName();
MetricName backlogBytesOfSplit = SourceMetrics.backlogBytesOfSplit(splitId).getName();
MetricQueryResults metrics = result.metrics().queryMetrics(
MetricsFilter.builder().build());
Iterable<MetricResult<Long>> counters = metrics.counters();
assertThat(counters, hasItem(attemptedMetricsResult(
elementsRead.namespace(),
elementsRead.name(),
readStep,
1000L)));
assertThat(counters, hasItem(attemptedMetricsResult(
elementsReadBySplit.namespace(),
elementsReadBySplit.name(),
readStep,
1000L)));
assertThat(counters, hasItem(attemptedMetricsResult(
bytesRead.namespace(),
bytesRead.name(),
readStep,
12000L)));
assertThat(counters, hasItem(attemptedMetricsResult(
bytesReadBySplit.namespace(),
bytesReadBySplit.name(),
readStep,
12000L)));
MetricQueryResults backlogElementsMetrics =
result.metrics().queryMetrics(
MetricsFilter.builder()
.addNameFilter(
MetricNameFilter.named(
backlogElementsOfSplit.namespace(),
backlogElementsOfSplit.name()))
.build());
// Since gauge values may be inconsistent in some environments, assert only on their existence.
assertThat(backlogElementsMetrics.gauges(),
IsIterableWithSize.<MetricResult<GaugeResult>>iterableWithSize(1));
MetricQueryResults backlogBytesMetrics =
result.metrics().queryMetrics(
MetricsFilter.builder()
.addNameFilter(
MetricNameFilter.named(
backlogBytesOfSplit.namespace(),
backlogBytesOfSplit.name()))
.build());
// Since gauge values may be inconsistent in some environments, assert only on their existence.
assertThat(backlogBytesMetrics.gauges(),
IsIterableWithSize.<MetricResult<GaugeResult>>iterableWithSize(1));
}
@Test
public void testSink() throws Exception {
// Simply read from the Kafka source and write to the Kafka sink, then verify that the records
// are correctly published to the mock Kafka producer.
int numElements = 1000;
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
ProducerSendCompletionThread completionThread =
new ProducerSendCompletionThread(producerWrapper.mockProducer).start();
String topic = "test";
p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply(KafkaIO.<Integer, Long>write()
.withBootstrapServers("none")
.withTopic(topic)
.withKeySerializer(IntegerSerializer.class)
.withValueSerializer(LongSerializer.class)
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));
p.run();
completionThread.shutdown();
verifyProducerRecords(producerWrapper.mockProducer, topic, numElements, false);
}
}
@Test
public void testValuesSink() throws Exception {
// Similar to testSink(), but uses the values() interface.
int numElements = 1000;
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
ProducerSendCompletionThread completionThread =
new ProducerSendCompletionThread(producerWrapper.mockProducer).start();
String topic = "test";
p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply(Values.<Long>create()) // there are no keys
.apply(KafkaIO.<Integer, Long>write()
.withBootstrapServers("none")
.withTopic(topic)
.withValueSerializer(LongSerializer.class)
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey))
.values());
p.run();
completionThread.shutdown();
verifyProducerRecords(producerWrapper.mockProducer, topic, numElements, true);
}
}
@Test
public void testEOSink() {
// testSink() with EOS enabled.
    // This does not actually inject retries in a stage to test exactly-once semantics.
    // It mainly exercises the code in the normal flow without retries.
    // Ideally we should test the EOS sink by triggering replays of messages between stages.
    // It is not feasible to test such retries with the direct runner. When DoFnTester supports
    // state, we can test the KafkaEOWriter DoFn directly to ensure it handles retries correctly.
if (!ProducerSpEL.supportsTransactions()) {
LOG.warn("testEOSink() is disabled as Kafka client version does not support transactions.");
return;
}
int numElements = 1000;
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
ProducerSendCompletionThread completionThread =
new ProducerSendCompletionThread(producerWrapper.mockProducer).start();
String topic = "test";
p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply(KafkaIO.<Integer, Long>write()
.withBootstrapServers("none")
.withTopic(topic)
.withKeySerializer(IntegerSerializer.class)
.withValueSerializer(LongSerializer.class)
.withEOS(1, "test")
.withConsumerFactoryFn(new ConsumerFactoryFn(
Lists.newArrayList(topic), 10, 10, OffsetResetStrategy.EARLIEST))
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));
p.run();
completionThread.shutdown();
verifyProducerRecords(producerWrapper.mockProducer, topic, numElements, false);
}
}
@Test
public void testSinkWithSendErrors() throws Throwable {
// similar to testSink(), except that up to 10 of the send calls to producer will fail
// asynchronously.
// TODO: Ideally we want the pipeline to run to completion by retrying bundles that fail.
// We limit the number of errors injected to 10 below. This would reflect a real streaming
    // pipeline. But I am not sure how to achieve that. For now, expect an exception:
thrown.expect(InjectedErrorException.class);
thrown.expectMessage("Injected Error #1");
int numElements = 1000;
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
ProducerSendCompletionThread completionThreadWithErrors =
new ProducerSendCompletionThread(producerWrapper.mockProducer, 10, 100).start();
String topic = "test";
p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply(KafkaIO.<Integer, Long>write()
.withBootstrapServers("none")
.withTopic(topic)
.withKeySerializer(IntegerSerializer.class)
.withValueSerializer(LongSerializer.class)
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));
try {
p.run();
} catch (PipelineExecutionException e) {
// throwing inner exception helps assert that first exception is thrown from the Sink
throw e.getCause().getCause();
} finally {
completionThreadWithErrors.shutdown();
}
}
}
@Test
public void testUnboundedSourceStartReadTime() {
assumeTrue(new ConsumerSpEL().hasOffsetsForTimes());
int numElements = 1000;
    // In this MockConsumer, each element's timestamp equals its offset, and there are 20
    // partitions, so setting this startTime reads half of the elements.
int startTime = numElements / 20 / 2;
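    // i.e. 1000 / 20 / 2 = 25: each of the 20 partitions holds offsets 0..49, so
    // reading from timestamp 25 onward yields 25 records per partition (500 total).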
int maxNumRecords = numElements / 2;
PCollection<Long> input = p
.apply(mkKafkaReadTransform(numElements, maxNumRecords, new ValueAsTimestampFn())
.withStartReadTime(new Instant(startTime))
.withoutMetadata())
.apply(Values.<Long>create());
addCountingAsserts(input, maxNumRecords, maxNumRecords, maxNumRecords, numElements - 1);
p.run();
}
@Rule public ExpectedException noMessagesException = ExpectedException.none();
@Test
public void testUnboundedSourceStartReadTimeException() {
assumeTrue(new ConsumerSpEL().hasOffsetsForTimes());
noMessagesException.expect(RuntimeException.class);
int numElements = 1000;
    // In this MockConsumer, each element's timestamp equals its offset, and there are 20
    // partitions, so setting this startTime cannot read any element.
int startTime = numElements / 20;
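    // i.e. 1000 / 20 = 50: offsets (and hence timestamps) only reach 49 per
    // partition, so no record is at or after this start time.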
p.apply(mkKafkaReadTransform(numElements, numElements, new ValueAsTimestampFn())
.withStartReadTime(new Instant(startTime))
.withoutMetadata())
.apply(Values.<Long>create());
p.run();
}
@Test
public void testSourceDisplayData() {
KafkaIO.Read<Integer, Long> read = mkKafkaReadTransform(10, null);
DisplayData displayData = DisplayData.from(read);
assertThat(displayData, hasDisplayItem("topics", "topic_a,topic_b"));
assertThat(displayData, hasDisplayItem("enable.auto.commit", false));
assertThat(displayData, hasDisplayItem("bootstrap.servers", "myServer1:9092,myServer2:9092"));
assertThat(displayData, hasDisplayItem("auto.offset.reset", "latest"));
assertThat(displayData, hasDisplayItem("receive.buffer.bytes", 524288));
}
@Test
public void testSourceWithExplicitPartitionsDisplayData() {
KafkaIO.Read<byte[], Long> read = KafkaIO.<byte[], Long>read()
.withBootstrapServers("myServer1:9092,myServer2:9092")
.withTopicPartitions(ImmutableList.of(new TopicPartition("test", 5),
new TopicPartition("test", 6)))
.withConsumerFactoryFn(new ConsumerFactoryFn(
Lists.newArrayList("test"), 10, 10, OffsetResetStrategy.EARLIEST)) // 10 partitions
.withKeyDeserializer(ByteArrayDeserializer.class)
.withValueDeserializer(LongDeserializer.class);
DisplayData displayData = DisplayData.from(read);
assertThat(displayData, hasDisplayItem("topicPartitions", "test-5,test-6"));
assertThat(displayData, hasDisplayItem("enable.auto.commit", false));
assertThat(displayData, hasDisplayItem("bootstrap.servers", "myServer1:9092,myServer2:9092"));
assertThat(displayData, hasDisplayItem("auto.offset.reset", "latest"));
assertThat(displayData, hasDisplayItem("receive.buffer.bytes", 524288));
}
@Test
public void testSinkDisplayData() {
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
KafkaIO.Write<Integer, Long> write = KafkaIO.<Integer, Long>write()
.withBootstrapServers("myServerA:9092,myServerB:9092")
.withTopic("myTopic")
.withValueSerializer(LongSerializer.class)
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey));
DisplayData displayData = DisplayData.from(write);
assertThat(displayData, hasDisplayItem("topic", "myTopic"));
assertThat(displayData, hasDisplayItem("bootstrap.servers", "myServerA:9092,myServerB:9092"));
assertThat(displayData, hasDisplayItem("retries", 3));
}
}
// interface for testing coder inference
private interface DummyInterface<T> {
}
// interface for testing coder inference
private interface DummyNonparametricInterface {
}
// class for testing coder inference
private static class DeserializerWithInterfaces
implements DummyInterface<String>, DummyNonparametricInterface,
Deserializer<Long> {
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
}
@Override
public Long deserialize(String topic, byte[] bytes) {
return 0L;
}
@Override
public void close() {
}
}
  // class for which a coder cannot be inferred
private static class NonInferableObject {
}
// class for testing coder inference
private static class NonInferableObjectDeserializer
implements Deserializer<NonInferableObject> {
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
}
@Override
public NonInferableObject deserialize(String topic, byte[] bytes) {
return new NonInferableObject();
}
@Override
public void close() {
}
}
@Test
public void testInferKeyCoder() {
CoderRegistry registry = CoderRegistry.createDefault();
assertTrue(KafkaIO.inferCoder(registry, LongDeserializer.class).getValueCoder()
instanceof VarLongCoder);
assertTrue(KafkaIO.inferCoder(registry, StringDeserializer.class).getValueCoder()
instanceof StringUtf8Coder);
assertTrue(KafkaIO.inferCoder(registry, InstantDeserializer.class).getValueCoder()
instanceof InstantCoder);
assertTrue(KafkaIO.inferCoder(registry, DeserializerWithInterfaces.class).getValueCoder()
instanceof VarLongCoder);
}
@Rule public ExpectedException cannotInferException = ExpectedException.none();
@Test
public void testInferKeyCoderFailure() throws Exception {
cannotInferException.expect(RuntimeException.class);
CoderRegistry registry = CoderRegistry.createDefault();
KafkaIO.inferCoder(registry, NonInferableObjectDeserializer.class);
}
@Test
public void testSinkMetrics() throws Exception {
    // Simply read from the Kafka source and write to the Kafka sink. Then verify the metrics are reported.
int numElements = 1000;
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
ProducerSendCompletionThread completionThread =
new ProducerSendCompletionThread(producerWrapper.mockProducer).start();
String topic = "test";
p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply("writeToKafka", KafkaIO.<Integer, Long>write()
.withBootstrapServers("none")
.withTopic(topic)
.withKeySerializer(IntegerSerializer.class)
.withValueSerializer(LongSerializer.class)
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));
PipelineResult result = p.run();
MetricName elementsWritten = SinkMetrics.elementsWritten().getName();
MetricQueryResults metrics = result.metrics().queryMetrics(
MetricsFilter.builder()
.addNameFilter(MetricNameFilter.inNamespace(elementsWritten.namespace()))
.build());
assertThat(metrics.counters(), hasItem(
attemptedMetricsResult(
elementsWritten.namespace(),
elementsWritten.name(),
"writeToKafka",
1000L)));
completionThread.shutdown();
}
}
private static void verifyProducerRecords(MockProducer<Integer, Long> mockProducer,
String topic, int numElements, boolean keyIsAbsent) {
    // verify that the expected messages are written to Kafka
List<ProducerRecord<Integer, Long>> sent = mockProducer.history();
// sort by values
Collections.sort(sent, new Comparator<ProducerRecord<Integer, Long>>() {
@Override
public int compare(ProducerRecord<Integer, Long> o1, ProducerRecord<Integer, Long> o2) {
return Long.compare(o1.value(), o2.value());
}
});
for (int i = 0; i < numElements; i++) {
ProducerRecord<Integer, Long> record = sent.get(i);
assertEquals(topic, record.topic());
if (keyIsAbsent) {
assertNull(record.key());
} else {
assertEquals(i, record.key().intValue());
}
assertEquals(i, record.value().longValue());
}
}
/**
   * A wrapper over MockProducer. It also places the mock producer in the global MOCK_PRODUCER_MAP.
   * The map is needed so that the producer returned by ProducerFactoryFn during the pipeline run
   * can be used for verification after the test. We also override the {@code flush()} method in
   * MockProducer so that the test can control the behavior of {@code send()} (e.g. to inject errors).
*/
private static class MockProducerWrapper implements AutoCloseable {
final String producerKey;
final MockProducer<Integer, Long> mockProducer;
    // MockProducer has a "closed" method starting with version 0.11.
private static Method closedMethod;
static {
try {
closedMethod = MockProducer.class.getMethod("closed");
} catch (NoSuchMethodException e) {
closedMethod = null;
}
}
MockProducerWrapper() {
producerKey = String.valueOf(ThreadLocalRandom.current().nextLong());
mockProducer = new MockProducer<Integer, Long>(
false, // disable synchronous completion of send. see ProducerSendCompletionThread below.
new IntegerSerializer(),
new LongSerializer()) {
      // override flush() so that it does not complete all the waiting sends, giving
      // ProducerSendCompletionThread a chance to inject errors.
@Override
public void flush() {
while (completeNext()) {
// there are some uncompleted records. let the completion thread handle them.
try {
Thread.sleep(10);
} catch (InterruptedException e) {
// ok to retry.
}
}
}
};
      // Add the producer to the global map so that the producer factory function can access it.
assertNull(MOCK_PRODUCER_MAP.putIfAbsent(producerKey, mockProducer));
}
public void close() {
MOCK_PRODUCER_MAP.remove(producerKey);
try {
if (closedMethod == null || !((Boolean) closedMethod.invoke(mockProducer))) {
mockProducer.close();
}
} catch (Exception e) { // Not expected.
throw new RuntimeException(e);
}
}
}
private static final ConcurrentMap<String, MockProducer<Integer, Long>> MOCK_PRODUCER_MAP =
new ConcurrentHashMap<>();
private static class ProducerFactoryFn
implements SerializableFunction<Map<String, Object>, Producer<Integer, Long>> {
final String producerKey;
ProducerFactoryFn(String producerKey) {
this.producerKey = producerKey;
}
@SuppressWarnings("unchecked")
@Override
public Producer<Integer, Long> apply(Map<String, Object> config) {
// Make sure the config is correctly set up for serializers.
// There may not be a key serializer if we're interested only in values.
if (config.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG) != null) {
Utils.newInstance(
((Class<?>) config.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG))
.asSubclass(Serializer.class)
).configure(config, true);
}
Utils.newInstance(
((Class<?>) config.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG))
.asSubclass(Serializer.class)
).configure(config, false);
      // Returning the same producer for each instance in a pipeline seems to work fine currently.
// If DirectRunner creates multiple DoFn instances for sinks, we might need to handle
// it appropriately. I.e. allow multiple producers for each producerKey and concatenate
// all the messages written to each producer for verification after the pipeline finishes.
return MOCK_PRODUCER_MAP.get(producerKey);
}
}
private static class InjectedErrorException extends RuntimeException {
InjectedErrorException(String message) {
super(message);
}
}
/**
* We start MockProducer with auto-completion disabled. That implies a record is not marked sent
* until #completeNext() is called on it. This class starts a thread to asynchronously 'complete'
   * the sends. During completion, we can also make those requests fail. This error injection
* is used in one of the tests.
*/
private static class ProducerSendCompletionThread {
private final MockProducer<Integer, Long> mockProducer;
private final int maxErrors;
private final int errorFrequency;
private final AtomicBoolean done = new AtomicBoolean(false);
private final ExecutorService injectorThread;
private int numCompletions = 0;
ProducerSendCompletionThread(MockProducer<Integer, Long> mockProducer) {
// complete everything successfully
this(mockProducer, 0, 0);
}
ProducerSendCompletionThread(MockProducer<Integer, Long> mockProducer,
int maxErrors,
int errorFrequency) {
this.mockProducer = mockProducer;
this.maxErrors = maxErrors;
this.errorFrequency = errorFrequency;
injectorThread = Executors.newSingleThreadExecutor();
}
ProducerSendCompletionThread start() {
injectorThread.submit(new Runnable() {
@Override
public void run() {
int errorsInjected = 0;
while (!done.get()) {
boolean successful;
if (errorsInjected < maxErrors && ((numCompletions + 1) % errorFrequency) == 0) {
successful = mockProducer.errorNext(
new InjectedErrorException("Injected Error #" + (errorsInjected + 1)));
if (successful) {
errorsInjected++;
}
} else {
successful = mockProducer.completeNext();
}
if (successful) {
numCompletions++;
} else {
// wait a bit since there are no unsent records
try {
Thread.sleep(1);
} catch (InterruptedException e) {
// ok to retry.
}
}
}
}
});
return this;
}
void shutdown() {
done.set(true);
injectorThread.shutdown();
try {
assertTrue(injectorThread.awaitTermination(10, TimeUnit.SECONDS));
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
}
}
| wangyum/beam | sdks/java/io/kafka/src/test/java/org/apache/beam/sdk/io/kafka/KafkaIOTest.java | Java | apache-2.0 | 49,154 |
/*
*
* Copyright (c) 2020 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* This file implements a test for CHIP Callback
*
*/
#include <lib/core/CHIPCallback.h>
#include <lib/support/CHIPMem.h>
#include <lib/support/UnitTestRegistration.h>
#include <nlunit-test.h>
using namespace chip::Callback;
/**
* An example Callback registrar. Resumer::Resume() accepts Callbacks
* to be run during the next call to Resumer::Dispatch(). In an environment
* completely driven by callbacks, an application's main() would just call
* something like Resumer::Dispatch() in a loop.
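 *
 * A minimal usage sketch (illustrative; `work` and `context` are hypothetical):
 *
 *   Resumer resumer;
 *   Callback<> cb(reinterpret_cast<CallFn>(work), &context);
 *   resumer.Resume(&cb);  // schedule for the next dispatch
 *   resumer.Dispatch();   // runs cb once (one-shot semantics)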
*/
class Resumer : private CallbackDeque
{
public:
/**
     * @brief run this callback at the next Dispatch()
*/
void Resume(Callback<> * cb)
{
        // always do this first: cancel to take ownership of
        // the cb members
Enqueue(cb->Cancel());
}
void Dispatch()
{
Cancelable ready;
DequeueAll(ready);
// runs the ready list
while (ready.mNext != &ready)
{
Callback<> * cb = Callback<>::FromCancelable(ready.mNext);
// one-shot semantics
cb->Cancel();
cb->mCall(cb->mContext);
}
}
};
static void increment(int * v)
{
(*v)++;
}
struct Resume
{
Callback<> * cb;
Resumer * resumer;
};
static void resume(struct Resume * me)
{
me->resumer->Resume(me->cb);
}
static void canceler(Cancelable * ca)
{
ca->Cancel();
}
static void ResumerTest(nlTestSuite * inSuite, void * inContext)
{
int n = 1;
Callback<> cb(reinterpret_cast<CallFn>(increment), &n);
Callback<> cancelcb(reinterpret_cast<CallFn>(canceler), cb.Cancel());
Resumer resumer;
// Resume() works
resumer.Resume(&cb);
resumer.Dispatch();
resumer.Resume(&cb);
resumer.Dispatch();
NL_TEST_ASSERT(inSuite, n == 3);
n = 1;
// test cb->Cancel() cancels
resumer.Resume(&cb);
cb.Cancel();
resumer.Dispatch();
NL_TEST_ASSERT(inSuite, n == 1);
n = 1;
    // Cancel cb before Dispatch() gets around to us (tests FIFO *and* cancel() from the ready list)
resumer.Resume(&cancelcb);
resumer.Resume(&cb);
resumer.Dispatch();
NL_TEST_ASSERT(inSuite, n == 1);
n = 1;
// 2nd Resume() cancels first registration
resumer.Resume(&cb);
resumer.Resume(&cb); // cancels previous registration
resumer.Dispatch(); // runs the list
resumer.Dispatch(); // runs an empty list
NL_TEST_ASSERT(inSuite, n == 2);
n = 1;
// Resume() during Dispatch() runs only once, but enqueues for next dispatch
struct Resume res = { .cb = &cb, .resumer = &resumer };
Callback<> resumecb(reinterpret_cast<CallFn>(resume), &res);
resumer.Resume(&cb);
resumer.Resume(&resumecb);
resumer.Dispatch();
NL_TEST_ASSERT(inSuite, n == 2);
resumer.Dispatch();
NL_TEST_ASSERT(inSuite, n == 3);
Callback<> * pcb = chip::Platform::New<Callback<>>(reinterpret_cast<CallFn>(increment), &n);
n = 1;
// cancel on destruct
resumer.Resume(pcb);
resumer.Dispatch();
NL_TEST_ASSERT(inSuite, n == 2);
resumer.Resume(pcb);
chip::Platform::Delete(pcb);
resumer.Dispatch();
NL_TEST_ASSERT(inSuite, n == 2);
}
/**
* An example Callback registrar. Notifier implements persistently-registered
* semantics, and uses Callbacks with a non-default signature.
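 *
 * Illustrative use (the notify function and context below are hypothetical):
 *
 *   Notifier notifier;
 *   Callback<Notifier::NotifyFn> cb(reinterpret_cast<Notifier::NotifyFn>(fn), &ctx);
 *   notifier.Register(&cb);  // stays registered across Notify() calls
 *   notifier.Notify(42);     // invokes fn(&ctx, 42)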
*/
class Notifier : private CallbackDeque
{
public:
typedef void (*NotifyFn)(void *, int);
/**
     * run all the registered callbacks
*/
void Notify(int v)
{
for (Cancelable * ca = mNext; ca != this; ca = ca->mNext)
{
// persistent registration semantics, with data
Callback<NotifyFn> * cb = Callback<NotifyFn>::FromCancelable(ca);
cb->mCall(cb->mContext, v);
}
}
/**
     * @brief example of a cancel function handed to Enqueue()
*/
static void Cancel(Cancelable * cb)
{
Dequeue(cb); // take off ready list
}
/**
* @brief illustrate a case where this needs notification of cancellation
*/
void Register(Callback<NotifyFn> * cb) { Enqueue(cb->Cancel(), Cancel); }
};
static void increment_by(int * n, int by)
{
*n += by;
}
static void NotifierTest(nlTestSuite * inSuite, void * inContext)
{
int n = 1;
Callback<Notifier::NotifyFn> cb(reinterpret_cast<Notifier::NotifyFn>(increment_by), &n);
Callback<Notifier::NotifyFn> cancelcb(reinterpret_cast<Notifier::NotifyFn>(canceler), cb.Cancel());
// safe to call anytime
cb.Cancel();
Notifier notifier;
    // Simple stuff works, and there's persistent registration
notifier.Register(&cb);
notifier.Notify(1);
notifier.Notify(8);
NL_TEST_ASSERT(inSuite, n == 10);
n = 1;
    // Cancel cb before Notify() gets around to us (tests FIFO *and* cancel() from the ready list)
notifier.Register(&cancelcb);
notifier.Register(&cb);
notifier.Notify(8);
NL_TEST_ASSERT(inSuite, n == 1);
cb.Cancel();
cancelcb.Cancel();
}
/**
* Set up the test suite.
*/
int TestCHIPCallback_Setup(void * inContext)
{
CHIP_ERROR error = chip::Platform::MemoryInit();
if (error != CHIP_NO_ERROR)
return FAILURE;
return SUCCESS;
}
/**
* Tear down the test suite.
*/
int TestCHIPCallback_Teardown(void * inContext)
{
chip::Platform::MemoryShutdown();
return SUCCESS;
}
/**
* Test Suite. It lists all the test functions.
*/
// clang-format off
static const nlTest sTests[] =
{
NL_TEST_DEF("ResumerTest", ResumerTest),
NL_TEST_DEF("NotifierTest", NotifierTest),
NL_TEST_SENTINEL()
};
// clang-format on
int TestCHIPCallback(void)
{
// clang-format off
nlTestSuite theSuite =
{
"CHIPCallback",
&sTests[0],
TestCHIPCallback_Setup,
TestCHIPCallback_Teardown
};
// clang-format on
nlTestRunner(&theSuite, nullptr);
return (nlTestRunnerStats(&theSuite));
}
CHIP_REGISTER_TEST_SUITE(TestCHIPCallback)
| project-chip/connectedhomeip | src/lib/core/tests/TestCHIPCallback.cpp | C++ | apache-2.0 | 6,584 |
/*
* Copyright 2017 Exorath
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.exorath.plugin.game.cakewars.rewards;
import com.exorath.plugin.game.cakewars.Main;
import com.exorath.service.currency.api.CurrencyServiceAPI;
import com.exorath.victoryHandler.rewards.CurrencyReward;
import net.md_5.bungee.api.ChatColor;
/**
* Created by toonsev on 5/31/2017.
*/
public class KillsReward extends CurrencyReward {
public static final int CRUMBS_PER_KILL = 2;
private int kills;
public KillsReward(CurrencyServiceAPI currencyServiceAPI) {
super(null, currencyServiceAPI, Main.CRUMBS_CURRENCY, 0);
setCurrencyColor(ChatColor.GOLD);
setCurrencyName("Crumbs");
}
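    /**
     * Registers one kill and updates the reward accordingly: after n kills the
     * amount is n * CRUMBS_PER_KILL (e.g. 3 kills award 6 Crumbs) and the
     * reason reads "Killing n Players".
     */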
    public void addKill() {
kills++;
setAmount(kills*CRUMBS_PER_KILL);
setReason("Killing " + kills + " Players");
}
}
| Exorath/CakeWarsGamePlugin | src/main/java/com/exorath/plugin/game/cakewars/rewards/KillsReward.java | Java | apache-2.0 | 1,398 |
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/registry -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_adm_registry
short_description: Module to manage openshift registry
description:
- Manage openshift registry programmatically.
options:
state:
description:
- The desired action when managing openshift registry
- present - update or create the registry
- absent - tear down the registry service and deploymentconfig
    - list - returns the current representation of a registry
required: false
    default: present
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- The name of the registry
required: false
default: None
aliases: []
namespace:
description:
- The selector when filtering on node labels
required: false
default: None
aliases: []
images:
description:
- The image to base this registry on - ${component} will be replaced with --type
    required: false
    default: 'openshift3/ose-${component}:${version}'
aliases: []
latest_images:
description:
- If true, attempt to use the latest image for the registry instead of the latest release.
required: false
default: False
aliases: []
labels:
description:
- A set of labels to uniquely identify the registry and its components.
required: false
default: None
aliases: []
enforce_quota:
description:
- If set, the registry will refuse to write blobs if they exceed quota limits
required: False
default: False
aliases: []
mount_host:
description:
- If set, the registry volume will be created as a host-mount at this path.
required: False
default: False
aliases: []
ports:
description:
    - A comma-delimited list of ports or port pairs to expose on the registry pod. The default is 5000.
required: False
default: [5000]
aliases: []
replicas:
description:
- The replication factor of the registry; commonly 2 when high availability is desired.
required: False
default: 1
aliases: []
selector:
description:
- Selector used to filter nodes on deployment. Used to run registries on a specific set of nodes.
required: False
default: None
aliases: []
service_account:
description:
- Name of the service account to use to run the registry pod.
required: False
default: 'registry'
aliases: []
tls_certificate:
description:
- An optional path to a PEM encoded certificate (which may contain the private key) for serving over TLS
required: false
default: None
aliases: []
tls_key:
description:
- An optional path to a PEM encoded private key for serving over TLS
required: false
default: None
aliases: []
volume_mounts:
description:
- The volume mounts for the registry.
required: false
default: None
aliases: []
daemonset:
description:
- Use a daemonset instead of a deployment config.
required: false
default: False
aliases: []
edits:
description:
- A list of modifications to make on the deploymentconfig
required: false
default: None
aliases: []
env_vars:
description:
- A dictionary of modifications to make on the deploymentconfig. e.g. FOO: BAR
required: false
default: None
aliases: []
force:
description:
- Force a registry update.
required: false
default: False
aliases: []
author:
- "Kenny Woodson <[email protected]>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: create a secure registry
oc_adm_registry:
name: docker-registry
service_account: registry
replicas: 2
namespace: default
selector: type=infra
images: "registry.ops.openshift.com/openshift3/ose-${component}:${version}"
env_vars:
REGISTRY_CONFIGURATION_PATH: /etc/registryconfig/config.yml
REGISTRY_HTTP_TLS_CERTIFICATE: /etc/secrets/registry.crt
REGISTRY_HTTP_TLS_KEY: /etc/secrets/registry.key
REGISTRY_HTTP_SECRET: supersecret
volume_mounts:
- path: /etc/secrets
name: dockercerts
type: secret
secret_name: registry-secret
- path: /etc/registryconfig
name: dockersecrets
type: secret
secret_name: docker-registry-config
edits:
- key: spec.template.spec.containers[0].livenessProbe.httpGet.scheme
value: HTTPS
action: put
- key: spec.template.spec.containers[0].readinessProbe.httpGet.scheme
value: HTTPS
action: put
- key: spec.strategy.rollingParams
value:
intervalSeconds: 1
maxSurge: 50%
maxUnavailable: 50%
timeoutSeconds: 600
updatePeriodSeconds: 1
action: put
- key: spec.template.spec.containers[0].resources.limits.memory
value: 2G
action: update
- key: spec.template.spec.containers[0].resources.requests.memory
value: 1G
action: update
register: registryout
'''
# -*- -*- -*- End included fragment: doc/registry -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
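    # A minimal usage sketch (the file path and key below are illustrative):
    #   yed = Yedit(filename='/tmp/example.yml', separator='.')
    #   yed.put('metadata.labels.app', 'registry')  # returns (changed, yaml_dict)
    #   yed.write()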
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
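        # e.g. with sep='.', parse_key('a.b[0].c') yields the (array_index, dict_key)
        # pairs [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')]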
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
        ''' Add an item to a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a#b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
        # Only update the root path (entire document) when it's a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
        # There is a special case where '' will turn into None after yaml loading it, so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
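        # Each edit is a dict shaped like the module's `edits` option, e.g.
        # (illustrative): {'key': 'spec.replicas', 'value': 2, 'action': 'put'}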
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                           'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key']) or {}
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
            # we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
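        # e.g. openshift_cmd(['get', 'pods', '-o', 'json'], output=True) runs
        # `oc get pods -o json -n <namespace>` and returns a dict carrying
        # returncode, results, and cmd (plus stdout/stderr on errors).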
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
cmds.extend(['-n', self.namespace])
rval = {}
results = ''
err = None
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
if returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {}})
return rval
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(contents)
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
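        # Illustrative "oc version" stdout this parses:
        #   oc v3.6.0+c4dd4cf
        #   kubernetes v1.6.1+5115d708d7
        #   openshift v3.6.0+c4dd4cf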
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
        # horrible hack to get the openshift version in OpenShift 3.2;
        # by default, "oc version" in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
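    # Illustrative example (input value assumed, not from any real cluster):
    #   add_custom_versions({'oc': 'v3.6.0+c4dd4cf'})
    # returns
    #   {'oc_numeric': '3.6.0', 'oc_short': '3.6'}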
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import yum
yum_base = yum.YumBase()
if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
return True
return False
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            if debug:
                                print('list compare returned false')
                            return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
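    # Illustrative example (values assumed): autogenerated sections are ignored,
    # so a user definition without 'metadata' still matches:
    #   Utils.check_def_equal({'spec': {'replicas': 1}},
    #                         {'spec': {'replicas': 1}, 'metadata': {'name': 'x'}})
    # returns True, while a differing 'spec' value would return False.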
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
        '''return all options as a list of cli argument strings
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
        ''' return the options hash as a list of cli params
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
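    # Illustrative example (option values assumed): with
    #   {'replicas': {'value': 3, 'include': True},
    #    'labels': {'value': {'app': 'registry'}, 'include': True}}
    # stringify(ascommalist='labels') yields
    #   ['--labels=app=registry', '--replicas=3']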
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*-
# pylint: disable=too-many-public-methods
class DeploymentConfig(Yedit):
''' Class to model an openshift DeploymentConfig'''
default_deployment_config = '''
apiVersion: v1
kind: DeploymentConfig
metadata:
name: default_dc
namespace: default
spec:
replicas: 0
selector:
default_dc: default_dc
strategy:
resources: {}
rollingParams:
intervalSeconds: 1
maxSurge: 0
maxUnavailable: 25%
timeoutSeconds: 600
updatePercent: -25
updatePeriodSeconds: 1
type: Rolling
template:
metadata:
spec:
containers:
- env:
- name: default
value: default
image: default
imagePullPolicy: IfNotPresent
name: default_dc
ports:
- containerPort: 8000
hostPort: 8000
protocol: TCP
name: default_port
resources: {}
terminationMessagePath: /dev/termination-log
dnsPolicy: ClusterFirst
hostNetwork: true
nodeSelector:
type: compute
restartPolicy: Always
securityContext: {}
serviceAccount: default
serviceAccountName: default
terminationGracePeriodSeconds: 30
triggers:
- type: ConfigChange
'''
replicas_path = "spec.replicas"
env_path = "spec.template.spec.containers[0].env"
volumes_path = "spec.template.spec.volumes"
container_path = "spec.template.spec.containers"
volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"
def __init__(self, content=None):
''' Constructor for deploymentconfig '''
if not content:
content = DeploymentConfig.default_deployment_config
super(DeploymentConfig, self).__init__(content=content)
def add_env_value(self, key, value):
''' add key, value pair to env array '''
rval = False
env = self.get_env_vars()
if env:
env.append({'name': key, 'value': value})
rval = True
else:
result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value})
rval = result[0]
return rval
def exists_env_value(self, key, value):
''' return whether a key, value pair exists '''
results = self.get_env_vars()
if not results:
return False
for result in results:
if result['name'] == key and result['value'] == value:
return True
return False
def exists_env_key(self, key):
        ''' return whether an env key exists '''
results = self.get_env_vars()
if not results:
return False
for result in results:
if result['name'] == key:
return True
return False
def get_env_var(self, key):
        '''return a single environment variable by name'''
results = self.get(DeploymentConfig.env_path) or []
if not results:
return None
for env_var in results:
if env_var['name'] == key:
return env_var
return None
def get_env_vars(self):
        '''return the environment variables'''
return self.get(DeploymentConfig.env_path) or []
    def delete_env_var(self, keys):
        '''delete a list of keys '''
        if not isinstance(keys, list):
            keys = [keys]
        env_vars_array = self.get_env_vars()
        modified = False
        for key in keys:
            idx = None
            for env_idx, env_var in enumerate(env_vars_array):
                if env_var['name'] == key:
                    idx = env_idx
                    break
            # idx may legitimately be 0, so compare against None explicitly
            if idx is not None:
                modified = True
                del env_vars_array[idx]
        if modified:
            return True
        return False
    def update_env_var(self, key, value):
        '''update (or add) an env var in the env var list'''
        env_vars_array = self.get_env_vars()
        idx = None
        for env_idx, env_var in enumerate(env_vars_array):
            if env_var['name'] == key:
                idx = env_idx
                break
        # idx may legitimately be 0, so compare against None explicitly
        if idx is not None:
            env_vars_array[idx]['value'] = value
        else:
            self.add_env_value(key, value)
        return True
def exists_volume_mount(self, volume_mount):
''' return whether a volume mount exists '''
exist_volume_mounts = self.get_volume_mounts()
if not exist_volume_mounts:
return False
volume_mount_found = False
for exist_volume_mount in exist_volume_mounts:
if exist_volume_mount['name'] == volume_mount['name']:
volume_mount_found = True
break
return volume_mount_found
def exists_volume(self, volume):
''' return whether a volume exists '''
exist_volumes = self.get_volumes()
volume_found = False
for exist_volume in exist_volumes:
if exist_volume['name'] == volume['name']:
volume_found = True
break
return volume_found
def find_volume_by_name(self, volume, mounts=False):
        ''' return the volume (or volume mount) matching volume['name'] '''
volumes = []
if mounts:
volumes = self.get_volume_mounts()
else:
volumes = self.get_volumes()
for exist_volume in volumes:
if exist_volume['name'] == volume['name']:
return exist_volume
return None
def get_replicas(self):
''' return replicas setting '''
return self.get(DeploymentConfig.replicas_path)
def get_volume_mounts(self):
'''return volume mount information '''
return self.get_volumes(mounts=True)
def get_volumes(self, mounts=False):
        '''return volume information (or volume mounts when mounts=True) '''
if mounts:
return self.get(DeploymentConfig.volume_mounts_path) or []
return self.get(DeploymentConfig.volumes_path) or []
def delete_volume_by_name(self, volume):
'''delete a volume '''
modified = False
exist_volume_mounts = self.get_volume_mounts()
exist_volumes = self.get_volumes()
del_idx = None
for idx, exist_volume in enumerate(exist_volumes):
if 'name' in exist_volume and exist_volume['name'] == volume['name']:
del_idx = idx
break
        if del_idx is not None:
            del exist_volumes[del_idx]
            modified = True
        del_idx = None
        for idx, exist_volume_mount in enumerate(exist_volume_mounts):
            if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']:
                del_idx = idx
                break
        if del_idx is not None:
            del exist_volume_mounts[del_idx]
            modified = True
return modified
def add_volume_mount(self, volume_mount):
''' add a volume or volume mount to the proper location '''
exist_volume_mounts = self.get_volume_mounts()
if not exist_volume_mounts and volume_mount:
self.put(DeploymentConfig.volume_mounts_path, [volume_mount])
else:
exist_volume_mounts.append(volume_mount)
def add_volume(self, volume):
''' add a volume or volume mount to the proper location '''
exist_volumes = self.get_volumes()
if not volume:
return
if not exist_volumes:
self.put(DeploymentConfig.volumes_path, [volume])
else:
exist_volumes.append(volume)
def update_replicas(self, replicas):
''' update replicas value '''
self.put(DeploymentConfig.replicas_path, replicas)
def update_volume(self, volume):
        '''update (or add) a volume'''
exist_volumes = self.get_volumes()
if not volume:
return False
# update the volume
update_idx = None
for idx, exist_vol in enumerate(exist_volumes):
if exist_vol['name'] == volume['name']:
update_idx = idx
break
        if update_idx is not None:
exist_volumes[update_idx] = volume
else:
self.add_volume(volume)
return True
def update_volume_mount(self, volume_mount):
        '''update (or add) a volume mount'''
modified = False
exist_volume_mounts = self.get_volume_mounts()
if not volume_mount:
return False
# update the volume mount
for exist_vol_mount in exist_volume_mounts:
if exist_vol_mount['name'] == volume_mount['name']:
if 'mountPath' in exist_vol_mount and \
str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']):
exist_vol_mount['mountPath'] = volume_mount['mountPath']
modified = True
break
if not modified:
self.add_volume_mount(volume_mount)
modified = True
return modified
def needs_update_volume(self, volume, volume_mount):
''' verify a volume update is needed '''
exist_volume = self.find_volume_by_name(volume)
exist_volume_mount = self.find_volume_by_name(volume, mounts=True)
results = []
results.append(exist_volume['name'] == volume['name'])
if 'secret' in volume:
results.append('secret' in exist_volume)
results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName'])
results.append(exist_volume_mount['name'] == volume_mount['name'])
results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
elif 'emptyDir' in volume:
results.append(exist_volume_mount['name'] == volume['name'])
results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
elif 'persistentVolumeClaim' in volume:
pvc = 'persistentVolumeClaim'
results.append(pvc in exist_volume)
if results[-1]:
results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName'])
if 'claimSize' in volume[pvc]:
results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize'])
        elif 'hostPath' in volume:
results.append('hostPath' in exist_volume)
results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath'])
return not all(results)
def needs_update_replicas(self, replicas):
''' verify whether a replica update is needed '''
current_reps = self.get(DeploymentConfig.replicas_path)
return not current_reps == replicas
# -*- -*- -*- End included fragment: lib/deploymentconfig.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/secret.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class SecretConfig(object):
''' Handle secret options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
kubeconfig,
secrets=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
self.namespace = namespace
self.secrets = secrets
self.data = {}
self.create_dict()
def create_dict(self):
''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
self.data['data'] = {}
if self.secrets:
for key, value in self.secrets.items():
self.data['data'][key] = value
# pylint: disable=too-many-instance-attributes
class Secret(Yedit):
''' Class to wrap the oc command line tools '''
secret_path = "data"
kind = 'secret'
def __init__(self, content):
'''secret constructor'''
super(Secret, self).__init__(content=content)
self._secrets = None
@property
def secrets(self):
'''secret property getter'''
if self._secrets is None:
self._secrets = self.get_secrets()
return self._secrets
    @secrets.setter
    def secrets(self, value):
        '''secret property setter'''
        self._secrets = value
def get_secrets(self):
''' returns all of the defined secrets '''
return self.get(Secret.secret_path) or {}
def add_secret(self, key, value):
''' add a secret '''
if self.secrets:
self.secrets[key] = value
else:
self.put(Secret.secret_path, {key: value})
return True
def delete_secret(self, key):
''' delete secret'''
try:
del self.secrets[key]
except KeyError as _:
return False
return True
def find_secret(self, key):
''' find secret'''
rval = None
try:
rval = self.secrets[key]
except KeyError as _:
return None
return {'key': key, 'value': rval}
def update_secret(self, key, value):
''' update a secret'''
if key in self.secrets:
self.secrets[key] = value
else:
self.add_secret(key, value)
return True
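    # Illustrative usage (values assumed): wrapping an existing secret dict and
    # updating one entry; values under 'data' are stored verbatim, so they are
    # expected to already be base64-encoded as with oc secrets:
    #   sec = Secret({'kind': 'Secret', 'data': {'user': 'YWRtaW4='}})
    #   sec.update_secret('user', 'cm9vdA==')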
# -*- -*- -*- End included fragment: lib/secret.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/service.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class ServiceConfig(object):
''' Handle service options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
ports,
selector=None,
labels=None,
cluster_ip=None,
portal_ip=None,
session_affinity=None,
service_type=None,
external_ips=None):
''' constructor for handling service options '''
self.name = sname
self.namespace = namespace
self.ports = ports
self.selector = selector
self.labels = labels
self.cluster_ip = cluster_ip
self.portal_ip = portal_ip
self.session_affinity = session_affinity
self.service_type = service_type
self.external_ips = external_ips
self.data = {}
self.create_dict()
def create_dict(self):
''' instantiates a service dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Service'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
if self.labels:
self.data['metadata']['labels'] = {}
for lab, lab_value in self.labels.items():
self.data['metadata']['labels'][lab] = lab_value
self.data['spec'] = {}
if self.ports:
self.data['spec']['ports'] = self.ports
else:
self.data['spec']['ports'] = []
if self.selector:
self.data['spec']['selector'] = self.selector
self.data['spec']['sessionAffinity'] = self.session_affinity or 'None'
if self.cluster_ip:
self.data['spec']['clusterIP'] = self.cluster_ip
if self.portal_ip:
self.data['spec']['portalIP'] = self.portal_ip
if self.service_type:
self.data['spec']['type'] = self.service_type
if self.external_ips:
self.data['spec']['externalIPs'] = self.external_ips
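    # Illustrative usage (values assumed): the resulting self.data is the v1
    # Service dict that the Service class below wraps, e.g.
    #   ServiceConfig('docker-registry', 'default',
    #                 ports=[{'port': 5000, 'targetPort': 5000}])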
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
''' Class to model the oc service object '''
port_path = "spec.ports"
portal_ip = "spec.portalIP"
cluster_ip = "spec.clusterIP"
selector_path = 'spec.selector'
kind = 'Service'
external_ips = "spec.externalIPs"
def __init__(self, content):
'''Service constructor'''
super(Service, self).__init__(content=content)
def get_ports(self):
''' get a list of ports '''
return self.get(Service.port_path) or []
def get_selector(self):
''' get the service selector'''
return self.get(Service.selector_path) or {}
def add_ports(self, inc_ports):
''' add a port object to the ports list '''
if not isinstance(inc_ports, list):
inc_ports = [inc_ports]
ports = self.get_ports()
if not ports:
self.put(Service.port_path, inc_ports)
else:
ports.extend(inc_ports)
return True
def find_ports(self, inc_port):
''' find a specific port '''
for port in self.get_ports():
if port['port'] == inc_port['port']:
return port
return None
def delete_ports(self, inc_ports):
''' remove a port from a service '''
if not isinstance(inc_ports, list):
inc_ports = [inc_ports]
ports = self.get(Service.port_path) or []
if not ports:
return True
removed = False
for inc_port in inc_ports:
port = self.find_ports(inc_port)
if port:
ports.remove(port)
removed = True
return removed
def add_cluster_ip(self, sip):
'''add cluster ip'''
self.put(Service.cluster_ip, sip)
def add_portal_ip(self, pip):
        '''add portal ip'''
self.put(Service.portal_ip, pip)
def get_external_ips(self):
''' get a list of external_ips '''
return self.get(Service.external_ips) or []
def add_external_ips(self, inc_external_ips):
''' add an external_ip to the external_ips list '''
if not isinstance(inc_external_ips, list):
inc_external_ips = [inc_external_ips]
external_ips = self.get_external_ips()
if not external_ips:
self.put(Service.external_ips, inc_external_ips)
else:
external_ips.extend(inc_external_ips)
return True
def find_external_ips(self, inc_external_ip):
''' find a specific external IP '''
val = None
try:
idx = self.get_external_ips().index(inc_external_ip)
val = self.get_external_ips()[idx]
except ValueError:
pass
return val
def delete_external_ips(self, inc_external_ips):
''' remove an external IP from a service '''
if not isinstance(inc_external_ips, list):
inc_external_ips = [inc_external_ips]
external_ips = self.get(Service.external_ips) or []
if not external_ips:
return True
removed = False
for inc_external_ip in inc_external_ips:
external_ip = self.find_external_ips(inc_external_ip)
if external_ip:
external_ips.remove(external_ip)
removed = True
return removed
# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/volume.py -*- -*- -*-
class Volume(object):
''' Class to represent an openshift volume object'''
volume_mounts_path = {"pod": "spec.containers[0].volumeMounts",
"dc": "spec.template.spec.containers[0].volumeMounts",
"rc": "spec.template.spec.containers[0].volumeMounts",
}
volumes_path = {"pod": "spec.volumes",
"dc": "spec.template.spec.volumes",
"rc": "spec.template.spec.volumes",
}
@staticmethod
def create_volume_structure(volume_info):
''' return a properly structured volume '''
volume_mount = None
volume = {'name': volume_info['name']}
volume_type = volume_info['type'].lower()
if volume_type == 'secret':
volume['secret'] = {}
volume[volume_info['type']] = {'secretName': volume_info['secret_name']}
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
elif volume_type == 'emptydir':
volume['emptyDir'] = {}
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
elif volume_type == 'pvc' or volume_type == 'persistentvolumeclaim':
volume['persistentVolumeClaim'] = {}
volume['persistentVolumeClaim']['claimName'] = volume_info['claimName']
volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize']
elif volume_type == 'hostpath':
volume['hostPath'] = {}
volume['hostPath']['path'] = volume_info['path']
elif volume_type == 'configmap':
volume['configMap'] = {}
volume['configMap']['name'] = volume_info['configmap_name']
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
return (volume, volume_mount)
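    # Illustrative example (values assumed):
    #   Volume.create_volume_structure({'name': 'registry-storage', 'type': 'pvc',
    #                                   'claimName': 'registry-claim',
    #                                   'claimSize': '10Gi'})
    # returns ({'name': 'registry-storage',
    #           'persistentVolumeClaim': {'claimName': 'registry-claim',
    #                                     'claimSize': '10Gi'}}, None)
    # -- note that pvc and hostpath volumes carry no volume_mount here.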
# -*- -*- -*- End included fragment: lib/volume.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_version.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCVersion(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
config,
debug):
''' Constructor for OCVersion '''
super(OCVersion, self).__init__(None, config)
self.debug = debug
def get(self):
'''get and return version information '''
results = {}
version_results = self._version()
if version_results['returncode'] == 0:
filtered_vers = Utils.filter_versions(version_results['results'])
custom_vers = Utils.add_custom_versions(filtered_vers)
results['returncode'] = version_results['returncode']
results.update(filtered_vers)
results.update(custom_vers)
return results
raise OpenShiftCLIError('Problem detecting openshift version.')
@staticmethod
def run_ansible(params):
'''run the idempotent ansible code'''
oc_version = OCVersion(params['kubeconfig'], params['debug'])
if params['state'] == 'list':
#pylint: disable=protected-access
result = oc_version.get()
return {'state': params['state'],
'results': result,
'changed': False}
# -*- -*- -*- End included fragment: class/oc_version.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_adm_registry.py -*- -*- -*-
class RegistryException(Exception):
''' Registry Exception Class '''
pass
class RegistryConfig(OpenShiftCLIConfig):
''' RegistryConfig is a DTO for the registry. '''
def __init__(self, rname, namespace, kubeconfig, registry_options):
super(RegistryConfig, self).__init__(rname, namespace, kubeconfig, registry_options)
class Registry(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
volume_mount_path = 'spec.template.spec.containers[0].volumeMounts'
volume_path = 'spec.template.spec.volumes'
env_path = 'spec.template.spec.containers[0].env'
def __init__(self,
registry_config,
verbose=False):
''' Constructor for Registry
        a registry consists of at least two parts
- dc/docker-registry
- svc/docker-registry
Parameters:
:registry_config:
:verbose:
'''
super(Registry, self).__init__(registry_config.namespace, registry_config.kubeconfig, verbose)
self.version = OCVersion(registry_config.kubeconfig, verbose)
self.svc_ip = None
self.portal_ip = None
self.config = registry_config
self.verbose = verbose
self.registry_parts = [{'kind': 'dc', 'name': self.config.name},
{'kind': 'svc', 'name': self.config.name},
]
self.__prepared_registry = None
self.volume_mounts = []
self.volumes = []
if self.config.config_options['volume_mounts']['value']:
for volume in self.config.config_options['volume_mounts']['value']:
volume_info = {'secret_name': volume.get('secret_name', None),
'name': volume.get('name', None),
'type': volume.get('type', None),
'path': volume.get('path', None),
'claimName': volume.get('claim_name', None),
'claimSize': volume.get('claim_size', None),
}
vol, vol_mount = Volume.create_volume_structure(volume_info)
self.volumes.append(vol)
self.volume_mounts.append(vol_mount)
self.dconfig = None
self.svc = None
@property
def deploymentconfig(self):
''' deploymentconfig property '''
return self.dconfig
@deploymentconfig.setter
def deploymentconfig(self, config):
''' setter for deploymentconfig property '''
self.dconfig = config
@property
def service(self):
''' service property '''
return self.svc
@service.setter
def service(self, config):
''' setter for service property '''
self.svc = config
@property
def prepared_registry(self):
''' prepared_registry property '''
if not self.__prepared_registry:
results = self.prepare_registry()
if not results or ('returncode' in results and results['returncode'] != 0):
raise RegistryException('Could not perform registry preparation. {}'.format(results))
self.__prepared_registry = results
return self.__prepared_registry
@prepared_registry.setter
def prepared_registry(self, data):
''' setter method for prepared_registry attribute '''
self.__prepared_registry = data
def get(self):
''' return the self.registry_parts '''
self.deploymentconfig = None
self.service = None
rval = 0
for part in self.registry_parts:
result = self._get(part['kind'], name=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
self.service = Service(result['results'][0])
if result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'deploymentconfig': self.deploymentconfig, 'service': self.service}
def exists(self):
'''does the object exist?'''
if self.deploymentconfig and self.service:
return True
return False
def delete(self, complete=True):
'''return all pods '''
parts = []
for part in self.registry_parts:
if not complete and part['kind'] == 'svc':
continue
parts.append(self._delete(part['kind'], part['name']))
# Clean up returned results
rval = 0
for part in parts:
# pylint: disable=invalid-sequence-index
if 'returncode' in part and part['returncode'] != 0:
rval = part['returncode']
return {'returncode': rval, 'results': parts}
def prepare_registry(self):
''' prepare a registry for instantiation '''
options = self.config.to_option_list(ascommalist='labels')
cmd = ['registry']
cmd.extend(options)
cmd.extend(['--dry-run=True', '-o', 'json'])
results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json')
# probably need to parse this
# pylint thinks results is a string
# pylint: disable=no-member
if results['returncode'] != 0 and 'items' not in results['results']:
raise RegistryException('Could not perform registry preparation. {}'.format(results))
service = None
deploymentconfig = None
# pylint: disable=invalid-sequence-index
for res in results['results']['items']:
if res['kind'] == 'DeploymentConfig':
deploymentconfig = DeploymentConfig(res)
elif res['kind'] == 'Service':
service = Service(res)
# Verify we got a service and a deploymentconfig
if not service or not deploymentconfig:
return results
# results will need to get parsed here and modifications added
deploymentconfig = DeploymentConfig(self.add_modifications(deploymentconfig))
# modify service ip
if self.svc_ip:
service.put('spec.clusterIP', self.svc_ip)
if self.portal_ip:
service.put('spec.portalIP', self.portal_ip)
# the dry-run doesn't apply the selector correctly
if self.service:
service.put('spec.selector', self.service.get_selector())
# need to create the service and the deploymentconfig
service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict)
deployment_file = Utils.create_tmp_file_from_contents('deploymentconfig', deploymentconfig.yaml_dict)
return {"service": service,
"service_file": service_file,
"service_update": False,
"deployment": deploymentconfig,
"deployment_file": deployment_file,
"deployment_update": False}
def create(self):
'''Create a registry'''
results = []
self.needs_update()
# if the object is none, then we need to create it
# if the object needs an update, then we should call replace
# Handle the deploymentconfig
if self.deploymentconfig is None:
results.append(self._create(self.prepared_registry['deployment_file']))
elif self.prepared_registry['deployment_update']:
results.append(self._replace(self.prepared_registry['deployment_file']))
# Handle the service
if self.service is None:
results.append(self._create(self.prepared_registry['service_file']))
elif self.prepared_registry['service_update']:
results.append(self._replace(self.prepared_registry['service_file']))
# Clean up returned results
rval = 0
for result in results:
# pylint: disable=invalid-sequence-index
if 'returncode' in result and result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'results': results}
def update(self):
'''run update for the registry. This performs a replace if required'''
# Store the current service IP
if self.service:
svcip = self.service.get('spec.clusterIP')
if svcip:
self.svc_ip = svcip
portip = self.service.get('spec.portalIP')
if portip:
self.portal_ip = portip
results = []
if self.prepared_registry['deployment_update']:
results.append(self._replace(self.prepared_registry['deployment_file']))
if self.prepared_registry['service_update']:
results.append(self._replace(self.prepared_registry['service_file']))
# Clean up returned results
rval = 0
for result in results:
if result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'results': results}
def add_modifications(self, deploymentconfig):
''' update a deployment config with changes '''
# The environment variable for REGISTRY_HTTP_SECRET is autogenerated
# We should set the generated deploymentconfig to the in memory version
# the following modifications will overwrite if needed
if self.deploymentconfig:
result = self.deploymentconfig.get_env_var('REGISTRY_HTTP_SECRET')
if result:
deploymentconfig.update_env_var('REGISTRY_HTTP_SECRET', result['value'])
# Currently we know that our deployment of a registry requires a few extra modifications
# Modification 1
# we need specific environment variables to be set
for key, value in self.config.config_options['env_vars'].get('value', {}).items():
if not deploymentconfig.exists_env_key(key):
deploymentconfig.add_env_value(key, value)
else:
deploymentconfig.update_env_var(key, value)
# Modification 2
# we need specific volume variables to be set
for volume in self.volumes:
deploymentconfig.update_volume(volume)
for vol_mount in self.volume_mounts:
deploymentconfig.update_volume_mount(vol_mount)
# Modification 3
# Edits
edit_results = []
for edit in self.config.config_options['edits'].get('value', []):
if edit['action'] == 'put':
edit_results.append(deploymentconfig.put(edit['key'],
edit['value']))
if edit['action'] == 'update':
edit_results.append(deploymentconfig.update(edit['key'],
edit['value'],
edit.get('index', None),
edit.get('curr_value', None)))
if edit['action'] == 'append':
edit_results.append(deploymentconfig.append(edit['key'],
edit['value']))
if edit_results and not any([res[0] for res in edit_results]):
return None
return deploymentconfig.yaml_dict
def needs_update(self):
''' check to see if we need to update '''
exclude_list = ['clusterIP', 'portalIP', 'type', 'protocol']
if self.service is None or \
not Utils.check_def_equal(self.prepared_registry['service'].yaml_dict,
self.service.yaml_dict,
exclude_list,
debug=self.verbose):
self.prepared_registry['service_update'] = True
exclude_list = ['dnsPolicy',
'terminationGracePeriodSeconds',
'restartPolicy', 'timeoutSeconds',
'livenessProbe', 'readinessProbe',
'terminationMessagePath',
'securityContext',
'imagePullPolicy',
                        'protocol', # ports.protocol: TCP
'type', # strategy: {'type': 'rolling'}
'defaultMode', # added on secrets
'activeDeadlineSeconds', # added in 1.5 for timeouts
]
if self.deploymentconfig is None or \
not Utils.check_def_equal(self.prepared_registry['deployment'].yaml_dict,
self.deploymentconfig.yaml_dict,
exclude_list,
debug=self.verbose):
self.prepared_registry['deployment_update'] = True
return self.prepared_registry['deployment_update'] or self.prepared_registry['service_update'] or False
# In the future, we would like to break out each ansible state into a function.
# pylint: disable=too-many-branches,too-many-return-statements
@staticmethod
def run_ansible(params, check_mode):
'''run idempotent ansible code'''
registry_options = {'images': {'value': params['images'], 'include': True},
'latest_images': {'value': params['latest_images'], 'include': True},
'labels': {'value': params['labels'], 'include': True},
'ports': {'value': ','.join(params['ports']), 'include': True},
'replicas': {'value': params['replicas'], 'include': True},
'selector': {'value': params['selector'], 'include': True},
'service_account': {'value': params['service_account'], 'include': True},
'mount_host': {'value': params['mount_host'], 'include': True},
'env_vars': {'value': params['env_vars'], 'include': False},
'volume_mounts': {'value': params['volume_mounts'], 'include': False},
'edits': {'value': params['edits'], 'include': False},
'tls_key': {'value': params['tls_key'], 'include': True},
'tls_certificate': {'value': params['tls_certificate'], 'include': True},
}
# Do not always pass the daemonset and enforce-quota parameters because they are not understood
# by old versions of oc.
# Default value is false. So, it's safe to not pass an explicit false value to oc versions which
# understand these parameters.
if params['daemonset']:
registry_options['daemonset'] = {'value': params['daemonset'], 'include': True}
if params['enforce_quota']:
registry_options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True}
rconfig = RegistryConfig(params['name'],
params['namespace'],
params['kubeconfig'],
registry_options)
ocregistry = Registry(rconfig, params['debug'])
api_rval = ocregistry.get()
state = params['state']
########
# get
########
if state == 'list':
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': False, 'results': api_rval, 'state': state}
########
# Delete
########
if state == 'absent':
if not ocregistry.exists():
return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
# Unsure as to why this is angry with the return type.
# pylint: disable=redefined-variable-type
api_rval = ocregistry.delete()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
if state == 'present':
########
# Create
########
if not ocregistry.exists():
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
api_rval = ocregistry.create()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
########
if not params['force'] and not ocregistry.needs_update():
return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
api_rval = ocregistry.update()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
return {'failed': True, 'msg': 'Unknown state passed. %s' % state}
# -*- -*- -*- End included fragment: class/oc_adm_registry.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_adm_registry.py -*- -*- -*-
def main():
'''
ansible oc module for registry
'''
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', type='str',
choices=['present', 'absent']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, required=True, type='str'),
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
images=dict(default=None, type='str'),
latest_images=dict(default=False, type='bool'),
labels=dict(default=None, type='dict'),
ports=dict(default=['5000'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
service_account=dict(default='registry', type='str'),
mount_host=dict(default=None, type='str'),
volume_mounts=dict(default=None, type='list'),
env_vars=dict(default={}, type='dict'),
edits=dict(default=[], type='list'),
enforce_quota=dict(default=False, type='bool'),
force=dict(default=False, type='bool'),
daemonset=dict(default=False, type='bool'),
tls_key=dict(default=None, type='str'),
tls_certificate=dict(default=None, type='str'),
),
supports_check_mode=True,
)
results = Registry.run_ansible(module.params, module.check_mode)
if 'failed' in results:
module.fail_json(**results)
module.exit_json(**results)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_adm_registry.py -*- -*- -*-
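# Illustrative playbook usage (parameter values are assumptions, not defaults
# taken from any particular deployment):
#
#   - name: deploy the integrated registry
#     oc_adm_registry:
#       name: docker-registry
#       namespace: default
#       service_account: registry
#       replicas: 1
#       selector: region=infra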
| DG-i/openshift-ansible | roles/lib_openshift/library/oc_adm_registry.py | Python | apache-2.0 | 94,103 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.irc;
import java.util.ArrayList;
import java.util.Dictionary;
import java.util.Hashtable;
import java.util.List;
import org.junit.Before;
import org.junit.Test;
import org.schwering.irc.lib.IRCConnection;
import org.schwering.irc.lib.IRCEventAdapter;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class IrcEndpointTest {
private IrcComponent component;
private IrcConfiguration configuration;
private IRCConnection connection;
private IrcEndpoint endpoint;
@Before
public void doSetup() {
component = mock(IrcComponent.class);
configuration = mock(IrcConfiguration.class);
connection = mock(IRCConnection.class);
List<String> channels = new ArrayList<String>();
Dictionary<String, String> keys = new Hashtable<String, String>();
channels.add("chan1");
channels.add("chan2");
keys.put("chan1", "");
keys.put("chan2", "chan2key");
when(configuration.getChannels()).thenReturn(channels);
when(configuration.getKey("chan1")).thenReturn("");
when(configuration.getKey("chan2")).thenReturn("chan2key");
when(component.getIRCConnection(configuration)).thenReturn(connection);
endpoint = new IrcEndpoint("foo", component, configuration);
}
@Test
public void doJoinChannelTestNoKey() throws Exception {
endpoint.joinChannel("chan1");
verify(connection).doJoin("chan1");
}
@Test
public void doJoinChannelTestKey() throws Exception {
endpoint.joinChannel("chan2");
verify(connection).doJoin("chan2", "chan2key");
}
@Test
public void doJoinChannels() throws Exception {
endpoint.joinChannels();
verify(connection).doJoin("chan1");
verify(connection).doJoin("chan2", "chan2key");
}
@Test
public void doHandleIrcErrorNickInUse() throws Exception {
when(connection.getNick()).thenReturn("nick");
endpoint.handleIrcError(IRCEventAdapter.ERR_NICKNAMEINUSE, "foo");
verify(connection).doNick("nick-");
when(connection.getNick()).thenReturn("nick---");
        // confirm doNick was never called with "foo"
verify(connection, never()).doNick("foo");
}
}
| everttigchelaar/camel-svn | components/camel-irc/src/test/java/org/apache/camel/component/irc/IrcEndpointTest.java | Java | apache-2.0 | 3,292 |
<?php
namespace Google\AdsApi\AdManager\v202111;
/**
* This file was generated from WSDL. DO NOT EDIT.
*/
class PrecisionError extends \Google\AdsApi\AdManager\v202111\ApiError
{
/**
* @var string $reason
*/
protected $reason = null;
/**
* @param string $fieldPath
* @param \Google\AdsApi\AdManager\v202111\FieldPathElement[] $fieldPathElements
* @param string $trigger
* @param string $errorString
* @param string $reason
*/
public function __construct($fieldPath = null, array $fieldPathElements = null, $trigger = null, $errorString = null, $reason = null)
{
parent::__construct($fieldPath, $fieldPathElements, $trigger, $errorString);
$this->reason = $reason;
}
/**
* @return string
*/
public function getReason()
{
return $this->reason;
}
/**
* @param string $reason
* @return \Google\AdsApi\AdManager\v202111\PrecisionError
*/
public function setReason($reason)
{
$this->reason = $reason;
return $this;
}
}
| googleads/googleads-php-lib | src/Google/AdsApi/AdManager/v202111/PrecisionError.php | PHP | apache-2.0 | 1,074 |
// STLport regression testsuite component.
// To compile as a separate example, please #define MAIN.
#include <algorithm>
#include <iostream>
#include "unary.h"
#ifdef MAIN
#define bcompos1_test main
#endif
#if !defined (STLPORT) || defined(__STL_USE_NAMESPACES)
using namespace std;
#endif
int bcompos1_test(int, char**)
{
cout<<"Results of bcompos1_test:"<<endl;
int array [6] = { -2, -1, 0, 1, 2, 3 };
binary_compose<logical_and<bool>, odd, positive>
b = binary_compose<logical_and<bool>, odd, positive>
(logical_and<bool>(), odd(), positive());
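  // binary_compose feeds the same argument to both unary functors and joins
  // the results with the binary op, i.e. b(x) == (odd(x) && positive(x)).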
int* p = find_if((int*)array, (int*)array + 6, b);
if(p != array + 6)
cout << *p << " is odd and positive" << endl;
return 0;
}
| aestesis/elektronika | src/STLport/test/regression/bcompos1.cpp | C++ | apache-2.0 | 736 |
package com.zxinsight.classifier.ruleengine.admin;
import java.rmi.RemoteException;
import java.util.Map;
import javax.rules.admin.LocalRuleExecutionSetProvider;
import javax.rules.admin.RuleAdministrator;
import javax.rules.admin.RuleExecutionSet;
import javax.rules.admin.RuleExecutionSetDeregistrationException;
import javax.rules.admin.RuleExecutionSetProvider;
import javax.rules.admin.RuleExecutionSetRegisterException;
@SuppressWarnings("rawtypes")
public class RuleAdministratorImpl implements RuleAdministrator {
@Override
public void deregisterRuleExecutionSet(String bindUri, Map properties)
throws RuleExecutionSetDeregistrationException, RemoteException {
RuleExecutionSetRepository repository = RuleExecutionSetRepository
.getInstance();
if (repository.getRuleExecutionSet(bindUri) == null) {
throw new RuleExecutionSetDeregistrationException(
"no execution set bound to: " + bindUri);
}
repository.unregisterRuleExecutionSet(bindUri);
}
@Override
public LocalRuleExecutionSetProvider getLocalRuleExecutionSetProvider(
Map properties) throws RemoteException {
return new LocalRuleExecutionSetProviderImple();
}
@Override
public RuleExecutionSetProvider getRuleExecutionSetProvider(Map properties)
throws RemoteException {
return new RuleExecutionSetProviderImpl();
}
@Override
public void registerRuleExecutionSet(String bindUri,
RuleExecutionSet ruleExecutionSet, Map properties)
throws RuleExecutionSetRegisterException, RemoteException {
RuleExecutionSetRepository repository = RuleExecutionSetRepository
.getInstance();
repository.registerRuleExecutionSet(bindUri, ruleExecutionSet);
}
}
| kevin-ww/commentClassifier | src/main/java/com/zxinsight/classifier/ruleengine/admin/RuleAdministratorImpl.java | Java | apache-2.0 | 1,738 |
'use strict';
angular.module('playgroundApp', [
'playgroundApp.filters',
'playgroundApp.services',
'playgroundApp.directives',
'ngRoute',
'ui.bootstrap',
'ui',
])
.config(function($locationProvider, $routeProvider, $httpProvider,
$dialogProvider) {
$locationProvider.html5Mode(true);
// TODO: add list of promises to be resolved for injection
// TODO: resolved promises are injected into controller
// TODO: see http://www.youtube.com/watch?v=P6KITGRQujQ
$routeProvider
.when('/playground/', {
templateUrl: '/playground/main.html',
controller: MainController,
})
.when('/playground/p/:project_id/', {
templateUrl: '/playground/project.html',
controller: ProjectController,
reloadOnSearch: false,
});
$httpProvider.interceptors.push('pgHttpInterceptor');
// TODO: test these defaults?
$dialogProvider.options({
backdropFade: true,
modalFade: true,
});
})
.value('ui.config', {
codemirror: {
lineNumbers: true,
matchBrackets: true,
autofocus: true,
undoDepth: 440, // default = 40
}
});
| jackpunt/playground | app/js/app.js | JavaScript | apache-2.0 | 1,117 |
name 'machines'
maintainer 'YOUR_NAME'
maintainer_email 'YOUR_EMAIL'
license 'All rights reserved'
description 'Installs/Configures machines'
long_description 'Installs/Configures machines'
version '0.1.0'
| hdushan/quickquote | machines/metadata.rb | Ruby | apache-2.0 | 248 |
# Deventropy Shared Utils
See **[project website](http://www.deventropy.org/shared-utils/)** for more information.
| deventropy/shared-utils | src/site/resources/README.md | Markdown | apache-2.0 | 116 |
package trendli.me.makhana.common.entities;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
public enum ActionType
{
MOVE( "Moving", "newTile" ), FABRICATING( "Fabricating" );
private final String verb;
private final List< String > dataKeys;
private ActionType( String verb, String... dataKeys )
{
this.verb = verb;
if ( dataKeys != null )
{
this.dataKeys = Arrays.asList( dataKeys );
}
else
{
this.dataKeys = Collections.emptyList( );
}
}
/**
* @return the dataKeys
*/
public List< String > getDataKeys( )
{
return dataKeys;
}
/**
* @return the verb
*/
public String getVerb( )
{
return verb;
}
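
    // Illustrative usage: ActionType.MOVE.getVerb( ) returns "Moving" and
    // ActionType.MOVE.getDataKeys( ) returns ["newTile"].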
}
| elliottmb/makhana | common/src/main/java/trendli/me/makhana/common/entities/ActionType.java | Java | apache-2.0 | 806 |
/*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kilt
import (
"github.com/google/kilt/pkg/rework"
log "github.com/golang/glog"
"github.com/spf13/cobra"
)
var reworkCmd = &cobra.Command{
Use: "rework",
Short: "Rework the patches belonging to patchsets",
Long: `Rework patchsets, allowing patches to be redistributed and re-ordered in the
branch. The rework command will create a working area detached from the current
kilt branch where modifications can be staged without changing the original
branch.
Kilt will examine the patchsets in the branch and determine which patches
belonging to patchsets need to be reworked, and create a queue of operations
that the user will drive. The user can also perform other rework-related
operations, such as re-ordering or merging patches.
Once the user is finished, kilt will verify that the rework is valid, and
modify the previous kilt branch to point to the result of the rework. A rework
is considered valid if the end state is identical to the initial state -- the
diff between them is empty.`,
Args: argsRework,
Run: runRework,
}
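
// Illustrative invocations (flag names as registered in init below; the
// patchset name is an assumption):
//
//	kilt rework -p my-patchset   // begin reworking a single patchset
//	kilt rework --continue       // resume after resolving conflicts
//	kilt rework --finish         // validate and finish the rework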
var reworkFlags = struct {
begin bool
finish bool
validate bool
rContinue bool
abort bool
skip bool
force bool
auto bool
patchsets []string
all bool
}{}
func init() {
rootCmd.AddCommand(reworkCmd)
reworkCmd.Flags().BoolVar(&reworkFlags.begin, "begin", true, "begin rework")
reworkCmd.Flags().MarkHidden("begin")
reworkCmd.Flags().BoolVar(&reworkFlags.finish, "finish", false, "validate and finish rework")
reworkCmd.Flags().BoolVar(&reworkFlags.abort, "abort", false, "abort rework")
reworkCmd.Flags().BoolVarP(&reworkFlags.force, "force", "f", false, "when finishing, force finish rework, regardless of validation")
reworkCmd.Flags().BoolVar(&reworkFlags.validate, "validate", false, "validate rework")
reworkCmd.Flags().BoolVar(&reworkFlags.rContinue, "continue", false, "continue rework")
reworkCmd.Flags().BoolVar(&reworkFlags.skip, "skip", false, "skip rework step")
reworkCmd.Flags().BoolVar(&reworkFlags.auto, "auto", false, "attempt to automatically complete rework")
reworkCmd.Flags().BoolVarP(&reworkFlags.all, "all", "a", false, "specify all patchsets for rework")
reworkCmd.Flags().StringSliceVarP(&reworkFlags.patchsets, "patchset", "p", nil, "specify individual patchset for rework")
}
func argsRework(*cobra.Command, []string) error {
return nil
}
func runRework(cmd *cobra.Command, args []string) {
var c *rework.Command
var err error
switch {
case reworkFlags.finish:
reworkFlags.auto = true
c, err = rework.NewFinishCommand(reworkFlags.force)
case reworkFlags.abort:
c, err = rework.NewAbortCommand()
case reworkFlags.skip:
c, err = rework.NewSkipCommand()
case reworkFlags.validate:
c, err = rework.NewValidateCommand()
case reworkFlags.rContinue:
c, err = rework.NewContinueCommand()
case reworkFlags.begin:
targets := []rework.TargetSelector{rework.FloatingTargets{}}
if reworkFlags.all {
targets = append(targets, rework.AllTargets{})
} else if len(reworkFlags.patchsets) > 0 {
for _, p := range reworkFlags.patchsets {
targets = append(targets, rework.PatchsetTarget{Name: p})
}
}
c, err = rework.NewBeginCommand(targets...)
default:
log.Exitf("No operation specified")
}
if err != nil {
log.Exitf("Rework failed: %v", err)
}
if reworkFlags.auto {
err = c.ExecuteAll()
} else {
err = c.Execute()
}
if err != nil {
log.Errorf("Rework failed: %v", err)
}
if err = c.Save(); err != nil {
log.Exitf("Failed to save rework state: %v", err)
}
}
| google/kilt | pkg/cmd/kilt/rework.go | GO | apache-2.0 | 4,097 |
/*
* Copyright 2014-2015 Nikos Grammatikos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://raw.githubusercontent.com/nikosgram13/OglofusProtection/master/LICENSE
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package me.nikosgram.oglofus.protection;
import com.google.common.base.Optional;
import com.sk89q.intake.argument.ArgumentException;
import com.sk89q.intake.argument.ArgumentParseException;
import com.sk89q.intake.argument.CommandArgs;
import com.sk89q.intake.parametric.ProvisionException;
import me.nikosgram.oglofus.protection.api.ActionResponse;
import me.nikosgram.oglofus.protection.api.CommandExecutor;
import me.nikosgram.oglofus.protection.api.entity.User;
import me.nikosgram.oglofus.protection.api.message.MessageType;
import me.nikosgram.oglofus.protection.api.region.ProtectionRank;
import me.nikosgram.oglofus.protection.api.region.ProtectionRegion;
import me.nikosgram.oglofus.protection.api.region.ProtectionStaff;
import org.apache.commons.lang3.ClassUtils;
import org.spongepowered.api.entity.player.Player;
import org.spongepowered.api.service.user.UserStorage;
import org.spongepowered.api.util.command.CommandSource;
import javax.annotation.Nullable;
import java.lang.annotation.Annotation;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.UUID;
public class OglofusProtectionStaff implements ProtectionStaff {
private final List<User> staff = new ArrayList<User>();
private final Map<UUID, ProtectionRank> ranks = new HashMap<UUID, ProtectionRank>();
private final User owner;
private final ProtectionRegion region;
private final OglofusSponge sponge;
protected OglofusProtectionStaff(ProtectionRegion region, OglofusSponge sponge) {
this.region = region;
this.sponge = sponge;
owner = sponge.getUserManager().getUser(UUID.fromString(sponge.connector.getString(
"oglofus_regions", "uuid", region.getUuid().toString(), "owner"
).get())).get();
Map<String, String> staff = sponge.connector.getStringMap(
"oglofus_regions", "uuid", region.getUuid().toString(), new String[]{"player", "rank"}
);
for (String uid : staff.keySet()) {
UUID uuid = UUID.fromString(uid);
this.staff.add(sponge.getUserManager().getUser(uuid).get());
ranks.put(uuid, ProtectionRank.valueOf(staff.get(uid)));
}
}
@Override
public UUID getOwnerUuid() {
return owner.getUuid();
}
@Override
public User getOwner() {
return owner;
}
@Override
@SuppressWarnings("unchecked")
public <T> Optional<T> getOwnerAs(Class<T> tClass) {
if (ClassUtils.isAssignable(tClass, Player.class)) {
return (Optional<T>) sponge.server.getPlayer(owner.getUuid());
} else if (ClassUtils.isAssignable(tClass, User.class)) {
UserStorage storage;
            if ((storage = sponge.game.getServiceManager().provide(UserStorage.class).orNull()) != null) {
                // return the Optional as-is; unwrapping with orNull() here would cast a User to Optional<T>
                return (Optional<T>) storage.get(owner.getUuid());
}
}
return Optional.absent();
}
@Override
@SuppressWarnings("unchecked")
public <T> Collection<T> getOfficersAs(Class<T> tClass) {
List<T> returned = new ArrayList<T>();
if (ClassUtils.isAssignable(tClass, Player.class)) {
for (UUID uuid : getOfficersUuid()) {
Player player;
if ((player = sponge.server.getPlayer(uuid).orNull()) != null) {
returned.add((T) player);
}
}
}
return returned;
}
@Override
public Collection<UUID> getOfficersUuid() {
List<UUID> returned = new ArrayList<UUID>();
for (User user : getOfficers()) {
returned.add(user.getUuid());
}
return returned;
}
@Override
public Collection<User> getOfficers() {
List<User> returned = new ArrayList<User>();
for (User user : this) {
if (ranks.get(user.getUuid()).equals(ProtectionRank.Officer)) {
returned.add(user);
}
}
return returned;
}
@Override
@SuppressWarnings("unchecked")
public <T> Collection<T> getMembersAs(Class<T> tClass) {
List<T> returned = new ArrayList<T>();
if (ClassUtils.isAssignable(tClass, Player.class)) {
for (UUID uuid : getMembersUuid()) {
Player player;
if ((player = sponge.server.getPlayer(uuid).orNull()) != null) {
returned.add((T) player);
}
}
}
return returned;
}
@Override
public Collection<UUID> getMembersUuid() {
List<UUID> returned = new ArrayList<UUID>();
for (User user : getMembers()) {
returned.add(user.getUuid());
}
return returned;
}
@Override
public Collection<User> getMembers() {
List<User> returned = new ArrayList<User>();
for (User user : this) {
if (ranks.get(user.getUuid()).equals(ProtectionRank.Member)) {
returned.add(user);
}
}
return returned;
}
@Override
@SuppressWarnings("unchecked")
public <T> Collection<T> getStaffAs(Class<T> tClass) {
List<T> returned = new ArrayList<T>();
if (ClassUtils.isAssignable(tClass, Player.class)) {
for (User user : this) {
Player player;
if ((player = sponge.server.getPlayer(user.getUuid()).orNull()) != null) {
returned.add((T) player);
}
}
}
return returned;
}
@Override
public Collection<UUID> getStaffUuid() {
Collection<UUID> returned = new ArrayList<UUID>();
for (User user : this) {
returned.add(user.getUuid());
}
return returned;
}
@Override
public boolean isOwner(UUID target) {
return owner.getUuid().equals(target);
}
@Override
public boolean isOwner(User target) {
return owner.getUuid().equals(target.getUuid());
}
@Override
public boolean isOfficer(UUID target) {
return ranks.containsKey(target) && ranks.get(target).equals(ProtectionRank.Officer);
}
@Override
public boolean isOfficer(User target) {
return ranks.containsKey(target.getUuid()) && ranks.get(target.getUuid()).equals(ProtectionRank.Officer);
}
@Override
public boolean isMember(UUID target) {
return ranks.containsKey(target) && ranks.get(target).equals(ProtectionRank.Member);
}
@Override
public boolean isMember(User target) {
return ranks.containsKey(target.getUuid()) && ranks.get(target.getUuid()).equals(ProtectionRank.Member);
}
@Override
public boolean isStaff(UUID target) {
return ranks.containsKey(target);
}
@Override
public boolean isStaff(User target) {
return ranks.containsKey(target.getUuid());
}
@Override
public boolean hasOwnerAccess(UUID target) {
return isOwner(target) || sponge.getUserManager().getUser(target).get().hasPermission("oglofus.protection.bypass.owner");
}
@Override
public boolean hasOwnerAccess(User target) {
return isOwner(target) || target.hasPermission("oglofus.protection.bypass.owner");
}
@Override
public boolean hasOfficerAccess(UUID target) {
return isOfficer(target) || sponge.getUserManager().getUser(target).get().hasPermission("oglofus.protection.bypass.officer");
}
@Override
public boolean hasOfficerAccess(User target) {
return isOfficer(target) || target.hasPermission("oglofus.protection.bypass.officer");
}
@Override
public boolean hasMemberAccess(UUID target) {
        return isMember(target) || sponge.getUserManager().getUser(target).get().hasPermission("oglofus.protection.bypass.member");
}
@Override
public boolean hasMemberAccess(User target) {
return isMember(target) || target.hasPermission("oglofus.protection.bypass.member");
}
@Override
public ProtectionRank getRank(UUID target) {
return ranks.containsKey(target) ? ranks.get(target) : ProtectionRank.None;
}
@Override
public ProtectionRank getRank(User target) {
return ranks.containsKey(target.getUuid()) ? ranks.get(target.getUuid()) : ProtectionRank.None;
}
@Override
public void broadcast(String message) {
broadcast(MessageType.CHAT, message);
}
@Override
public void broadcast(String message, ProtectionRank rank) {
broadcast(MessageType.CHAT, message, rank);
}
@Override
public void broadcast(MessageType type, String message) {
for (User user : this) {
user.sendMessage(type, message);
}
}
@Override
public void broadcast(MessageType type, String message, ProtectionRank rank) {
switch (rank) {
case Member:
for (User user : getMembers()) {
user.sendMessage(type, message);
}
break;
case Officer:
for (User user : getOfficers()) {
user.sendMessage(type, message);
}
break;
case Owner:
owner.sendMessage(type, message);
break;
}
}
@Override
public void broadcastRaw(Object message) {
for (User user : this) {
user.sendMessage(message);
}
}
@Override
public void broadcastRaw(Object message, ProtectionRank rank) {
switch (rank) {
case Member:
for (User user : getMembers()) {
user.sendMessage(message);
}
break;
case Officer:
for (User user : getOfficers()) {
user.sendMessage(message);
}
break;
case Owner:
owner.sendMessage(message);
break;
}
}
@Override
public void broadcastRaw(MessageType type, Object message) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public void broadcastRaw(MessageType type, Object message, ProtectionRank rank) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public ActionResponse reFlag() {
//TODO: make it.
return null;
}
@Override
public ActionResponse invite(Object sender, UUID target) {
return sponge.getUserManager().invite(sender, target, region);
}
@Override
public ActionResponse invite(CommandExecutor sender, UUID target) {
return null;
}
@Override
public ActionResponse invite(Object sender, User target) {
return null;
}
@Override
public ActionResponse invite(CommandExecutor sender, User target) {
return null;
}
@Override
public ActionResponse invite(UUID target) {
return sponge.getUserManager().invite(target, region);
}
@Override
public ActionResponse invite(User target) {
return null;
}
@Override
public ActionResponse kick(Object sender, UUID target) {
if (sender instanceof CommandSource) {
if (sender instanceof Player) {
if (region.getProtectionStaff().hasOwnerAccess(((Player) sender).getUniqueId())) {
//TODO: call the handler PlayerKickHandler.
return kick(target);
}
return ActionResponse.Failure.setMessage("access");
}
if (((CommandSource) sender).hasPermission("oglofus.protection.bypass")) {
return kick(target);
}
return ActionResponse.Failure.setMessage("access");
}
return ActionResponse.Failure.setMessage("object");
}
@Override
public ActionResponse kick(CommandExecutor sender, UUID target) {
return null;
}
@Override
public ActionResponse kick(Object sender, User target) {
return null;
}
@Override
public ActionResponse kick(CommandExecutor sender, User target) {
return null;
}
@Override
public ActionResponse kick(UUID target) {
//TODO: call the handler PlayerKickHandler.
return null;
}
@Override
public ActionResponse kick(User target) {
return null;
}
@Override
public ActionResponse promote(Object sender, UUID target) {
return null;
}
@Override
public ActionResponse promote(CommandExecutor sender, UUID target) {
return null;
}
@Override
public ActionResponse promote(Object sender, User target) {
return null;
}
@Override
public ActionResponse promote(CommandExecutor sender, User target) {
return null;
}
@Override
public ActionResponse promote(UUID target) {
return null;
}
@Override
public ActionResponse promote(User target) {
return null;
}
@Override
public ActionResponse demote(Object sender, UUID target) {
return null;
}
@Override
public ActionResponse demote(CommandExecutor sender, UUID target) {
return null;
}
@Override
public ActionResponse demote(Object sender, User target) {
return null;
}
@Override
public ActionResponse demote(CommandExecutor sender, User target) {
return null;
}
@Override
public ActionResponse demote(UUID target) {
return null;
}
@Override
public ActionResponse demote(User target) {
return null;
}
@Override
public ActionResponse changeRank(Object sender, UUID target, ProtectionRank rank) {
return null;
}
@Override
public ActionResponse changeRank(CommandExecutor sender, UUID target, ProtectionRank rank) {
return null;
}
@Override
public ActionResponse changeRank(Object sender, User target, ProtectionRank rank) {
return null;
}
@Override
public ActionResponse changeRank(CommandExecutor sender, User target, ProtectionRank rank) {
return null;
}
@Override
public ActionResponse changeRank(UUID target, ProtectionRank rank) {
return null;
}
@Override
public ActionResponse changeRank(User target, ProtectionRank rank) {
return null;
}
@Override
public Iterator<User> iterator() {
return staff.iterator();
}
@Override
public boolean isProvided() {
return false;
}
@Nullable
@Override
public User get(CommandArgs arguments, List<? extends Annotation> modifiers) throws ArgumentException, ProvisionException {
String name = arguments.next();
Optional<User> user = sponge.getUserManager().getUser(name);
if (user.isPresent() && isStaff(user.get())) {
return user.get();
} else {
throw new ArgumentParseException(String.format("I can't find the Staff with name '%s'.", name));
}
}
@Override
public List<String> getSuggestions(String prefix) {
List<String> returned = new ArrayList<String>();
for (User user : this) {
if (user.getName().startsWith(prefix)) {
returned.add(user.getName());
}
}
return returned;
}
}
| Oglofus/OglofusProtection | sponge/src/main/java/me/nikosgram/oglofus/protection/OglofusProtectionStaff.java | Java | apache-2.0 | 16,214 |
package commons;
import org.makagiga.commons.ConfigFile;
import org.makagiga.test.AbstractEnumTest;
import org.makagiga.test.Test;
import org.makagiga.test.TestMethod;
import org.makagiga.test.Tester;
@Test(className = ConfigFile.Format.class)
public final class TestConfigFile_Format extends AbstractEnumTest<ConfigFile.Format> {
// public
public TestConfigFile_Format() {
super(
ConfigFile.Format.values(),
ConfigFile.Format.DESKTOP, ConfigFile.Format.INI
);
}
@Test
public void test_commons() {
for (final ConfigFile.Format i : ConfigFile.Format.values()) {
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
i.validateGroup(null);
}
} );
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
i.validateGroup("");
}
} );
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
i.validateKey(null);
}
} );
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
i.validateKey("");
}
} );
}
final String LONG_VALUE = "AZaz09-";
final String SHORT_VALUE = "X";
// DESKTOP
ConfigFile.Format f = ConfigFile.Format.DESKTOP;
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
ConfigFile.Format.DESKTOP.validateGroup("[");
}
} );
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
ConfigFile.Format.DESKTOP.validateGroup("]");
}
} );
assert f.validateGroup(SHORT_VALUE) == SHORT_VALUE;
assert f.validateGroup(LONG_VALUE) == LONG_VALUE;
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
ConfigFile.Format.DESKTOP.validateKey("=");
}
} );
assert f.validateKey(SHORT_VALUE) == SHORT_VALUE;
assert f.validateKey(LONG_VALUE) == LONG_VALUE;
f.validateGroup(" ");
f.validateGroup("Foo Bar");
// INI
f = ConfigFile.Format.INI;
assert f.validateGroup(SHORT_VALUE) == SHORT_VALUE;
assert f.validateGroup(LONG_VALUE) == LONG_VALUE;
assert f.validateKey(SHORT_VALUE) == SHORT_VALUE;
assert f.validateKey(LONG_VALUE) == LONG_VALUE;
}
@Test(
methods = @TestMethod(name = "equals", parameters = "String, String")
)
public void test_equals() {
ConfigFile.Format f;
f = ConfigFile.Format.DESKTOP;
assert f.equals("foo", "foo");
assert !f.equals("foo", "FOO");
f = ConfigFile.Format.INI;
assert f.equals("foo", "foo");
assert f.equals("foo", "FOO");
}
@Test(
methods = @TestMethod(name = "escape", parameters = "String")
)
public void test_escape() {
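		// Format.escape maps tab -> \t, space -> \s, CR -> \r, LF -> \n and
		// backslash -> \\ (exercised by the assertions below).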
assertNull(ConfigFile.Format.escape(null));
assertEmpty(ConfigFile.Format.escape(""));
assertEquals("\\tFoo\\sBar\\r\\nBaz\\\\", ConfigFile.Format.escape("\tFoo Bar\r\nBaz\\"));
}
@Test(
methods = @TestMethod(name = "unescape", parameters = "String")
)
public void test_unescape() {
assertNull(ConfigFile.Format.unescape(null));
assertEmpty(ConfigFile.Format.unescape(""));
assertEquals("Foo Bar", ConfigFile.Format.unescape("Foo Bar"));
assertEquals("\tFoo Bar\r\nBaz\\", ConfigFile.Format.unescape("\\tFoo\\sBar\\r\\nBaz\\\\"));
assertEquals("\n\n \\\\", ConfigFile.Format.unescape("\\n\\n\\s\\s\\\\\\\\"));
}
@Test(
methods = @TestMethod(name = "getComment")
)
public void test_getComment() {
assert ConfigFile.Format.DESKTOP.getComment().equals("#");
assert ConfigFile.Format.INI.getComment().equals(";");
}
@Test(
methods = @TestMethod(name = "getEOL")
)
public void test_getEOL() {
assert ConfigFile.Format.DESKTOP.getEOL().equals("\n");
assert ConfigFile.Format.INI.getEOL().equals("\r\n");
}
@Test(
methods = @TestMethod(name = "getSuffix")
)
public void test_getSuffix() {
assert ConfigFile.Format.DESKTOP.getSuffix().equals(".desktop");
assert ConfigFile.Format.INI.getSuffix().equals(".ini");
}
@Test(
methods = @TestMethod(name = "isCaseSensitive")
)
public void test_isCaseSensitive() {
assert ConfigFile.Format.DESKTOP.isCaseSensitive();
assert !ConfigFile.Format.INI.isCaseSensitive();
}
}
| stuffer2325/Makagiga | test/src/commons/TestConfigFile_Format.java | Java | apache-2.0 | 4,186 |
package org.apache.rave.portal.service.impl;
import org.apache.rave.model.ExcercicesHasTrainingPlan;
import org.apache.rave.model.Serie;
import org.apache.rave.model.TrainingPlan;
import org.apache.rave.portal.repository.ExcercicesHasTrainingPlanRepository;
import org.apache.rave.portal.repository.SerieRepository;
import org.apache.rave.portal.repository.TrainingPlanRepository;
import org.apache.rave.portal.service.TrainingPlanService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import java.util.ArrayList;
import java.util.Collection;
/**
* Created by fhernandez on 23/09/14.
*/
@Service
public class DefaultTrainingPlanService implements TrainingPlanService {
private final Logger logger = LoggerFactory.getLogger(DefaultTrainingPlanService.class);
private final TrainingPlanRepository trainingPlanRepository;
private final ExcercicesHasTrainingPlanRepository exercisesHasTrainingPlanRepository;
private final SerieRepository serieRepository;
@Autowired
public DefaultTrainingPlanService(TrainingPlanRepository trainingPlanRepository,ExcercicesHasTrainingPlanRepository exercisesHasTrainingPlanRepository,SerieRepository serieRepository) {
this.trainingPlanRepository = trainingPlanRepository;
this.exercisesHasTrainingPlanRepository = exercisesHasTrainingPlanRepository;
this.serieRepository = serieRepository;
}
@Override
@Transactional
public TrainingPlan getById(Long trainingPlanId) {
TrainingPlan trainingPlan =trainingPlanRepository.getById(trainingPlanId);
if(trainingPlan!=null) {
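            // Touch the lazy collection so it is initialized while the
            // transaction (and thus the persistence context) is still open.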
trainingPlan.getExercisesHasTrainingplans().size();
}
return trainingPlan;
}
@Transactional
public TrainingPlan save(TrainingPlan newPlan) {
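        // Persist the plan first (when new) so its generated id can be
        // copied onto each exercise link and serie before the links are saved.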
Collection<ExcercicesHasTrainingPlan> exerciseList=newPlan.getExercisesHasTrainingplans();
try {
if(newPlan.getEntityId()==null) {
newPlan = trainingPlanRepository.save(newPlan);
}
for (ExcercicesHasTrainingPlan exerciseHasTraining : exerciseList) {
Serie serie = serieRepository.save(exerciseHasTraining.getSerie());
exerciseHasTraining.setSerie(serie);
exerciseHasTraining.setSerieId(serie.getEntityId());
exerciseHasTraining.setTrainingplanId(newPlan.getEntityId());
exerciseHasTraining.setTrainingPlan(newPlan);
}
exercisesHasTrainingPlanRepository.saveList(exerciseList);
}catch(Exception e){
logger.error("Exception saving plan " + e);
}
return newPlan;
}
public Collection<TrainingPlan> getByTrainerID(Long trainerId){
return trainingPlanRepository.getByTrainerID(trainerId);
}
}
| lletsica/my_test_repo | rave-components/rave-core/src/main/java/org/apache/rave/portal/service/impl/DefaultTrainingPlanService.java | Java | apache-2.0 | 2,990 |
/*
===========================================================================
Copyright 2002-2010 Martin Dvorak
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===========================================================================
*/
package com.mindcognition.mindraider.ui.swing.trash;
import java.awt.BorderLayout;
import java.awt.GridLayout;
import java.awt.Toolkit;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.event.FocusEvent;
import java.util.HashMap;
import javax.swing.JButton;
import javax.swing.JOptionPane;
import javax.swing.JPanel;
import javax.swing.JScrollPane;
import javax.swing.JToolBar;
import javax.swing.JTree;
import javax.swing.event.TreeExpansionEvent;
import javax.swing.event.TreeExpansionListener;
import javax.swing.event.TreeModelEvent;
import javax.swing.event.TreeModelListener;
import javax.swing.event.TreeSelectionEvent;
import javax.swing.event.TreeSelectionListener;
import javax.swing.event.TreeWillExpandListener;
import javax.swing.tree.DefaultMutableTreeNode;
import javax.swing.tree.DefaultTreeModel;
import javax.swing.tree.ExpandVetoException;
import javax.swing.tree.MutableTreeNode;
import javax.swing.tree.TreePath;
import javax.swing.tree.TreeSelectionModel;
import org.apache.commons.lang.ArrayUtils;
import org.apache.log4j.Logger;
import com.emental.mindraider.core.MindRaider;
import com.emental.mindraider.core.rest.Metadata;
import com.emental.mindraider.core.rest.ResourceDescriptor;
import com.emental.mindraider.core.rest.resource.FolderResource;
import com.emental.mindraider.core.rest.resource.OutlineResource;
import com.emental.mindraider.ui.dialogs.ProgressDialogJFrame;
import com.emental.mindraider.ui.gfx.IconsRegistry;
import com.mindcognition.mindraider.application.model.label.LabelCustodianListener;
import com.mindcognition.mindraider.l10n.Messages;
import com.mindcognition.mindraider.ui.swing.dialogs.RestoreNotebookJDialog;
import com.mindcognition.mindraider.ui.swing.explorer.ExplorerJPanel;
import com.mindcognition.mindraider.utils.SwingWorker;
public class TrashJPanel extends JPanel implements
TreeWillExpandListener, TreeExpansionListener, LabelCustodianListener {
private static final Logger logger = Logger.getLogger(TrashJPanel.class);
public static final int LEVEL_ROOT = 0;
public static final int LEVEL_FOLDERS = 1;
public static final int LEVEL_NOTEBOOKS = 2;
/*
* UI components
*/
protected DefaultMutableTreeNode rootNode;
protected DefaultTreeModel treeModel;
protected final JTree tree;
protected JButton undoButton, emptyButton, deleteButton;
private Toolkit toolkit = Toolkit.getDefaultToolkit();
/*
* model
*/
private HashMap treeNodeToResourceUriMap;
/*
* singleton
*/
private static TrashJPanel singleton;
public static TrashJPanel getInstance() {
if (singleton == null) {
singleton = new TrashJPanel();
}
return singleton;
}
private ResourceDescriptor[] discardedNotebooksDescriptors;
/**
* Constructor.
*/
private TrashJPanel() {
treeNodeToResourceUriMap = new HashMap();
rootNode = new DefaultMutableTreeNode(Messages.getString("TrashJPanel.notebookArchive"));
treeModel = new DefaultTreeModel(rootNode);
treeModel.addTreeModelListener(new MyTreeModelListener());
tree = new JTree(treeModel);
tree.setEditable(false);
tree.getSelectionModel().setSelectionMode(
TreeSelectionModel.SINGLE_TREE_SELECTION);
tree.addTreeExpansionListener(this);
tree.addTreeWillExpandListener(this);
tree.setShowsRootHandles(true);
tree.putClientProperty("JTree.lineStyle", "Angled");
// tree rendered
        // TODO implement own renderer in order to support tooltips
tree.setCellRenderer(new TrashTreeCellRenderer(IconsRegistry
.getImageIcon("trashFull.png"), IconsRegistry
.getImageIcon("explorerNotebookIcon.png")));
setLayout(new BorderLayout());
// control panel
JToolBar tp = new JToolBar();
tp.setLayout(new GridLayout(1, 6));
undoButton = new JButton("", IconsRegistry
.getImageIcon("trashUndo.png"));
undoButton.setEnabled(false);
undoButton.setToolTipText("Restore Outline");
undoButton.addActionListener(new ActionListener() {
public void actionPerformed(ActionEvent e) {
DefaultMutableTreeNode node = (DefaultMutableTreeNode) tree
.getLastSelectedPathComponent();
if (node == null) {
return;
}
new RestoreNotebookJDialog(
(String)treeNodeToResourceUriMap.get(node),
"Restore Outline",
"Restore",
true);
}
});
tp.add(undoButton);
deleteButton = new JButton("", IconsRegistry
.getImageIcon("explorerDeleteSmall.png"));
deleteButton.setToolTipText("Delete Outline");
deleteButton.addActionListener(new ActionListener() {
public void actionPerformed(ActionEvent e) {
DefaultMutableTreeNode node = (DefaultMutableTreeNode) tree
.getLastSelectedPathComponent();
if (node == null) {
return;
}
int result = JOptionPane.showConfirmDialog(
MindRaider.mainJFrame,
"Do you really want to DELETE this Outline?",
"Delete Outline", JOptionPane.YES_NO_OPTION);
if (result == JOptionPane.YES_OPTION) {
MindRaider.labelCustodian
.deleteOutline((String) treeNodeToResourceUriMap
.get(node));
refresh();
ExplorerJPanel.getInstance().refresh();
}
}
});
tp.add(deleteButton);
emptyButton = new JButton("", IconsRegistry
.getImageIcon("trashEmpty.png"));
emptyButton.setToolTipText(Messages.getString("TrashJPanel.emptyArchive"));
emptyButton.addActionListener(new ActionListener() {
public void actionPerformed(ActionEvent e) {
int result = JOptionPane
.showConfirmDialog(
MindRaider.mainJFrame,
"Do you really want to DELETE all discarded Outlines?",
"Empty Trash", JOptionPane.YES_NO_OPTION);
if (result == JOptionPane.YES_OPTION) {
final SwingWorker worker = new SwingWorker() {
public Object construct() {
ProgressDialogJFrame progressDialogJFrame = new ProgressDialogJFrame(
"Empty Trash",
"<html><br> <b>Deleting:</b> </html>");
try {
ResourceDescriptor[] resourceDescriptors = MindRaider.labelCustodian
.getDiscardedOutlineDescriptors();
if (resourceDescriptors != null) {
for (int i = 0; i < resourceDescriptors.length; i++) {
MindRaider.labelCustodian
.deleteOutline(resourceDescriptors[i]
.getUri());
}
refresh();
}
} finally {
if (progressDialogJFrame != null) {
progressDialogJFrame.dispose();
}
}
return null;
}
};
worker.start();
}
}
});
tp.add(emptyButton);
add(tp, BorderLayout.NORTH);
// add the tree
JScrollPane scrollPane = new JScrollPane(tree);
add(scrollPane);
// build the whole tree
buildTree();
// click handler
tree.addTreeSelectionListener(new TreeSelectionListener() {
public void valueChanged(TreeSelectionEvent e) {
DefaultMutableTreeNode node = (DefaultMutableTreeNode) tree
.getLastSelectedPathComponent();
if (node == null) {
return;
}
logger.debug("Tree selection path: "
+ node.getPath()[node.getLevel()]);
enableDisableToolbarButtons(node.getLevel());
}
});
}
/**
* Build tree. This method is called on startup and tree refresh in order to
* reload disc content. Adding/removing of particular nodes during the
* program run is performed on individual nodes.
*/
void buildTree() {
discardedNotebooksDescriptors = MindRaider.labelCustodian
.getDiscardedOutlineDescriptors();
if (!ArrayUtils.isEmpty(discardedNotebooksDescriptors)) {
for (int i = 0; i < discardedNotebooksDescriptors.length; i++) {
addDiscardedNotebookNode(discardedNotebooksDescriptors[i]
.getLabel(), discardedNotebooksDescriptors[i].getUri());
}
            // now expand all rows
for (int i = 0; i < tree.getRowCount(); i++) {
tree.expandRow(i);
}
}
tree.setSelectionRow(0);
enableDisableToolbarButtons(0);
}
/**
* Add discarded notebook node.
*
* @param uri
* notebook node.
* @return the node.
*/
public DefaultMutableTreeNode addDiscardedNotebookNode(String label,
String uri) {
DefaultMutableTreeNode parent = null;
Object child = label;
DefaultMutableTreeNode childNode = new DefaultMutableTreeNode(child);
// store node to map to be able to get URI from node object
treeNodeToResourceUriMap.put(childNode, uri);
if (parent == null) {
parent = rootNode;
}
treeModel.insertNodeInto(childNode, parent, parent.getChildCount());
return childNode;
}
/**
* Call this method in order to update the tree.
*/
public void refresh() {
clear();
buildTree();
}
/**
* Move notebook up in the folder.
*
* @param notebookUri
* @param folderUri
*/
protected boolean moveNotebookUp(String folderUri, String notebookUri) {
logger.debug(" moveNotebookUp: " + folderUri + " " + notebookUri);
if (folderUri != null && notebookUri != null) {
try {
// add notebook to folder
boolean result = MindRaider.labelCustodian.moveNotebookUp(
folderUri, notebookUri);
// TODO PERFORMANCE move it just in the tree instead of refresh
refresh();
return result;
} catch (Exception e1) {
logger.error("moveNotebookUp(String, String)", e1);
JOptionPane.showMessageDialog(TrashJPanel.this,
"Outline Manipulation Error",
"Unable to move outline up: " + e1.getMessage(),
JOptionPane.ERROR_MESSAGE);
return false;
}
}
logger.debug("Outline wont be added URIs are null!");
return false;
}
/**
* Move notebook down in the folder.
*
* @param notebookUri
* @param folderUri
*/
protected boolean moveNotebookDown(String folderUri, String notebookUri) {
logger.debug(" moveNotebookDown: " + folderUri + " " + notebookUri);
if (folderUri != null && notebookUri != null) {
try {
boolean result = MindRaider.labelCustodian.moveNotebookDown(
folderUri, notebookUri);
// TODO PERFORMANCE move it just in the tree instead of refresh
refresh();
return result;
} catch (Exception e1) {
logger.error("moveNotebookDown(String, String)", e1);
JOptionPane.showMessageDialog(TrashJPanel.this,
"Outline Manipulation Error",
"Unable to move outline down: " + e1.getMessage(),
JOptionPane.ERROR_MESSAGE);
return false;
}
}
logger.debug("Outline wont be added URIs are null!");
return false;
}
/**
* Add notebook node to folder node (on new notebook creation).
*
* @param notebookUri
* newly created notebook URI.
*/
public void addNotebookToFolder(String notebookUri) {
logger.debug(" URI of created notebook is: " + notebookUri);
if (notebookUri != null) {
// add notebook to selected folder
TreePath treePath = tree.getSelectionPath();
String folderUri = (String) treeNodeToResourceUriMap.get(treePath
.getLastPathComponent());
logger.debug("Enclosing folder URI is: " + folderUri);
if (folderUri != null) {
try {
// add notebook to folder
MindRaider.labelCustodian.addOutline(folderUri,
notebookUri);
// now add it in the tree
OutlineResource notebookResource = MindRaider.outlineCustodian
.getActiveOutlineResource();
addNotebookNode((DefaultMutableTreeNode) treePath
.getLastPathComponent(), notebookResource.resource
.getMetadata().getUri().toASCIIString(),
notebookResource.getLabel());
} catch (Exception e1) {
logger.error("addNotebookToFolder(String)", e1);
JOptionPane.showMessageDialog(TrashJPanel.this,
"Outline Creation Error",
"Unable to add Outline to folder: "
+ e1.getMessage(),
JOptionPane.ERROR_MESSAGE);
return;
}
}
} else {
logger
.debug("Outline wont be added to folder - it's URI is null!");
}
}
/**
* Remove all nodes except the root node.
*/
public void clear() {
rootNode.removeAllChildren();
treeModel.reload();
treeNodeToResourceUriMap.clear();
}
/**
* Remove the currently selected node.
*/
public void removeCurrentNode() {
TreePath currentSelection = tree.getSelectionPath();
if (currentSelection != null) {
DefaultMutableTreeNode currentNode = (DefaultMutableTreeNode) (currentSelection
.getLastPathComponent());
MutableTreeNode parent = (MutableTreeNode) (currentNode.getParent());
if (parent != null) {
treeModel.removeNodeFromParent(currentNode);
return;
}
}
// Either there was no selection, or the root was selected.
toolkit.beep();
}
/**
* Add child to the currently selected node.
*/
public DefaultMutableTreeNode addObject(Object child) {
DefaultMutableTreeNode parentNode = null;
TreePath parentPath = tree.getSelectionPath();
if (parentPath == null) {
parentNode = rootNode;
} else {
parentNode = (DefaultMutableTreeNode) (parentPath
.getLastPathComponent());
}
return addObject(parentNode, child, true);
}
public DefaultMutableTreeNode addObject(DefaultMutableTreeNode parent,
Object child) {
return addObject(parent, child, false);
}
/**
* Add folder node.
*
* @param uri
* folder URI.
* @return the node.
*/
public DefaultMutableTreeNode addFolderNode(String uri) {
DefaultMutableTreeNode parent = null;
// get label from URI
FolderResource resource = new FolderResource(MindRaider.labelCustodian
.get(uri));
Object child = resource.getLabel();
DefaultMutableTreeNode childNode = new DefaultMutableTreeNode(child);
// store node to map to be able to get URI from node object
treeNodeToResourceUriMap.put(childNode, uri);
if (parent == null) {
parent = rootNode;
}
treeModel.insertNodeInto(childNode, parent, parent.getChildCount());
return childNode;
}
/**
* Add notebook node.
*
* @param parent
* folder node.
* @param uri
* notebook URI.
* @param label
* notebook label.
* @return the node.
*/
public DefaultMutableTreeNode addNotebookNode(
DefaultMutableTreeNode parent, String uri, String label) {
Object child = label;
DefaultMutableTreeNode childNode = new DefaultMutableTreeNode(child);
// store node to map to be able to get URI from node object
treeNodeToResourceUriMap.put(childNode, uri);
if (parent == null) {
parent = rootNode;
}
treeModel.insertNodeInto(childNode, parent, parent.getChildCount());
return childNode;
}
/**
* Add an child object to a parent object.
*
* @param parent
* the parent object.
* @param child
* the child object.
* @param shouldBeVisible
* if <code>true</code> the object should be visible.
* @return Returns a <code>DefaultMutableTreeNode</code>
*/
public DefaultMutableTreeNode addObject(DefaultMutableTreeNode parent,
Object child, boolean shouldBeVisible) {
DefaultMutableTreeNode childNode = new DefaultMutableTreeNode(child);
if (parent == null) {
parent = rootNode;
}
treeModel.insertNodeInto(childNode, parent, parent.getChildCount());
// Make sure the user can see the lovely new node.
if (shouldBeVisible) {
tree.scrollPathToVisible(new TreePath(childNode.getPath()));
}
return childNode;
}
/**
     * Custom MyTreeModelListener class.
*/
class MyTreeModelListener implements TreeModelListener {
/**
* Logger for this class.
*/
private final Logger logger = Logger
.getLogger(MyTreeModelListener.class);
/**
* @see javax.swing.event.TreeModelListener#treeNodesChanged(javax.swing.event.TreeModelEvent)
*/
public void treeNodesChanged(TreeModelEvent e) {
DefaultMutableTreeNode node;
node = (DefaultMutableTreeNode) (e.getTreePath()
.getLastPathComponent());
/*
* If the event lists children, then the changed node is the child
* of the node we've already gotten. Otherwise, the changed node and
* the specified node are the same.
*/
// ToDo
try {
int index = e.getChildIndices()[0];
node = (DefaultMutableTreeNode) (node.getChildAt(index));
} catch (NullPointerException exc) {
//
}
logger.debug("The user has finished editing the node.");
logger.debug("New value: " + node.getUserObject());
}
public void treeNodesInserted(TreeModelEvent e) {
}
public void treeNodesRemoved(TreeModelEvent e) {
}
public void treeStructureChanged(TreeModelEvent e) {
}
}
public void treeCollapsed(TreeExpansionEvent e) {
logger.debug("Tree colapsed event..." + e.getPath());
}
/**
* @see javax.swing.event.TreeExpansionListener#treeExpanded(javax.swing.event.TreeExpansionEvent)
*/
public void treeExpanded(TreeExpansionEvent e) {
logger.debug("Tree expanded event..." + e.getPath());
}
/**
* @see javax.swing.event.TreeWillExpandListener#treeWillCollapse(javax.swing.event.TreeExpansionEvent)
*/
public void treeWillCollapse(TreeExpansionEvent e)
throws ExpandVetoException {
logger.debug("Tree will collapse " + e.getPath());
}
/**
* @see javax.swing.event.TreeWillExpandListener#treeWillExpand(javax.swing.event.TreeExpansionEvent)
*/
public void treeWillExpand(TreeExpansionEvent e) throws ExpandVetoException {
logger.debug("Tree will expand " + e.getPath());
/*
* DefaultMutableTreeNode node = (DefaultMutableTreeNode)
* tree.getLastSelectedPathComponent(); if (node == null) { return; }
* logger.debug(""+node.getPath()[node.getLevel()]); // buttons
* disabling switch(node.getLevel()) { case LEVEL_FOLDERS: // disconnect
* childrens from the node Enumeration enumeration=node.children(); //
* delete nodes itself while (enumeration.hasMoreElements()) { Object
* object=enumeration.nextElement();
* treeNodeToResourceUriMap.remove(object);
* treeModel.removeNodeFromParent((MutableTreeNode)object); } // get
* folder URI logger.debug("Expanding folder:
* "+treeNodeToResourceUriMap.get(node)); FolderResource folder =new
* FolderResource(MindRaider.folderCustodian.get((String)treeNodeToResourceUriMap.get(node)));
* String[] notebookUris=folder.getNotebookUris(); if (notebookUris !=
* null) { for (int i= 0; i < notebookUris.length; i++) {
* NotebookResource notebook=new
* NotebookResource(MindRider.notebookCustodian.get(notebookUris[i]));
* addNotebookNode(node,notebook.resource.metadata.uri.toASCIIString(),notebook.getLabel()); } } }
*/
}
/**
* @see com.emental.LabelCustodianListener.folder.FolderCustodianListener#folderCreated()
*/
public void labelCreated(FolderResource folder) {
Metadata meta = folder.getResource().getMetadata();
logger.debug("Folder created: " + meta.getUri().toASCIIString());
// handle creation of the folder
addFolderNode(meta.getUri().toASCIIString());
}
/**
* @see java.awt.event.FocusListener#focusGained(java.awt.event.FocusEvent)
*/
public void focusGained(FocusEvent arg0) {
// TODO Auto-generated method stub
}
/**
* Change status in the toolbar buttons.
*
* @param level
* The level could be <code>LEVEL_ROOT</code> or
* <code>LEVEL_FOLDERS</code>
*/
protected void enableDisableToolbarButtons(int level) {
// buttons disabling
switch (level) {
case LEVEL_ROOT:
undoButton.setEnabled(false);
deleteButton.setEnabled(false);
emptyButton.setEnabled(true);
break;
case LEVEL_FOLDERS:
undoButton.setEnabled(true);
deleteButton.setEnabled(true);
emptyButton.setEnabled(true);
break;
}
}
private static final long serialVersionUID = 5028293540089775890L;
}
| dvorka/mindraider | mr7/src/main/java/com/mindcognition/mindraider/ui/swing/trash/TrashJPanel.java | Java | apache-2.0 | 24,481 |
package com.fpliu.newton.ui.list;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AdapterView;
import android.widget.GridView;
/**
* @author [email protected] 2017-06-30.
*/
public interface IGrid<T, V extends GridView> extends ICommon<T> {
V getGridView();
void setItemAdapter(ItemAdapter<T> itemAdapter);
ItemAdapter<T> getItemAdapter();
void setOnItemClickListener(AdapterView.OnItemClickListener listener);
int getItemViewTypeCount();
int getItemViewType(int position);
View getItemView(int position, View convertView, ViewGroup parent);
void notifyDataSetChanged();
void setNumColumns(int numColumns);
}
| leleliu008/Android-List | library/src/main/java/com/fpliu/newton/ui/list/IGrid.java | Java | apache-2.0 | 688 |
/*******************************************************************************
* Copyright (c) 2012-2013 University of Stuttgart.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* and the Apache License 2.0 which both accompany this distribution,
* and are available at http://www.eclipse.org/legal/epl-v10.html
* and http://www.apache.org/licenses/LICENSE-2.0
*
* Contributors:
* Oliver Kopp - initial API and implementation
*******************************************************************************/
/**
* This package contains the REST resources
*
 * Mostly, they produce Viewables, where a JSP and the current resource are
 * passed. As the JSP itself handles plain Java objects and not Responses, the
 * resources also have methods returning POJOs. This might be ugly design, but
 * it was quick to implement.
*
* The package structure is mirrored in src/main/webapp/jsp to ease finding the
* JSPs belonging to a resource.
*
* The resources are <em>not</em> in line with the resource model of the TOSCA
* container. Especially, we do not employ HATEOAS here.
*/
package org.eclipse.winery.repository.resources;
| YannicSowoidnich/winery | org.eclipse.winery.repository/src/main/java/org/eclipse/winery/repository/resources/package-info.java | Java | apache-2.0 | 1,234 |
/**
*
Package: MAG - VistA Imaging
WARNING: Per VHA Directive 2004-038, this routine should not be modified.
Date Created: Jul 10, 2012
Site Name: Washington OI Field Office, Silver Spring, MD
Developer: VHAISWWERFEJ
Description:
;; +--------------------------------------------------------------------+
;; Property of the US Government.
;; No permission to copy or redistribute this software is given.
;; Use of unreleased versions of this software requires the user
;; to execute a written test agreement with the VistA Imaging
;; Development Office of the Department of Veterans Affairs,
;; telephone (301) 734-0100.
;;
;; The Food and Drug Administration classifies this software as
;; a Class II medical device. As such, it may not be changed
;; in any way. Modifications to this software may result in an
;; adulterated medical device under 21CFR820, the use of which
;; is considered to be a violation of US Federal Statutes.
;; +--------------------------------------------------------------------+
*/
package gov.va.med.imaging.pathology.rest.translator;
import java.util.Date;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* @author VHAISWWERFEJ
*
*/
public class PathologyRestTranslatorTest
{
@Test
public void testDateTranslation()
{
try
{
Date date = PathologyRestTranslator.translateDate("201207101435");
System.out.println("Date: " + date);
}
catch(Exception ex)
{
ex.printStackTrace();
fail(ex.getMessage());
}
}
}
| VHAINNOVATIONS/Telepathology | Source/Java/PathologyWebApp/main/test/java/gov/va/med/imaging/pathology/rest/translator/PathologyRestTranslatorTest.java | Java | apache-2.0 | 1,621 |
package io.quarkus.grpc.examples.hello;
import static io.restassured.RestAssured.get;
import static org.assertj.core.api.Assertions.assertThat;
import org.junit.jupiter.api.Test;
import io.quarkus.test.junit.QuarkusTest;
@QuarkusTest
class HelloWorldMutualTlsEndpointTest {
@Test
public void testHelloWorldServiceUsingBlockingStub() {
String response = get("/hello/blocking/neo").asString();
assertThat(response).isEqualTo("Hello neo");
}
@Test
public void testHelloWorldServiceUsingMutinyStub() {
String response = get("/hello/mutiny/neo-mutiny").asString();
assertThat(response).isEqualTo("Hello neo-mutiny");
}
}
| quarkusio/quarkus | integration-tests/grpc-mutual-auth/src/test/java/io/quarkus/grpc/examples/hello/HelloWorldMutualTlsEndpointTest.java | Java | apache-2.0 | 679 |
/*
* avdtp_internal.h - avdtp handling
* Copyright (c) 2015-2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <bluetooth/avdtp.h>
/* @brief A2DP ROLE's */
#define A2DP_SRC_ROLE 0x00
#define A2DP_SNK_ROLE 0x01
/* @brief AVDTP Role */
#define BT_AVDTP_INT 0x00
#define BT_AVDTP_ACP 0x01
#define BT_L2CAP_PSM_AVDTP 0x0019
/* AVDTP SIGNAL HEADER - Packet Type*/
#define BT_AVDTP_PACKET_TYPE_SINGLE 0x00
#define BT_AVDTP_PACKET_TYPE_START 0x01
#define BT_AVDTP_PACKET_TYPE_CONTINUE 0x02
#define BT_AVDTP_PACKET_TYPE_END 0x03
/* AVDTP SIGNAL HEADER - MESSAGE TYPE */
#define BT_AVDTP_MSG_TYPE_CMD 0x00
#define BT_AVDTP_MSG_TYPE_GEN_REJECT 0x01
#define BT_AVDTP_MSG_TYPE_ACCEPT 0x02
#define BT_AVDTP_MSG_TYPE_REJECT 0x03
/* @brief AVDTP SIGNAL HEADER - Signal Identifier */
#define BT_AVDTP_DISCOVER 0x01
#define BT_AVDTP_GET_CAPABILITIES 0x02
#define BT_AVDTP_SET_CONFIGURATION 0x03
#define BT_AVDTP_GET_CONFIGURATION 0x04
#define BT_AVDTP_RECONFIGURE 0x05
#define BT_AVDTP_OPEN 0x06
#define BT_AVDTP_START 0x07
#define BT_AVDTP_CLOSE 0x08
#define BT_AVDTP_SUSPEND 0x09
#define BT_AVDTP_ABORT 0x0a
#define BT_AVDTP_SECURITY_CONTROL 0x0b
#define BT_AVDTP_GET_ALL_CAPABILITIES 0x0c
#define BT_AVDTP_DELAYREPORT 0x0d
/* @brief AVDTP STATE */
#define BT_AVDTP_STATE_IDLE 0x01
#define BT_AVDTP_STATE_CONFIGURED 0x02
#define BT_AVDTP_STATE_OPEN 0x03
#define BT_AVDTP_STATE_STREAMING 0x04
#define BT_AVDTP_STATE_CLOSING 0x05
#define BT_AVDTP_STATE_ABORT 0x06
#define BT_AVDTP_STATE_SIG_CONNECTED 0x07
#define BT_AVDTP_STATE_SIG_DISCONNECTED 0x08
#define BT_AVDTP_STATE_INVALID 0x00
/* @brief AVDTP Media TYPE */
#define BT_AVDTP_SERVICE_CAT_MEDIA_TRANSPORT 0x01
#define BT_AVDTP_SERVICE_CAT_REPORTING 0x02
#define BT_AVDTP_SERVICE_CAT_RECOVERY 0x03
#define BT_AVDTP_SERVICE_CAT_CONTENT_PROTECTION 0x04
#define BT_AVDTP_SERVICE_CAT_HDR_COMPRESSION 0x05
#define BT_AVDTP_SERVICE_CAT_MULTIPLEXING 0x06
#define BT_AVDTP_SERVICE_CAT_MEDIA_CODEC 0x07
#define BT_AVDTP_SERVICE_CAT_DELAYREPORTING 0x08
/* AVDTP Error Codes */
#define BT_AVDTP_SUCCESS 0x00
#define BT_AVDTP_ERR_BAD_HDR_FORMAT 0x01
#define BT_AVDTP_ERR_BAD_LENGTH 0x11
#define BT_AVDTP_ERR_BAD_ACP_SEID 0x12
#define BT_AVDTP_ERR_SEP_IN_USE 0x13
#define BT_AVDTP_ERR_SEP_NOT_IN_USE 0x14
#define BT_AVDTP_ERR_BAD_SERV_CATEGORY 0x17
#define BT_AVDTP_ERR_BAD_PAYLOAD_FORMAT 0x18
#define BT_AVDTP_ERR_NOT_SUPPORTED_COMMAND 0x19
#define BT_AVDTP_ERR_INVALID_CAPABILITIES 0x1a
#define BT_AVDTP_ERR_BAD_RECOVERY_TYPE 0x22
#define BT_AVDTP_ERR_BAD_MEDIA_TRANSPORT_FORMAT 0x23
#define BT_AVDTP_ERR_BAD_RECOVERY_FORMAT 0x25
#define BT_AVDTP_ERR_BAD_ROHC_FORMAT 0x26
#define BT_AVDTP_ERR_BAD_CP_FORMAT 0x27
#define BT_AVDTP_ERR_BAD_MULTIPLEXING_FORMAT 0x28
#define BT_AVDTP_ERR_UNSUPPORTED_CONFIGURAION 0x29
#define BT_AVDTP_ERR_BAD_STATE 0x31
#define BT_AVDTP_MIN_MTU 48
#define BT_AVDTP_MAX_MTU CONFIG_BLUETOOTH_L2CAP_IN_MTU
#define BT_AVDTP_MIN_SEID 0x01
#define BT_AVDTP_MAX_SEID 0x3E
/* Helper to calculate needed outgoing buffer size. */
#define BT_AVDTP_BUF_SIZE(mtu) (CONFIG_BLUETOOTH_HCI_SEND_RESERVE + \
sizeof(struct bt_hci_acl_hdr) + \
sizeof(struct bt_l2cap_hdr) + \
BT_AVDTP_SIG_HDR_LEN + (mtu))
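/* Single-packet signaling header, per the AVDTP spec: "hdr" packs the
 * transaction label (bits 7..4), packet type (bits 3..2) and message type
 * (bits 1..0); "signal_id" carries the signal identifier in bits 5..0.
 */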
struct bt_avdtp_single_sig_hdr {
uint8_t hdr;
uint8_t signal_id;
} __packed;
#define BT_AVDTP_SIG_HDR_LEN sizeof(struct bt_avdtp_single_sig_hdr)
struct bt_avdtp_cfm_cb {
/*
* Discovery_cfm;
* get_capabilities_cfm;
* set_configuration_cfm;
* open_cfm;
* start_cfm;
* suspend_cfm;
* close_cfm;
*/
};
struct bt_avdtp_ind_cb {
/*
* discovery_ind;
* get_capabilities_ind;
* set_configuration_ind;
* open_ind;
* start_ind;
* suspend_ind;
* close_ind;
*/
};
struct bt_avdtp_event_cb {
struct bt_avdtp_ind_cb *ind;
struct bt_avdtp_cfm_cb *cfm;
};
/** @brief Global AVDTP session structure. */
struct bt_avdtp {
struct bt_l2cap_br_chan br_chan;
uint8_t state; /* current state of AVDTP*/
};
/* Initialize AVDTP layer*/
int bt_avdtp_init(void);
/* Application register with AVDTP layer */
int bt_avdtp_register(struct bt_avdtp_event_cb *cb);
/* AVDTP connect */
int bt_avdtp_connect(struct bt_conn *conn, struct bt_avdtp *session);
/* AVDTP disconnect */
int bt_avdtp_disconnect(struct bt_avdtp *session);
/* AVDTP SEP register function */
int bt_avdtp_register_sep(uint8_t media_type, uint8_t role,
struct bt_avdtp_seid_lsep *sep);
| Jason0204/jasontek_f103rb-zephyrOS-project | subsys/bluetooth/host/avdtp_internal.h | C | apache-2.0 | 5,327 |
/**
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* under the License.
*/
package org.apache.hadoop.hbase.filter;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import com.google.common.collect.Lists;
/**
*/
@Category(MediumTests.class)
public class TestFuzzyRowAndColumnRangeFilter {
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final Log LOG = LogFactory.getLog(this.getClass());
/**
* @throws java.lang.Exception
*/
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.startMiniCluster();
}
/**
* @throws java.lang.Exception
*/
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
/**
* @throws java.lang.Exception
*/
@Before
public void setUp() throws Exception {
// Nothing to do.
}
/**
* @throws java.lang.Exception
*/
@After
public void tearDown() throws Exception {
// Nothing to do.
}
@Test
public void Test() throws Exception {
String cf = "f";
String table = "TestFuzzyAndColumnRangeFilterClient";
HTable ht = TEST_UTIL.createTable(Bytes.toBytes(table),
Bytes.toBytes(cf), Integer.MAX_VALUE);
// 10 byte row key - (2 bytes 4 bytes 4 bytes)
// 4 byte qualifier
// 4 byte value
for (int i1 = 0; i1 < 2; i1++) {
for (int i2 = 0; i2 < 5; i2++) {
byte[] rk = new byte[10];
ByteBuffer buf = ByteBuffer.wrap(rk);
buf.clear();
buf.putShort((short) 2);
buf.putInt(i1);
buf.putInt(i2);
for (int c = 0; c < 5; c++) {
byte[] cq = new byte[4];
Bytes.putBytes(cq, 0, Bytes.toBytes(c), 0, 4);
Put p = new Put(rk);
p.setDurability(Durability.SKIP_WAL);
p.add(cf.getBytes(), cq, Bytes.toBytes(c));
ht.put(p);
LOG.info("Inserting: rk: " + Bytes.toStringBinary(rk) + " cq: "
+ Bytes.toStringBinary(cq));
}
}
}
TEST_UTIL.flush();
// test passes
runTest(ht, 0, 10);
// test fails
runTest(ht, 1, 8);
}
private void runTest(HTable hTable, int cqStart, int expectedSize) throws IOException {
// [0, 2, ?, ?, ?, ?, 0, 0, 0, 1]
byte[] fuzzyKey = new byte[10];
ByteBuffer buf = ByteBuffer.wrap(fuzzyKey);
buf.clear();
buf.putShort((short) 2);
for (int i = 0; i < 4; i++)
buf.put((byte)63);
buf.putInt((short)1);
byte[] mask = new byte[] {0 , 0, 1, 1, 1, 1, 0, 0, 0, 0};
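    // In the mask a 0 marks a byte that must match the fuzzy key and a 1
    // marks a wildcard, so the i1 part (bytes 2..5) may vary while the
    // 2-byte prefix and the trailing i2 == 1 are fixed.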
Pair<byte[], byte[]> pair = new Pair<byte[], byte[]>(fuzzyKey, mask);
FuzzyRowFilter fuzzyRowFilter = new FuzzyRowFilter(Lists.newArrayList(pair));
ColumnRangeFilter columnRangeFilter = new ColumnRangeFilter(Bytes.toBytes(cqStart), true
, Bytes.toBytes(4), true);
//regular test
runScanner(hTable, expectedSize, fuzzyRowFilter, columnRangeFilter);
//reverse filter order test
runScanner(hTable, expectedSize, columnRangeFilter, fuzzyRowFilter);
}
private void runScanner(HTable hTable, int expectedSize, Filter... filters) throws IOException {
String cf = "f";
Scan scan = new Scan();
scan.addFamily(cf.getBytes());
FilterList filterList = new FilterList(filters);
scan.setFilter(filterList);
ResultScanner scanner = hTable.getScanner(scan);
List<Cell> results = new ArrayList<Cell>();
Result result;
long timeBeforeScan = System.currentTimeMillis();
while ((result = scanner.next()) != null) {
for (Cell kv : result.listCells()) {
LOG.info("Got rk: " + Bytes.toStringBinary(CellUtil.cloneRow(kv)) + " cq: "
+ Bytes.toStringBinary(CellUtil.cloneQualifier(kv)));
results.add(kv);
}
}
long scanTime = System.currentTimeMillis() - timeBeforeScan;
scanner.close();
LOG.info("scan time = " + scanTime + "ms");
LOG.info("found " + results.size() + " results");
assertEquals(expectedSize, results.size());
}
} | intel-hadoop/hbase-rhino | hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowAndColumnRangeFilter.java | Java | apache-2.0 | 5,447 |
/*
* Trap
* An anti-pryer server for better privacy
*
* This file is a part of Trap project
*
* Copyright 2016 Rain Lee <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package logger
import (
"github.com/raincious/trap/trap/core/types"
"bufio"
"fmt"
"time"
)
type FilePrinter struct {
writer *bufio.Writer
writeCounts uint16
}
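// NewFilePrinter wraps w in a FilePrinter, probing it with a zero-length
// write so that an unusable destination fails at construction time rather
// than on the first log call.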
func NewFilePrinter(w *bufio.Writer) (*FilePrinter, *types.Throw) {
_, writeErr := w.Write([]byte(""))
if writeErr != nil {
return nil, types.ConvertError(writeErr)
}
return &FilePrinter{
writer: w,
}, nil
}
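// save appends one formatted record and flushes the buffer roughly every
// ten writes, bounding how many records an abrupt shutdown can lose.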
func (l *FilePrinter) save(w types.String, c types.String,
t time.Time, m types.String) {
_, err := l.writer.WriteString(fmt.Sprintf("<%s> %s [%s]: %s\r\n",
w, c, t.Format(time.StampMilli), m))
if err != nil {
panic(fmt.Errorf("Can't write log file due to error: %s", err))
}
l.writeCounts += 1
if l.writeCounts > 10 {
l.writer.Flush()
l.writeCounts = 0
}
}
func (l *FilePrinter) Info(c types.String, t time.Time, m types.String) {
l.save("INF", c, t, m)
}
func (l *FilePrinter) Debug(c types.String, t time.Time, m types.String) {
l.save("DBG", c, t, m)
}
func (l *FilePrinter) Warning(c types.String, t time.Time, m types.String) {
l.save("WRN", c, t, m)
}
func (l *FilePrinter) Error(c types.String, t time.Time, m types.String) {
l.save("ERR", c, t, m)
}
func (l *FilePrinter) Print(c types.String, t time.Time, m types.String) {
l.save("DEF", c, t, m)
}
| raincious/trap | trap/logger/file.go | GO | apache-2.0 | 1,990 |
local json = require "cjson"
local http_client = require "kong.tools.http_client"
local spec_helper = require "spec.spec_helpers"
local BASE_URL = spec_helper.API_URL.."/apis/%s/plugins/"
describe("Rate Limiting API", function()
setup(function()
spec_helper.prepare_db()
spec_helper.insert_fixtures {
api = {
{ name = "tests-rate-limiting1", request_host = "test1.com", upstream_url = "http://mockbin.com" }
}
}
spec_helper.start_kong()
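    -- Look up the id of the API inserted above so BASE_URL targets
    -- /apis/{id}/plugins/ in the requests below.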
local response = http_client.get(spec_helper.API_URL.."/apis/")
BASE_URL = string.format(BASE_URL, json.decode(response).data[1].id)
end)
teardown(function()
spec_helper.stop_kong()
end)
describe("POST", function()
it("should not save with empty config", function()
local response, status = http_client.post(BASE_URL, { name = "rate-limiting" })
local body = json.decode(response)
assert.are.equal(400, status)
assert.are.equal("You need to set at least one limit: second, minute, hour, day, month, year", body.message)
end)
it("should save with proper config", function()
local response, status = http_client.post(BASE_URL, { name = "rate-limiting", ["config.second"] = 10 })
local body = json.decode(response)
assert.are.equal(201, status)
assert.are.equal(10, body.config.second)
end)
end)
end)
| vzaramel/kong | spec/plugins/rate-limiting/api_spec.lua | Lua | apache-2.0 | 1,368 |
define([
'jquery',
'underscore',
'backbone',
'app'
],
function (
$, _, Backbone, app
) {
var Models = {},
Collections = {},
Views = {};
Models.Project = Backbone.Model.extend();
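    // Note: Projects is (apparently deliberately) a Backbone.Model rather
    // than a Collection, so url() can read the platform/uri attributes and
    // parse() stores the fetched array under a "projects" attribute.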
Collections.Projects = Backbone.Model.extend({
model: Models.Project,
url: function() {
return app.api('projects/' + this.get('platform') +
(this.get('uri') ? '/' + this.get('uri') : ''));
},
parse: function(res) {
return { projects: res };
}
});
Models.Project = Backbone.Model.extend({
url: function() {
return app.api('projects?' + this.get('params'));
}
});
return {
Models: Models,
Collections: Collections,
Views: Views
};
}); | Redmart/os-mobilizer | frontend/source/js/modules/Projects.js | JavaScript | apache-2.0 | 666 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train a ResNet-50 model on ImageNet on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import re
import sys
import time
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
# For Cloud environment, add parent directory for imports
sys.path.append(os.path.dirname(os.path.abspath(sys.path[0])))
from official.resnet import imagenet_input # pylint: disable=g-import-not-at-top
from official.resnet import resnet_main
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu
from tensorflow.python.estimator import estimator
FLAGS = tf.flags.FLAGS
CKPT_PATTERN = r'model\.ckpt-(?P<gs>[0-9]+)\.data'
flags.DEFINE_string(
'data_dir_small', default=None,
help=('The directory where the resized (160x160) ImageNet input data is '
'stored. This is only to be used in conjunction with the '
'resnet_benchmark.py script.'))
flags.DEFINE_bool(
'use_fast_lr', default=False,
help=('Enabling this uses a faster learning rate schedule along with '
'different image sizes in the input pipeline. This is only to be '
'used in conjunction with the resnet_benchmark.py script.'))
# Number of training and evaluation images in the standard ImageNet dataset
NUM_TRAIN_IMAGES = 1281167
NUM_EVAL_IMAGES = 50000
def main(unused_argv):
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=FLAGS.model_dir,
save_checkpoints_steps=FLAGS.iterations_per_loop,
keep_checkpoint_max=None,
tpu_config=contrib_tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_cores,
per_host_input_for_training=contrib_tpu.InputPipelineConfig.PER_HOST_V2)) # pylint: disable=line-too-long
# Input pipelines are slightly different (with regards to shuffling and
# preprocessing) between training and evaluation.
imagenet_train = imagenet_input.ImageNetInput(
is_training=True,
data_dir=FLAGS.data_dir,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input)
imagenet_eval = imagenet_input.ImageNetInput(
is_training=False,
data_dir=FLAGS.data_dir,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input)
if FLAGS.use_fast_lr:
resnet_main.LR_SCHEDULE = [ # (multiplier, epoch to start) tuples
(1.0, 4), (0.1, 21), (0.01, 35), (0.001, 43)
]
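    # Progressive resizing: the fast-LR run trains on 128x128 inputs for the
    # first ~18 epochs, the default size until ~41, then 288x288 to the end;
    # the eval branch below picks the matching input_fn per checkpoint epoch.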
imagenet_train_small = imagenet_input.ImageNetInput(
is_training=True,
image_size=128,
data_dir=FLAGS.data_dir_small,
num_parallel_calls=FLAGS.num_parallel_calls,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input,
cache=True)
imagenet_eval_small = imagenet_input.ImageNetInput(
is_training=False,
image_size=128,
data_dir=FLAGS.data_dir_small,
num_parallel_calls=FLAGS.num_parallel_calls,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input,
cache=True)
imagenet_train_large = imagenet_input.ImageNetInput(
is_training=True,
image_size=288,
data_dir=FLAGS.data_dir,
num_parallel_calls=FLAGS.num_parallel_calls,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input)
imagenet_eval_large = imagenet_input.ImageNetInput(
is_training=False,
image_size=288,
data_dir=FLAGS.data_dir,
num_parallel_calls=FLAGS.num_parallel_calls,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input)
resnet_classifier = contrib_tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=resnet_main.resnet_model_fn,
config=config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size)
if FLAGS.mode == 'train':
current_step = estimator._load_global_step_from_checkpoint_dir(FLAGS.model_dir) # pylint: disable=protected-access,line-too-long
batches_per_epoch = NUM_TRAIN_IMAGES / FLAGS.train_batch_size
tf.logging.info('Training for %d steps (%.2f epochs in total). Current'
' step %d.' % (FLAGS.train_steps,
FLAGS.train_steps / batches_per_epoch,
current_step))
start_timestamp = time.time() # This time will include compilation time
# Write a dummy file at the start of training so that we can measure the
# runtime at each checkpoint from the file write time.
tf.gfile.MkDir(FLAGS.model_dir)
if not tf.gfile.Exists(os.path.join(FLAGS.model_dir, 'START')):
with tf.gfile.GFile(os.path.join(FLAGS.model_dir, 'START'), 'w') as f:
f.write(str(start_timestamp))
if FLAGS.use_fast_lr:
small_steps = int(18 * NUM_TRAIN_IMAGES / FLAGS.train_batch_size)
normal_steps = int(41 * NUM_TRAIN_IMAGES / FLAGS.train_batch_size)
large_steps = int(min(50 * NUM_TRAIN_IMAGES / FLAGS.train_batch_size,
FLAGS.train_steps))
resnet_classifier.train(
input_fn=imagenet_train_small.input_fn, max_steps=small_steps)
resnet_classifier.train(
input_fn=imagenet_train.input_fn, max_steps=normal_steps)
resnet_classifier.train(
input_fn=imagenet_train_large.input_fn,
max_steps=large_steps)
else:
resnet_classifier.train(
input_fn=imagenet_train.input_fn, max_steps=FLAGS.train_steps)
else:
assert FLAGS.mode == 'eval'
start_timestamp = tf.gfile.Stat(
os.path.join(FLAGS.model_dir, 'START')).mtime_nsec
results = []
eval_steps = NUM_EVAL_IMAGES // FLAGS.eval_batch_size
ckpt_steps = set()
all_files = tf.gfile.ListDirectory(FLAGS.model_dir)
for f in all_files:
mat = re.match(CKPT_PATTERN, f)
if mat is not None:
ckpt_steps.add(int(mat.group('gs')))
ckpt_steps = sorted(list(ckpt_steps))
tf.logging.info('Steps to be evaluated: %s' % str(ckpt_steps))
for step in ckpt_steps:
ckpt = os.path.join(FLAGS.model_dir, 'model.ckpt-%d' % step)
batches_per_epoch = NUM_TRAIN_IMAGES // FLAGS.train_batch_size
current_epoch = step // batches_per_epoch
if FLAGS.use_fast_lr:
if current_epoch < 18:
eval_input_fn = imagenet_eval_small.input_fn
if current_epoch >= 18 and current_epoch < 41:
eval_input_fn = imagenet_eval.input_fn
        if current_epoch >= 41:
eval_input_fn = imagenet_eval_large.input_fn
else:
eval_input_fn = imagenet_eval.input_fn
end_timestamp = tf.gfile.Stat(ckpt + '.index').mtime_nsec
elapsed_hours = (end_timestamp - start_timestamp) / (1e9 * 3600.0)
tf.logging.info('Starting to evaluate.')
eval_start = time.time() # This time will include compilation time
eval_results = resnet_classifier.evaluate(
input_fn=eval_input_fn,
steps=eval_steps,
checkpoint_path=ckpt)
eval_time = int(time.time() - eval_start)
tf.logging.info('Eval results: %s. Elapsed seconds: %d' %
(eval_results, eval_time))
results.append([
current_epoch,
elapsed_hours,
'%.2f' % (eval_results['top_1_accuracy'] * 100),
'%.2f' % (eval_results['top_5_accuracy'] * 100),
])
time.sleep(60)
with tf.gfile.GFile(os.path.join(FLAGS.model_dir, 'results.tsv'), 'wb') as tsv_file: # pylint: disable=line-too-long
writer = csv.writer(tsv_file, delimiter='\t')
writer.writerow(['epoch', 'hours', 'top1Accuracy', 'top5Accuracy'])
writer.writerows(results)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
| tensorflow/tpu | models/official/resnet/benchmark/resnet_benchmark.py | Python | apache-2.0 | 8,651 |
<?php
require_once "db_config.php";
session_start();
if(!array_key_exists('id', $_SESSION))
{
header("location : index.php");
}
?>
<!DOCTYPE html>
<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7"> <![endif]-->
<!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8"> <![endif]-->
<!--[if IE 8]> <html class="no-js lt-ie9"> <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js"> <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title></title>
<meta name="description" content="">
<meta name="viewport" content="width=device-width">
<link rel="stylesheet" href="css/bootstrap.min.css">
<style>
body {
padding-top: 60px;
padding-bottom: 40px;
}
</style>
<link rel="stylesheet" href="css/bootstrap-responsive.min.css">
<link rel="stylesheet" href="css/main.css">
<script src="js/vendor/modernizr-2.6.2-respond-1.1.0.min.js"></script>
</head>
<body>
<!--[if lt IE 7]>
<p class="chromeframe">You are using an <strong>outdated</strong> browser. Please <a href="http://browsehappy.com/">upgrade your browser</a> or <a href="http://www.google.com/chromeframe/?redirect=true">activate Google Chrome Frame</a> to improve your experience.</p>
<![endif]-->
<!-- This code is taken from http://twitter.github.com/bootstrap/examples/hero.html -->
<!-- <div class="navbar navbar-inverse navbar-fixed-top">
<div class="navbar-inner">
<div class="container">
<a class="btn btn-navbar" data-toggle="collapse" data-target=".nav-collapse">
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</a>
<a class="brand" href="#">Project name</a>
<div class="nav-collapse collapse">
<ul class="nav">
<li class="active"><a href="#">Home</a></li>
<li><a href="#about">About</a></li>
<li><a href="#contact">Contact</a></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown">Dropdown <b class="caret"></b></a>
<ul class="dropdown-menu">
<li><a href="#">Action</a></li>
<li><a href="#">Another action</a></li>
<li><a href="#">Something else here</a></li>
<li class="divider"></li>
<li class="nav-header">Nav header</li>
<li><a href="#">Separated link</a></li>
<li><a href="#">One more separated link</a></li>
</ul>
</li>
</ul>
<form class="navbar-form pull-right" action='login.php' method='post'>
<input class="span2" type="text" placeholder="user" name='user' id='user'>
<input class="span2" type="password" placeholder="password" name='password' id='password'>
<input type='submit'class="btn" name='Submit' value='Submit' />
</form>
</div><!--/.nav-collapse
</div>
</div>
</div>
-->
<?php require_once "menu_func.php"; ?>
<div class="container">
<!-- Main hero unit for a primary marketing message or call to action -->
<div class="hero-unit">
        <h1>Welcome, <?php echo $_SESSION['user']; ?></h1>
        <p>Shall we do some editing?</p>
</div>
<!-- Example row of columns -->
<div class="row">
<div class="span4">
          <h2>Add a Menu</h2>
<p> <form class="navbar-form pull-right" action="addmenu.php" method="post">
<input class="span2" type="text" placeholder="Име" name="menu_name" id="menu_name"><br />
<input class="span2" type="text" placeholder="Линк" name="menu_addr" id="menu_addr"><br />
<select name="menu_type" id="menu_type" >
<option value ="top">Меню</option>
<option value ="dropdown">Drop</option>
<option value ="usermenu">User</option>
</select>
<br />
<input type="submit"class="btn" name="Submit" value="Submit" />
</form>
</p>
</div>
<!-- <div class="span4">
<h2>Heading</h2>
<p>Donec id elit non mi porta gravida at eget metus. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Etiam porta sem malesuada magna mollis euismod. Donec sed odio dui. </p>
<p><a class="btn" href="#">View details »</a></p>
</div>
<div class="span4">
<h2>Heading</h2>
<p>Donec sed odio dui. Cras justo odio, dapibus ac facilisis in, egestas eget quam. Vestibulum id ligula porta felis euismod semper. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus.</p>
<p><a class="btn" href="#">View details »</a></p>
</div> -->
</div>
<hr>
<footer>
            <p>© Company 2012</p>
</footer>
</div> <!-- /container -->
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js"></script>
<script>window.jQuery || document.write('<script src="js/vendor/jquery-1.9.1.min.js"><\/script>')</script>
<script src="js/vendor/bootstrap.min.js"></script>
<script src="js/main.js"></script>
<script>
var _gaq=[['_setAccount','UA-XXXXX-X'],['_trackPageview']];
(function(d,t){var g=d.createElement(t),s=d.getElementsByTagName(t)[0];
g.src=('https:'==location.protocol?'//ssl':'//www')+'.google-analytics.com/ga.js';
s.parentNode.insertBefore(g,s)}(document,'script'));
</script>
</body>
</html>
| PuloV/homepage | admin/acp.php | PHP | apache-2.0 | 6,900 |
package pl.matisoft.soy.config;
import com.google.template.soy.jssrc.SoyJsSrcOptions;
import com.google.template.soy.tofu.SoyTofuOptions;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.context.support.ServletContextResource;
import org.springframework.web.servlet.ViewResolver;
import pl.matisoft.soy.ContentNegotiator;
import pl.matisoft.soy.DefaultContentNegotiator;
import pl.matisoft.soy.SoyTemplateViewResolver;
import pl.matisoft.soy.bundle.DefaultSoyMsgBundleResolver;
import pl.matisoft.soy.bundle.SoyMsgBundleResolver;
import pl.matisoft.soy.compile.DefaultTofuCompiler;
import pl.matisoft.soy.compile.TofuCompiler;
import pl.matisoft.soy.data.DefaultToSoyDataConverter;
import pl.matisoft.soy.data.ToSoyDataConverter;
import pl.matisoft.soy.data.adjust.ModelAdjuster;
import pl.matisoft.soy.data.adjust.SpringModelAdjuster;
import pl.matisoft.soy.global.compile.CompileTimeGlobalModelResolver;
import pl.matisoft.soy.global.compile.EmptyCompileTimeGlobalModelResolver;
import pl.matisoft.soy.global.runtime.EmptyGlobalRuntimeModelResolver;
import pl.matisoft.soy.global.runtime.GlobalRuntimeModelResolver;
import pl.matisoft.soy.holder.CompiledTemplatesHolder;
import pl.matisoft.soy.holder.DefaultCompiledTemplatesHolder;
import pl.matisoft.soy.locale.LocaleProvider;
import pl.matisoft.soy.locale.SpringLocaleProvider;
import pl.matisoft.soy.render.DefaultTemplateRenderer;
import pl.matisoft.soy.render.TemplateRenderer;
import pl.matisoft.soy.template.DefaultTemplateFilesResolver;
import pl.matisoft.soy.template.TemplateFilesResolver;
import javax.inject.Inject;
import javax.servlet.ServletContext;
/**
* Created with IntelliJ IDEA.
* User: mati
* Date: 12/11/2013
* Time: 19:55
*/
@Configuration
public class SpringSoyViewBaseConfig {
@Value("${soy.hot.reload.mode:false}")
private boolean hotReloadMode;
@Value("${soy.templates.resolve.recursively:true}")
private boolean recursive;
@Value("${soy.templates.file.extension:soy}")
private String fileExtension;
@Value("${soy.templates.directory:/WEB-INF/templates}")
private String templatesPath;
@Value("${soy.i18n.xliff.path:xliffs/messages}")
private String messagesPath;
@Value("${soy.encoding:utf-8}")
private String encoding;
@Value("${soy.i18n.fallback.to.english:true}")
private boolean fallbackToEnglish;
@Value("${soy.preCompile.templates:false}")
private boolean preCompileTemplates;
@Value("${soy.indexView:index}")
private String indexView;
@Value("${soy.logical.prefix:soy:}")
private String logicalPrefix;
@Value("${soy.resolver.order:2147483647}")
private int order;
@Inject
private ServletContext servletContext;
@Bean
public LocaleProvider soyLocaleProvider() {
return new SpringLocaleProvider();
}
@Bean
public DefaultTemplateFilesResolver soyTemplateFilesResolver() throws Exception {
final DefaultTemplateFilesResolver defaultTemplateFilesResolver = new DefaultTemplateFilesResolver();
defaultTemplateFilesResolver.setHotReloadMode(hotReloadMode);
defaultTemplateFilesResolver.setRecursive(recursive);
defaultTemplateFilesResolver.setFilesExtension(fileExtension);
defaultTemplateFilesResolver.setTemplatesLocation(new ServletContextResource(servletContext, templatesPath));
return defaultTemplateFilesResolver;
}
@Bean
public CompileTimeGlobalModelResolver soyCompileTimeGlobalModelResolver() {
return new EmptyCompileTimeGlobalModelResolver();
}
@Bean
public ToSoyDataConverter soyToSoyDataConverter() {
return new DefaultToSoyDataConverter();
}
@Bean
public SoyJsSrcOptions soyJsSourceOptions() {
return new SoyJsSrcOptions();
}
@Bean
public SoyTofuOptions soyTofuOptions() {
final SoyTofuOptions soyTofuOptions = new SoyTofuOptions();
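        // Disable Tofu's template caching in hot-reload mode so edited templates
        // are recompiled and take effect on the next request.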
soyTofuOptions.setUseCaching(!hotReloadMode);
return soyTofuOptions;
}
@Bean
public TofuCompiler soyTofuCompiler(final CompileTimeGlobalModelResolver compileTimeGlobalModelResolver, final SoyJsSrcOptions soyJsSrcOptions, final SoyTofuOptions soyTofuOptions) {
final DefaultTofuCompiler defaultTofuCompiler = new DefaultTofuCompiler();
defaultTofuCompiler.setHotReloadMode(hotReloadMode);
defaultTofuCompiler.setCompileTimeGlobalModelResolver(compileTimeGlobalModelResolver);
defaultTofuCompiler.setSoyJsSrcOptions(soyJsSrcOptions);
defaultTofuCompiler.setSoyTofuOptions(soyTofuOptions);
return defaultTofuCompiler;
}
@Bean
public SoyMsgBundleResolver soyMsgBundleResolver() {
final DefaultSoyMsgBundleResolver defaultSoyMsgBundleResolver = new DefaultSoyMsgBundleResolver();
defaultSoyMsgBundleResolver.setHotReloadMode(hotReloadMode);
defaultSoyMsgBundleResolver.setMessagesPath(messagesPath);
defaultSoyMsgBundleResolver.setFallbackToEnglish(fallbackToEnglish);
return defaultSoyMsgBundleResolver;
}
@Bean
public CompiledTemplatesHolder soyTemplatesHolder(final TemplateFilesResolver templateFilesResolver, final TofuCompiler tofuCompiler) throws Exception {
final DefaultCompiledTemplatesHolder defaultCompiledTemplatesHolder = new DefaultCompiledTemplatesHolder();
defaultCompiledTemplatesHolder.setHotReloadMode(hotReloadMode);
defaultCompiledTemplatesHolder.setPreCompileTemplates(preCompileTemplates);
defaultCompiledTemplatesHolder.setTemplatesFileResolver(templateFilesResolver);
defaultCompiledTemplatesHolder.setTofuCompiler(tofuCompiler);
return defaultCompiledTemplatesHolder;
}
@Bean
public TemplateRenderer soyTemplateRenderer(final ToSoyDataConverter toSoyDataConverter) {
final DefaultTemplateRenderer defaultTemplateRenderer = new DefaultTemplateRenderer();
defaultTemplateRenderer.setHotReloadMode(hotReloadMode);
defaultTemplateRenderer.setToSoyDataConverter(toSoyDataConverter);
return defaultTemplateRenderer;
}
@Bean
public ModelAdjuster soySpringModelAdjuster() {
return new SpringModelAdjuster();
}
@Bean
public GlobalRuntimeModelResolver soyGlobalRuntimeModelResolver() {
return new EmptyGlobalRuntimeModelResolver();
}
@Bean
public ContentNegotiator contentNegotiator() {
return new DefaultContentNegotiator();
}
@Bean
public ViewResolver soyViewResolver(final CompiledTemplatesHolder compiledTemplatesHolder,
final ModelAdjuster modelAdjuster,
final TemplateRenderer templateRenderer,
final LocaleProvider localeProvider,
final GlobalRuntimeModelResolver globalRuntimeModelResolver,
final ContentNegotiator contentNegotiator,
final SoyMsgBundleResolver msgBundleResolver)
throws Exception {
final SoyTemplateViewResolver soyTemplateViewResolver = new SoyTemplateViewResolver();
soyTemplateViewResolver.setSoyMsgBundleResolver(msgBundleResolver);
soyTemplateViewResolver.setCompiledTemplatesHolder(compiledTemplatesHolder);
soyTemplateViewResolver.setEncoding(encoding);
soyTemplateViewResolver.setGlobalRuntimeModelResolver(globalRuntimeModelResolver);
soyTemplateViewResolver.setHotReloadMode(hotReloadMode);
soyTemplateViewResolver.setIndexView(indexView);
soyTemplateViewResolver.setLocaleProvider(localeProvider);
soyTemplateViewResolver.setModelAdjuster(modelAdjuster);
soyTemplateViewResolver.setTemplateRenderer(templateRenderer);
soyTemplateViewResolver.setPrefix(logicalPrefix);
soyTemplateViewResolver.setOrder(order);
soyTemplateViewResolver.setRedirectContextRelative(true);
soyTemplateViewResolver.setRedirectHttp10Compatible(true);
soyTemplateViewResolver.setContentNegotiator(contentNegotiator);
return soyTemplateViewResolver;
}
}
| matiwinnetou/spring-soy-view | spring-soy-view/src/main/java/pl/matisoft/soy/config/SpringSoyViewBaseConfig.java | Java | apache-2.0 | 8,365 |
package userstoreauth.servlets;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import userstoreauth.model.UserVer2;
import userstoreauth.service.UserStoreMb;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.sql.Timestamp;
import java.time.LocalDateTime;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
class EditUserTest {
@BeforeEach
void setUp() {
UserStoreMb us = new UserStoreMb();
us.deleteAll();
}
@Test
void editUser() throws ServletException, IOException {
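        // Mock a POST with updated profile fields, store an initial user, run the
        // servlet, and verify the stored user reflects the submitted changes.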
EditUser editUser = new EditUser();
UserStoreMb us = new UserStoreMb();
HttpServletRequest request = mock(HttpServletRequest.class);
HttpServletResponse response = mock(HttpServletResponse.class);
when(request.getParameter("login")).thenReturn("login");
when(request.getParameter("password")).thenReturn("password0");
when(request.getParameter("name")).thenReturn("name0");
when(request.getParameter("email")).thenReturn("email0");
when(request.getParameter("role")).thenReturn("admin");
when(request.getParameter("country")).thenReturn("Россия");
when(request.getParameter("city")).thenReturn("Москва");
UserVer2 user = new UserVer2("login", "password", "name", "email", "Россия", "Москва", Timestamp.valueOf(LocalDateTime.now()), "user");
us.addUser(user);
assertEquals(user, us.getByLogin("login"));
editUser.doPost(request, response);
user.setPassword("password0");
user.setName("name0");
user.setEmail("email0");
user.setRole("admin");
assertEquals(user, us.getByLogin("login"));
}
}
| HeTyDeHer/ZapovA | chapter_009/src/test/java/userstoreauth/servlets/EditUserTest.java | Java | apache-2.0 | 1,929 |
from socket import inet_ntoa
from struct import pack
def calcDottedNetmask(mask):
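    """Convert a CIDR prefix length (e.g. 24) to a dotted netmask such as '255.255.255.0'."""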
bits = 0
for i in xrange(32 - mask, 32):
bits |= (1 << i)
packed_value = pack('!I', bits)
addr = inet_ntoa(packed_value)
return addr
| openbmc/openbmc-test-automation | lib/pythonutil.py | Python | apache-2.0 | 245 |
/*=========================================================================
*
* Copyright Insight Software Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
#ifndef itkLaplacianImageFilter_h
#define itkLaplacianImageFilter_h
#include "itkImageToImageFilter.h"
namespace itk
{
/**
* \class LaplacianImageFilter
*
* This filter computes the Laplacian of a scalar-valued image. The Laplacian
* is an isotropic measure of the 2nd spatial derivative of an image. The
* Laplacian of an image highlights regions of rapid intensity change and is
* therefore often used for edge detection. Often, the Laplacian is applied to
* an image that has first been smoothed with a Gaussian filter in order to
* reduce its sensitivity to noise.
*
* \par
* The Laplacian at each pixel location is computed by convolution with the
* itk::LaplacianOperator.
*
* \par Inputs and Outputs
* The input to this filter is a scalar-valued itk::Image of arbitrary
* dimension. The output is a scalar-valued itk::Image.
*
* \warning The pixel type of the input and output images must be of real type
* (float or double). ConceptChecking is used here to enforce the input pixel
* type. You will get a compilation error if the pixel type of the input and
* output images is not float or double.
*
* \sa Image
* \sa Neighborhood
* \sa NeighborhoodOperator
* \sa NeighborhoodIterator
* \sa LaplacianOperator
*
* \ingroup ImageFeatureExtraction
* \ingroup ITKImageFeature
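 *
 * A minimal usage sketch (hypothetical reader pipeline; assumes a float 2-D image):
 * \code
 *   typedef itk::Image< float, 2 >                            ImageType;
 *   typedef itk::LaplacianImageFilter< ImageType, ImageType > FilterType;
 *   FilterType::Pointer laplacian = FilterType::New();
 *   laplacian->SetInput( reader->GetOutput() );
 *   laplacian->Update();
 * \endcode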
*
* \wiki
* \wikiexample{ImageProcessing/LaplacianImageFilter,Compute the Laplacian of an image}
* \endwiki
*/
template< typename TInputImage, typename TOutputImage >
class LaplacianImageFilter:
public ImageToImageFilter< TInputImage, TOutputImage >
{
public:
/** Standard "Self" & Superclass typedef. */
typedef LaplacianImageFilter Self;
typedef ImageToImageFilter< TInputImage, TOutputImage > Superclass;
/** Extract some information from the image types. Dimensionality
* of the two images is assumed to be the same. */
typedef typename TOutputImage::PixelType OutputPixelType;
typedef typename TOutputImage::InternalPixelType OutputInternalPixelType;
typedef typename TInputImage::PixelType InputPixelType;
typedef typename TInputImage::InternalPixelType InputInternalPixelType;
itkStaticConstMacro(InputImageDimension, unsigned int,
TInputImage::ImageDimension);
itkStaticConstMacro(ImageDimension, unsigned int,
TOutputImage::ImageDimension);
/** Image typedef support. */
typedef TInputImage InputImageType;
typedef TOutputImage OutputImageType;
typedef typename InputImageType::Pointer InputImagePointer;
/** Smart pointer typedef support. */
typedef SmartPointer< Self > Pointer;
typedef SmartPointer< const Self > ConstPointer;
/** Run-time type information (and related methods) */
itkTypeMacro(LaplacianImageFilter, ImageToImageFilter);
/** Method for creation through the object factory. */
itkNewMacro(Self);
/** LaplacianImageFilter needs a larger input requested region than
* the output requested region (larger in the direction of the
* derivative). As such, LaplacianImageFilter needs to provide an
* implementation for GenerateInputRequestedRegion() in order to
* inform the pipeline execution model.
*
* \sa ImageToImageFilter::GenerateInputRequestedRegion() */
virtual void GenerateInputRequestedRegion() ITK_OVERRIDE;
/** Enable/Disable using the image spacing information in
* calculations. Use this option if you want derivatives in
* physical space. Default is UseImageSpacingOn. */
itkBooleanMacro( UseImageSpacing );
/** Set/Get whether or not the filter will use the spacing of the input
image in its calculations */
itkSetMacro(UseImageSpacing, bool);
itkGetConstMacro(UseImageSpacing, bool);
#ifdef ITK_USE_CONCEPT_CHECKING
// Begin concept checking
itkConceptMacro( SameDimensionCheck,
( Concept::SameDimension< InputImageDimension, ImageDimension > ) );
itkConceptMacro( InputPixelTypeIsFloatingPointCheck,
( Concept::IsFloatingPoint< InputPixelType > ) );
itkConceptMacro( OutputPixelTypeIsFloatingPointCheck,
( Concept::IsFloatingPoint< OutputPixelType > ) );
// End concept checking
#endif
protected:
LaplacianImageFilter()
{
m_UseImageSpacing = true;
}
virtual ~LaplacianImageFilter() {}
/** Standard pipeline method. While this class does not implement a
* ThreadedGenerateData(), its GenerateData() delegates all
* calculations to an NeighborhoodOperatorImageFilter. Since the
* NeighborhoodOperatorImageFilter is multithreaded, this filter is
* multithreaded by default. */
void GenerateData() ITK_OVERRIDE;
void PrintSelf(std::ostream &, Indent) const ITK_OVERRIDE;
private:
ITK_DISALLOW_COPY_AND_ASSIGN(LaplacianImageFilter);
bool m_UseImageSpacing;
};
} // end namespace itk
#ifndef ITK_MANUAL_INSTANTIATION
#include "itkLaplacianImageFilter.hxx"
#endif
#endif
| zachary-williamson/ITK | Modules/Filtering/ImageFeature/include/itkLaplacianImageFilter.h | C | apache-2.0 | 5,759 |
import pytest
import salt.engines
from tests.support.mock import MagicMock, patch
def test_engine_module_name():
engine = salt.engines.Engine({}, "foobar.start", {}, {}, {}, {}, name="foobar")
assert engine.name == "foobar"
def test_engine_title_set():
engine = salt.engines.Engine({}, "foobar.start", {}, {}, {}, {}, name="foobar")
with patch("salt.utils.process.appendproctitle", MagicMock()) as mm:
with pytest.raises(KeyError):
# The method does not exist so a KeyError will be raised.
engine.run()
mm.assert_called_with("foobar")
| saltstack/salt | tests/pytests/unit/engines/test_engines.py | Python | apache-2.0 | 595 |
//-------------------------------------------------------------------------------------
// ExportXmlParser.cpp
//
// Simple callback non-validating XML parser implementation.
//
// Microsoft XNA Developer Connection.
// Copyright © Microsoft Corporation. All rights reserved.
//-------------------------------------------------------------------------------------
#include "stdafx.h"
#include "ExportXmlParser.h"
namespace ATG
{
//-------------------------------------------------------------------------------------
// Name: XMLParser::XMLParser
//-------------------------------------------------------------------------------------
XMLParser::XMLParser()
{
m_pWritePtr = m_pWriteBuf;
m_pReadPtr = m_pReadBuf;
m_pISAXCallback = NULL;
m_hFile = INVALID_HANDLE_VALUE;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::~XMLParser
//-------------------------------------------------------------------------------------
XMLParser::~XMLParser()
{
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::FillBuffer
// Desc: Reads a block from the current open file
//-------------------------------------------------------------------------------------
VOID XMLParser::FillBuffer()
{
DWORD NChars;
m_pReadPtr = m_pReadBuf;
if( m_hFile == NULL )
{
if( m_uInXMLBufferCharsLeft > XML_READ_BUFFER_SIZE )
NChars = XML_READ_BUFFER_SIZE;
else
NChars = m_uInXMLBufferCharsLeft;
CopyMemory( m_pReadBuf, m_pInXMLBuffer, NChars );
m_uInXMLBufferCharsLeft -= NChars;
m_pInXMLBuffer += NChars;
}
else
{
ReadFile( m_hFile, m_pReadBuf, XML_READ_BUFFER_SIZE, &NChars, NULL );
}
m_dwCharsConsumed += NChars;
__int64 iProgress = ( (__int64)m_dwCharsConsumed * 1000 ) / (__int64)m_dwCharsTotal;
m_pISAXCallback->SetParseProgress( (DWORD)iProgress );
m_pReadBuf[ NChars ] = '\0';
m_pReadBuf[ NChars + 1] = '\0';
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::SkipNextAdvance
// Desc: Puts the last character read back on the input stream
//-------------------------------------------------------------------------------------
VOID XMLParser::SkipNextAdvance()
{
m_bSkipNextAdvance = TRUE;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::ConsumeSpace
// Desc: Skips spaces in the current stream
//-------------------------------------------------------------------------------------
HRESULT XMLParser::ConsumeSpace()
{
HRESULT hr;
// Skip spaces
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
while ( ( m_Ch == ' ' ) || ( m_Ch == '\t' ) ||
( m_Ch == '\n' ) || ( m_Ch == '\r' ) )
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
}
SkipNextAdvance();
return S_OK;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::ConvertEscape
// Desc: Copies and converts an escape sequence into m_pWriteBuf
//-------------------------------------------------------------------------------------
HRESULT XMLParser::ConvertEscape()
{
HRESULT hr;
WCHAR wVal = 0;
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
// all escape sequences start with &, so ignore the first character
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if ( m_Ch == '#' ) // character as hex or decimal
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if ( m_Ch == 'x' ) // hex number
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
while ( m_Ch != ';' )
{
wVal *= 16;
if ( ( m_Ch >= '0' ) && ( m_Ch <= '9' ) )
{
wVal += m_Ch - '0';
}
else if ( ( m_Ch >= 'a' ) && ( m_Ch <= 'f' ) )
{
wVal += m_Ch - 'a' + 10;
}
else if ( ( m_Ch >= 'A' ) && ( m_Ch <= 'F' ) )
{
wVal += m_Ch - 'A' + 10;
}
else
{
Error( E_INVALID_XML_SYNTAX, "Expected hex digit as part of &#x escape sequence" );
return E_INVALID_XML_SYNTAX;
}
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
}
}
else // decimal number
{
while ( m_Ch != ';' )
{
wVal *= 10;
if ( ( m_Ch >= '0' ) && ( m_Ch <= '9' ) )
{
wVal += m_Ch - '0';
}
else
{
Error( E_INVALID_XML_SYNTAX, "Expected decimal digit as part of &# escape sequence" );
return E_INVALID_XML_SYNTAX;
}
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
}
}
// copy character into the buffer
m_Ch = wVal;
return S_OK;
}
// must be an entity reference
WCHAR *pEntityRefVal = m_pWritePtr;
UINT EntityRefLen;
SkipNextAdvance();
if( FAILED( hr = AdvanceName() ) )
return hr;
EntityRefLen = (UINT)( m_pWritePtr - pEntityRefVal );
m_pWritePtr = pEntityRefVal;
if ( EntityRefLen == 0 )
{
Error( E_INVALID_XML_SYNTAX, "Expecting entity name after &" );
return E_INVALID_XML_SYNTAX;
}
if( !wcsncmp( pEntityRefVal, L"lt", EntityRefLen ) )
wVal = '<';
else if( !wcsncmp( pEntityRefVal, L"gt", EntityRefLen ) )
wVal = '>';
else if( !wcsncmp( pEntityRefVal, L"amp", EntityRefLen ) )
wVal = '&';
else if( !wcsncmp( pEntityRefVal, L"apos", EntityRefLen ) )
wVal = '\'';
else if( !wcsncmp( pEntityRefVal, L"quot", EntityRefLen ) )
wVal = '"';
else
{
Error( E_INVALID_XML_SYNTAX, "Unrecognized entity name after & - (should be lt, gt, amp, apos, or quot)" );
        return E_INVALID_XML_SYNTAX; // fail on an unrecognized entity name
}
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( m_Ch != ';' )
{
Error( E_INVALID_XML_SYNTAX, "Expected terminating ; for entity reference" );
return E_INVALID_XML_SYNTAX; // malformed reference - needs terminating ;
}
m_Ch = wVal;
return S_OK;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::AdvanceAttrVal
// Desc: Copies an attribute value into m_pWrite buf, skipping surrounding quotes
//-------------------------------------------------------------------------------------
HRESULT XMLParser::AdvanceAttrVal()
{
HRESULT hr;
WCHAR wQuoteChar;
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( ( m_Ch != '"' ) && ( m_Ch != '\'' ) )
{
Error( E_INVALID_XML_SYNTAX, "Attribute values must be enclosed in quotes" );
return E_INVALID_XML_SYNTAX;
}
wQuoteChar = m_Ch;
for( ;; )
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
else if( m_Ch == wQuoteChar )
break;
else if( m_Ch == '&' )
{
SkipNextAdvance();
if( FAILED( hr = ConvertEscape() ) )
return hr;
}
else if( m_Ch == '<' )
{
Error( E_INVALID_XML_SYNTAX, "Illegal character '<' in element tag" );
return E_INVALID_XML_SYNTAX;
}
// copy character into the buffer
if( m_pWritePtr - m_pWriteBuf >= XML_WRITE_BUFFER_SIZE )
{
Error( E_INVALID_XML_SYNTAX, "Total element tag size may not be more than %d characters", XML_WRITE_BUFFER_SIZE );
return E_INVALID_XML_SYNTAX;
}
*m_pWritePtr = m_Ch;
m_pWritePtr++;
}
return S_OK;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::AdvanceName
    // Desc: Copies a name into the m_pWriteBuf - returns S_OK on success, or an error HRESULT on failure
// Ignores leading whitespace. Currently does not support unicode names
//-------------------------------------------------------------------------------------
HRESULT XMLParser::AdvanceName()
{
HRESULT hr;
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( ( ( m_Ch < 'A' ) || ( m_Ch > 'Z' ) ) &&
( ( m_Ch < 'a' ) || ( m_Ch > 'z' ) ) &&
( m_Ch != '_' ) && ( m_Ch != ':' ) )
{
Error( E_INVALID_XML_SYNTAX, "Names must start with an alphabetic character or _ or :" );
return E_INVALID_XML_SYNTAX;
}
while( ( ( m_Ch >= 'A' ) && ( m_Ch <= 'Z' ) ) ||
( ( m_Ch >= 'a' ) && ( m_Ch <= 'z' ) ) ||
( ( m_Ch >= '0' ) && ( m_Ch <= '9' ) ) ||
( m_Ch == '_' ) || ( m_Ch == ':' ) ||
( m_Ch == '-' ) || ( m_Ch == '.' ) )
{
if( m_pWritePtr - m_pWriteBuf >= XML_WRITE_BUFFER_SIZE )
{
Error( E_INVALID_XML_SYNTAX, "Total element tag size may not be more than %d characters", XML_WRITE_BUFFER_SIZE );
return E_INVALID_XML_SYNTAX;
}
*m_pWritePtr = m_Ch;
m_pWritePtr++;
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
}
SkipNextAdvance();
return S_OK;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::AdvanceCharacter
// Desc: Copies the character at *m_pReadPtr to m_Ch
// handling difference in UTF16 / UTF8, and big/little endian
// and getting another chunk of the file if needed
    // Returns S_OK if a character was read; at EOF returns E_FAIL when bOkToFail is TRUE, otherwise E_INVALID_XML_SYNTAX
//-------------------------------------------------------------------------------------
HRESULT XMLParser::AdvanceCharacter( BOOL bOkToFail )
{
if( m_bSkipNextAdvance )
{
m_bSkipNextAdvance = FALSE;
return S_OK;
}
// If we hit EOF in the middle of a character,
// it's ok-- we'll just have a corrupt last character
// (the buffer is padded with double NULLs )
if( *m_pReadPtr == '\0' )
{
// Read more from the file
FillBuffer();
// We are at EOF if it is still NULL
if( *m_pReadPtr == '\0' )
{
if( !bOkToFail )
{
Error( E_INVALID_XML_SYNTAX, "Unexpected EOF while parsing XML file" );
return E_INVALID_XML_SYNTAX;
}
else
{
return E_FAIL;
}
}
}
if( m_bUnicode == FALSE )
{
m_Ch = *((CHAR *)m_pReadPtr);
m_pReadPtr++;
}
else // if( m_bUnicode == TRUE )
{
m_Ch = *((WCHAR *)m_pReadPtr);
if( m_bReverseBytes )
{
m_Ch = ( m_Ch << 8 ) + ( m_Ch >> 8 );
}
m_pReadPtr += 2;
}
if( m_Ch == '\n' )
{
m_pISAXCallback->m_LineNum++;
m_pISAXCallback->m_LinePos = 0;
}
else if( m_Ch != '\r' )
m_pISAXCallback->m_LinePos++;
return S_OK;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::AdvanceElement
// Desc: Builds <element> data, calls callback
//-------------------------------------------------------------------------------------
HRESULT XMLParser::AdvanceElement()
{
HRESULT hr;
// write ptr at the beginning of the buffer
m_pWritePtr = m_pWriteBuf;
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
// if first character wasn't '<', we wouldn't be here
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( m_Ch == '!' )
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if ( m_Ch == '-' )
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( m_Ch != '-' )
{
Error( E_INVALID_XML_SYNTAX, "Expecting '-' after '<!-'" );
return E_INVALID_XML_SYNTAX;
}
if( FAILED( hr = AdvanceComment() ) )
return hr;
return S_OK;
}
if( m_Ch != '[' )
{
Error( E_INVALID_XML_SYNTAX, "Expecting '<![CDATA['" );
return E_INVALID_XML_SYNTAX;
}
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( m_Ch != 'C' )
{
Error( E_INVALID_XML_SYNTAX, "Expecting '<![CDATA['" );
return E_INVALID_XML_SYNTAX;
}
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( m_Ch != 'D' )
{
Error( E_INVALID_XML_SYNTAX, "Expecting '<![CDATA['" );
return E_INVALID_XML_SYNTAX;
}
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( m_Ch != 'A' )
{
Error( E_INVALID_XML_SYNTAX, "Expecting '<![CDATA['" );
return E_INVALID_XML_SYNTAX;
}
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( m_Ch != 'T' )
{
Error( E_INVALID_XML_SYNTAX, "Expecting '<![CDATA['" );
return E_INVALID_XML_SYNTAX;
}
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( m_Ch != 'A' )
{
Error( E_INVALID_XML_SYNTAX, "Expecting '<![CDATA['" );
return E_INVALID_XML_SYNTAX;
}
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( m_Ch != '[' )
{
Error( E_INVALID_XML_SYNTAX, "Expecting '<![CDATA['" );
return E_INVALID_XML_SYNTAX;
}
if( FAILED( hr = AdvanceCDATA() ) )
return hr;
}
else if( m_Ch == '/' )
{
WCHAR *pEntityRefVal = m_pWritePtr;
if( FAILED( hr = AdvanceName() ) )
return hr;
if( FAILED( m_pISAXCallback->ElementEnd( pEntityRefVal,
(UINT) ( m_pWritePtr - pEntityRefVal ) ) ) )
return E_ABORT;
if( FAILED( hr = ConsumeSpace() ) )
return hr;
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( m_Ch != '>' )
{
Error( E_INVALID_XML_SYNTAX, "Expecting '>' after name for closing entity reference" );
return E_INVALID_XML_SYNTAX;
}
}
else if( m_Ch == '?' )
{
// just skip any xml header tag since not really important after identifying character set
for( ;; )
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if ( m_Ch == '>' )
return S_OK;
}
}
else
{
XMLAttribute Attributes[ XML_MAX_ATTRIBUTES_PER_ELEMENT ];
UINT NumAttrs;
WCHAR *pEntityRefVal = m_pWritePtr;
UINT EntityRefLen;
NumAttrs = 0;
SkipNextAdvance();
// Entity tag
if( FAILED( hr = AdvanceName() ) )
return hr;
EntityRefLen = (UINT)( m_pWritePtr - pEntityRefVal );
if( FAILED( hr = ConsumeSpace() ) )
return hr;
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
// read attributes
while( ( m_Ch != '>' ) && ( m_Ch != '/' ) )
{
SkipNextAdvance();
if ( NumAttrs >= XML_MAX_ATTRIBUTES_PER_ELEMENT )
{
Error( E_INVALID_XML_SYNTAX, "Elements may not have more than %d attributes", XML_MAX_ATTRIBUTES_PER_ELEMENT );
return E_INVALID_XML_SYNTAX;
}
Attributes[ NumAttrs ].strName = m_pWritePtr;
// Attribute name
if( FAILED( hr = AdvanceName() ) )
return hr;
Attributes[ NumAttrs ].NameLen = (UINT)( m_pWritePtr - Attributes[ NumAttrs ].strName );
if( FAILED( hr = ConsumeSpace() ) )
return hr;
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( m_Ch != '=' )
{
Error( E_INVALID_XML_SYNTAX, "Expecting '=' character after attribute name" );
return E_INVALID_XML_SYNTAX;
}
if( FAILED( hr = ConsumeSpace() ) )
return hr;
Attributes[ NumAttrs ].strValue = m_pWritePtr;
if( FAILED( hr = AdvanceAttrVal() ) )
return hr;
Attributes[ NumAttrs ].ValueLen = (UINT)( m_pWritePtr -
Attributes[ NumAttrs ].strValue );
++NumAttrs;
if( FAILED( hr = ConsumeSpace() ) )
return hr;
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
}
if( m_Ch == '/' )
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( m_Ch != '>' )
{
Error( E_INVALID_XML_SYNTAX, "Expecting '>' after '/' in element tag" );
return E_INVALID_XML_SYNTAX;
}
if( FAILED( m_pISAXCallback->ElementBegin( pEntityRefVal, EntityRefLen,
Attributes, NumAttrs ) ) )
return E_ABORT;
if( FAILED( m_pISAXCallback->ElementEnd( pEntityRefVal, EntityRefLen ) ) )
return E_ABORT;
}
else
{
if( FAILED( m_pISAXCallback->ElementBegin( pEntityRefVal, EntityRefLen,
Attributes, NumAttrs ) ) )
return E_ABORT;
}
}
return S_OK;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::AdvanceCDATA
// Desc: Read a CDATA section
//-------------------------------------------------------------------------------------
HRESULT XMLParser::AdvanceCDATA()
{
HRESULT hr;
WORD wStage = 0;
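        // wStage tracks progress through the "]]>" terminator: 0 = nothing, 1 = "]", 2 = "]]".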
if( FAILED( m_pISAXCallback->CDATABegin() ) )
return E_ABORT;
for( ;; )
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
*m_pWritePtr = m_Ch;
m_pWritePtr++;
if( ( m_Ch == ']' ) && ( wStage == 0 ) )
wStage = 1;
else if( ( m_Ch == ']' ) && ( wStage == 1 ) )
wStage = 2;
else if( ( m_Ch == '>' ) && ( wStage == 2 ) )
{
m_pWritePtr -= 3;
break;
}
else
wStage = 0;
if( m_pWritePtr - m_pWriteBuf >= XML_WRITE_BUFFER_SIZE )
{
if( FAILED( m_pISAXCallback->CDATAData( m_pWriteBuf, (UINT)( m_pWritePtr - m_pWriteBuf ), TRUE ) ) )
return E_ABORT;
m_pWritePtr = m_pWriteBuf;
}
}
if( FAILED( m_pISAXCallback->CDATAData( m_pWriteBuf, (UINT)( m_pWritePtr - m_pWriteBuf ), FALSE ) ) )
return E_ABORT;
m_pWritePtr = m_pWriteBuf;
if( FAILED( m_pISAXCallback->CDATAEnd() ) )
return E_ABORT;
return S_OK;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::AdvanceComment
// Desk: Skips over a comment
//-------------------------------------------------------------------------------------
HRESULT XMLParser::AdvanceComment()
{
HRESULT hr;
WORD wStage;
wStage = 0;
for( ;; )
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if (( m_Ch == '-' ) && ( wStage == 0 ))
wStage = 1;
else if (( m_Ch == '-' ) && ( wStage == 1 ))
wStage = 2;
else if (( m_Ch == '>' ) && ( wStage == 2 ))
break;
else
wStage = 0;
}
return S_OK;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::RegisterSAXCallbackInterface
// Desc: Registers callback interface
//-------------------------------------------------------------------------------------
VOID XMLParser::RegisterSAXCallbackInterface( ISAXCallback *pISAXCallback )
{
m_pISAXCallback = pISAXCallback;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::GetSAXCallbackInterface
// Desc: Returns current callback interface
//-------------------------------------------------------------------------------------
ISAXCallback* XMLParser::GetSAXCallbackInterface()
{
return m_pISAXCallback;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::MainParseLoop
// Desc: Main Loop to Parse Data - source agnostic
//-------------------------------------------------------------------------------------
HRESULT XMLParser::MainParseLoop()
{
BOOL bWhiteSpaceOnly = TRUE;
HRESULT hr = S_OK;
if( FAILED( m_pISAXCallback->StartDocument() ) )
return E_ABORT;
m_pWritePtr = m_pWriteBuf;
FillBuffer();
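        // Sniff the first two bytes to choose an encoding: a UTF-16 byte-order mark
        // (0xFEFF / 0xFFFE) or a bare '<' in either byte order selects 16-bit parsing,
        // while a single-byte '<' selects 8-bit parsing.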
if ( *((WCHAR *) m_pReadBuf ) == 0xFEFF )
{
m_bUnicode = TRUE;
m_bReverseBytes = FALSE;
m_pReadPtr += 2;
}
else if ( *((WCHAR *) m_pReadBuf ) == 0xFFFE )
{
m_bUnicode = TRUE;
m_bReverseBytes = TRUE;
m_pReadPtr += 2;
}
else if ( *((WCHAR *) m_pReadBuf ) == 0x003C )
{
m_bUnicode = TRUE;
m_bReverseBytes = FALSE;
}
else if ( *((WCHAR *) m_pReadBuf ) == 0x3C00 )
{
m_bUnicode = TRUE;
m_bReverseBytes = TRUE;
}
else if ( m_pReadBuf[ 0 ] == 0x3C )
{
m_bUnicode = FALSE;
m_bReverseBytes = FALSE;
}
else
{
Error( E_INVALID_XML_SYNTAX, "Unrecognized encoding (parser does not support UTF-8 language encodings)" );
return E_INVALID_XML_SYNTAX;
}
for( ;; )
{
if( FAILED( AdvanceCharacter( TRUE ) ) )
{
if ( ( (UINT) ( m_pWritePtr - m_pWriteBuf ) != 0 ) && ( !bWhiteSpaceOnly ) )
{
if( FAILED( m_pISAXCallback->ElementContent( m_pWriteBuf, (UINT)( m_pWritePtr - m_pWriteBuf ), FALSE ) ) )
return E_ABORT;
bWhiteSpaceOnly = TRUE;
}
if( FAILED( m_pISAXCallback->EndDocument() ) )
return E_ABORT;
return S_OK;
}
if( m_Ch == '<' )
{
if( ( (UINT) ( m_pWritePtr - m_pWriteBuf ) != 0 ) && ( !bWhiteSpaceOnly ) )
{
if( FAILED( m_pISAXCallback->ElementContent( m_pWriteBuf, (UINT)( m_pWritePtr - m_pWriteBuf ), FALSE ) ) )
return E_ABORT;
bWhiteSpaceOnly = TRUE;
}
SkipNextAdvance();
m_pWritePtr = m_pWriteBuf;
if( FAILED( hr = AdvanceElement() ) )
return hr;
m_pWritePtr = m_pWriteBuf;
}
else
{
if( m_Ch == '&' )
{
SkipNextAdvance();
if( FAILED( hr = ConvertEscape() ) )
return hr;
}
if( bWhiteSpaceOnly && ( m_Ch != ' ' ) && ( m_Ch != '\n' ) && ( m_Ch != '\r' ) &&
( m_Ch != '\t' ) )
{
bWhiteSpaceOnly = FALSE;
}
*m_pWritePtr = m_Ch;
m_pWritePtr++;
if( m_pWritePtr - m_pWriteBuf >= XML_WRITE_BUFFER_SIZE )
{
if( !bWhiteSpaceOnly )
{
if( FAILED( m_pISAXCallback->ElementContent( m_pWriteBuf,
( UINT ) ( m_pWritePtr - m_pWriteBuf ),
TRUE ) ) )
{
return E_ABORT;
}
}
m_pWritePtr = m_pWriteBuf;
bWhiteSpaceOnly = TRUE;
}
}
}
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::ParseXMLFile
// Desc: Builds element data
//-------------------------------------------------------------------------------------
HRESULT XMLParser::ParseXMLFile( CONST CHAR *strFilename )
{
HRESULT hr;
if( m_pISAXCallback == NULL )
return E_NOINTERFACE;
m_pISAXCallback->m_LineNum = 1;
m_pISAXCallback->m_LinePos = 0;
m_pISAXCallback->m_strFilename = strFilename; // save this off only while we parse the file
m_bSkipNextAdvance = FALSE;
m_pReadPtr = m_pReadBuf;
m_pReadBuf[ 0 ] = '\0';
m_pReadBuf[ 1 ] = '\0';
m_pInXMLBuffer = NULL;
m_uInXMLBufferCharsLeft = 0;
m_hFile = CreateFile( strFilename, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_FLAG_SEQUENTIAL_SCAN, NULL );
if( m_hFile == INVALID_HANDLE_VALUE )
{
Error( E_COULD_NOT_OPEN_FILE, "Error opening file" );
hr = E_COULD_NOT_OPEN_FILE;
}
else
{
LARGE_INTEGER iFileSize;
GetFileSizeEx( m_hFile, &iFileSize );
m_dwCharsTotal = (DWORD)iFileSize.QuadPart;
m_dwCharsConsumed = 0;
hr = MainParseLoop();
}
// Close the file
if( m_hFile != INVALID_HANDLE_VALUE )
CloseHandle( m_hFile );
m_hFile = INVALID_HANDLE_VALUE;
// we no longer own strFilename, so un-set it
m_pISAXCallback->m_strFilename = NULL;
return hr;
}
//-------------------------------------------------------------------------------------
    // Name: XMLParser::ParseXMLBuffer
// Desc: Builds element data
//-------------------------------------------------------------------------------------
HRESULT XMLParser::ParseXMLBuffer( CONST CHAR *strBuffer, UINT uBufferSize )
{
HRESULT hr;
if( m_pISAXCallback == NULL )
return E_NOINTERFACE;
m_pISAXCallback->m_LineNum = 1;
m_pISAXCallback->m_LinePos = 0;
m_pISAXCallback->m_strFilename = ""; // save this off only while we parse the file
m_bSkipNextAdvance = FALSE;
m_pReadPtr = m_pReadBuf;
m_pReadBuf[ 0 ] = '\0';
m_pReadBuf[ 1 ] = '\0';
m_hFile = NULL;
m_pInXMLBuffer = strBuffer;
m_uInXMLBufferCharsLeft = uBufferSize;
m_dwCharsTotal = m_uInXMLBufferCharsLeft;
m_dwCharsConsumed = 0;
hr = MainParseLoop();
// we no longer own strFilename, so un-set it
m_pISAXCallback->m_strFilename = NULL;
return hr;
}
//-------------------------------------------------------------------------------------
// XMLParser::Error()
// Logs an error through the callback interface
//-------------------------------------------------------------------------------------
VOID XMLParser::Error( HRESULT hErr, CONST CHAR* strFormat, ... )
{
CONST INT MAX_OUTPUT_STR = 160;
CHAR strBuffer[ MAX_OUTPUT_STR ];
va_list pArglist;
va_start( pArglist, strFormat );
vsprintf_s( strBuffer, strFormat, pArglist );
m_pISAXCallback->Error( hErr, strBuffer );
va_end( pArglist );
}
} // namespace ATG
| MorrigansWings/GamePhysics | PlanetSim/DirectX/Samples/C++/Misc/ContentExporter/ExportObjects/ExportXmlParser.cpp | C++ | apache-2.0 | 29,004 |
// Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.execution.testframework.sm.runner;
import com.intellij.execution.testframework.sm.SMTestRunnerConnectionUtil;
import com.intellij.execution.testframework.sm.runner.events.*;
import com.intellij.openapi.application.Application;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.Key;
import com.intellij.util.containers.ContainerUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.annotations.TestOnly;
import java.util.*;
/**
* This class fires events to SMTRunnerEventsListener in event dispatch thread.
*
* @author: Roman Chernyatchik
*/
public class GeneralToSMTRunnerEventsConvertor extends GeneralTestEventsProcessor {
private final Map<String, SMTestProxy> myRunningTestsFullNameToProxy = ContainerUtil.newConcurrentMap();
private final TestSuiteStack mySuitesStack;
private final Map<String, List<SMTestProxy>> myCurrentChildren = new HashMap<>();
private boolean myIsTestingFinished;
public GeneralToSMTRunnerEventsConvertor(Project project, @NotNull SMTestProxy.SMRootTestProxy testsRootNode,
@NotNull String testFrameworkName) {
super(project, testFrameworkName, testsRootNode);
mySuitesStack = new TestSuiteStack(testFrameworkName);
}
@Override
protected SMTestProxy createProxy(String testName, String locationHint, String metaInfo, String id, String parentNodeId) {
SMTestProxy proxy = super.createProxy(testName, locationHint, metaInfo, id, parentNodeId);
SMTestProxy currentSuite = getCurrentSuite();
currentSuite.addChild(proxy);
return proxy;
}
@Override
protected SMTestProxy createSuite(String suiteName, String locationHint, String metaInfo, String id, String parentNodeId) {
SMTestProxy newSuite = super.createSuite(suiteName, locationHint, metaInfo, id, parentNodeId);
final SMTestProxy parentSuite = getCurrentSuite();
parentSuite.addChild(newSuite);
mySuitesStack.pushSuite(newSuite);
return newSuite;
}
@Override
public void onSuiteTreeEnded(String suiteName) {
myBuildTreeRunnables.add(() -> mySuitesStack.popSuite(suiteName));
super.onSuiteTreeEnded(suiteName);
}
@Override
public void onStartTesting() {
mySuitesStack.pushSuite(myTestsRootProxy);
myTestsRootProxy.setStarted();
//fire
fireOnTestingStarted(myTestsRootProxy);
}
@Override
public void onTestsReporterAttached() {
fireOnTestsReporterAttached(myTestsRootProxy);
}
@Override
public void onFinishTesting() {
fireOnBeforeTestingFinished(myTestsRootProxy);
if (myIsTestingFinished) {
// has been already invoked!
return;
}
myIsTestingFinished = true;
// We don't know whether process was destroyed by user
// or it finished after all tests have been run
    // Let's assume, if at finish all suites except root suite are passed
// then all is ok otherwise process was terminated by user
if (!isTreeComplete(myRunningTestsFullNameToProxy.keySet(), myTestsRootProxy)) {
myTestsRootProxy.setTerminated();
myRunningTestsFullNameToProxy.clear();
}
mySuitesStack.clear();
myTestsRootProxy.setFinished();
myCurrentChildren.clear();
//fire events
fireOnTestingFinished(myTestsRootProxy);
super.onFinishTesting();
}
@Override
public void setPrinterProvider(@NotNull TestProxyPrinterProvider printerProvider) {
}
@Override
public void onTestStarted(@NotNull final TestStartedEvent testStartedEvent) {
final String testName = testStartedEvent.getName();
final String locationUrl = testStartedEvent.getLocationUrl();
final boolean isConfig = testStartedEvent.isConfig();
final String fullName = getFullTestName(testName);
if (myRunningTestsFullNameToProxy.containsKey(fullName)) {
//Duplicated event
logProblem("Test [" + fullName + "] has been already started");
if (SMTestRunnerConnectionUtil.isInDebugMode()) {
return;
}
}
SMTestProxy parentSuite = getCurrentSuite();
SMTestProxy testProxy = findChild(parentSuite, locationUrl != null ? locationUrl : fullName, false);
if (testProxy == null) {
// creates test
testProxy = new SMTestProxy(testName, false, locationUrl, testStartedEvent.getMetainfo(), false);
testProxy.setConfig(isConfig);
if (myTreeBuildBeforeStart) testProxy.setTreeBuildBeforeStart();
if (myLocator != null) {
testProxy.setLocator(myLocator);
}
parentSuite.addChild(testProxy);
}
// adds to running tests map
myRunningTestsFullNameToProxy.put(fullName, testProxy);
//Progress started
testProxy.setStarted();
//fire events
fireOnTestStarted(testProxy);
}
@Override
public void onSuiteStarted(@NotNull final TestSuiteStartedEvent suiteStartedEvent) {
final String suiteName = suiteStartedEvent.getName();
final String locationUrl = suiteStartedEvent.getLocationUrl();
SMTestProxy parentSuite = getCurrentSuite();
SMTestProxy newSuite = findChild(parentSuite, locationUrl != null ? locationUrl : suiteName, true);
if (newSuite == null) {
//new suite
newSuite = new SMTestProxy(suiteName, true, locationUrl, suiteStartedEvent.getMetainfo(), parentSuite.isPreservePresentableName());
if (myTreeBuildBeforeStart) {
newSuite.setTreeBuildBeforeStart();
}
if (myLocator != null) {
newSuite.setLocator(myLocator);
}
parentSuite.addChild(newSuite);
}
initCurrentChildren(newSuite, true);
mySuitesStack.pushSuite(newSuite);
//Progress started
newSuite.setSuiteStarted();
//fire event
fireOnSuiteStarted(newSuite);
}
private void initCurrentChildren(SMTestProxy newSuite, boolean preferSuite) {
if (myTreeBuildBeforeStart) {
for (SMTestProxy proxy : newSuite.getChildren()) {
if (!proxy.isFinal() || preferSuite && proxy.isSuite()) {
String url = proxy.getLocationUrl();
if (url != null) {
myCurrentChildren.computeIfAbsent(url, l -> new ArrayList<>()).add(proxy);
}
myCurrentChildren.computeIfAbsent(proxy.getName(), l -> new ArrayList<>()).add(proxy);
}
}
}
}
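  // When the tree was built before the run (myTreeBuildBeforeStart), look up a
  // not-yet-final child registered under the given location URL or name, preferring
  // a proxy whose suite/test kind matches preferSuite and whose parent is parentSuite.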
private SMTestProxy findChild(SMTestProxy parentSuite, String fullName, boolean preferSuite) {
if (myTreeBuildBeforeStart) {
Set<SMTestProxy> acceptedProxies = new LinkedHashSet<>();
Collection<? extends SMTestProxy> children = myCurrentChildren.get(fullName);
if (children == null) {
initCurrentChildren(parentSuite, preferSuite);
children = myCurrentChildren.get(fullName);
}
if (children != null) { //null if child started second time
for (SMTestProxy proxy : children) {
if (!proxy.isFinal() || preferSuite && proxy.isSuite()) {
acceptedProxies.add(proxy);
}
}
if (!acceptedProxies.isEmpty()) {
return acceptedProxies.stream()
.filter(proxy -> proxy.isSuite() == preferSuite && proxy.getParent() == parentSuite)
.findFirst()
.orElse(acceptedProxies.iterator().next());
}
}
}
return null;
}
@Override
public void onTestFinished(@NotNull final TestFinishedEvent testFinishedEvent) {
final String testName = testFinishedEvent.getName();
final Long duration = testFinishedEvent.getDuration();
final String fullTestName = getFullTestName(testName);
final SMTestProxy testProxy = getProxyByFullTestName(fullTestName);
if (testProxy == null) {
logProblem("Test wasn't started! TestFinished event: name = {" + testName + "}. " +
cannotFindFullTestNameMsg(fullTestName));
return;
}
testProxy.setDuration(duration != null ? duration : 0);
testProxy.setFrameworkOutputFile(testFinishedEvent.getOutputFile());
testProxy.setFinished();
myRunningTestsFullNameToProxy.remove(fullTestName);
clearCurrentChildren(fullTestName, testProxy);
//fire events
fireOnTestFinished(testProxy);
}
private void clearCurrentChildren(String fullTestName, SMTestProxy testProxy) {
myCurrentChildren.remove(fullTestName);
String url = testProxy.getLocationUrl();
if (url != null) {
myCurrentChildren.remove(url);
}
}
@Override
public void onSuiteFinished(@NotNull final TestSuiteFinishedEvent suiteFinishedEvent) {
final String suiteName = suiteFinishedEvent.getName();
final SMTestProxy mySuite = mySuitesStack.popSuite(suiteName);
if (mySuite != null) {
mySuite.setFinished();
myCurrentChildren.remove(suiteName);
String locationUrl = mySuite.getLocationUrl();
if (locationUrl != null) {
myCurrentChildren.remove(locationUrl);
}
//fire events
fireOnSuiteFinished(mySuite);
}
}
@Override
public void onUncapturedOutput(@NotNull final String text, final Key outputType) {
final SMTestProxy currentProxy = findCurrentTestOrSuite();
currentProxy.addOutput(text, outputType);
}
@Override
public void onError(@NotNull final String localizedMessage,
@Nullable final String stackTrace,
final boolean isCritical) {
final SMTestProxy currentProxy = findCurrentTestOrSuite();
currentProxy.addError(localizedMessage, stackTrace, isCritical);
}
@Override
public void onTestFailure(@NotNull final TestFailedEvent testFailedEvent) {
final String testName = testFailedEvent.getName();
if (testName == null) {
logProblem("No test name specified in " + testFailedEvent);
return;
}
final String localizedMessage = testFailedEvent.getLocalizedFailureMessage();
final String stackTrace = testFailedEvent.getStacktrace();
final boolean isTestError = testFailedEvent.isTestError();
final String comparisionFailureActualText = testFailedEvent.getComparisonFailureActualText();
final String comparisionFailureExpectedText = testFailedEvent.getComparisonFailureExpectedText();
final boolean inDebugMode = SMTestRunnerConnectionUtil.isInDebugMode();
final String fullTestName = getFullTestName(testName);
SMTestProxy testProxy = getProxyByFullTestName(fullTestName);
if (testProxy == null) {
logProblem("Test wasn't started! TestFailure event: name = {" + testName + "}" +
", message = {" + localizedMessage + "}" +
", stackTrace = {" + stackTrace + "}. " +
cannotFindFullTestNameMsg(fullTestName));
if (inDebugMode) {
return;
}
else {
// if hasn't been already reported
// 1. report
onTestStarted(new TestStartedEvent(testName, null));
// 2. add failure
testProxy = getProxyByFullTestName(fullTestName);
}
}
if (testProxy == null) {
return;
}
if (comparisionFailureActualText != null && comparisionFailureExpectedText != null) {
testProxy.setTestComparisonFailed(localizedMessage, stackTrace, comparisionFailureActualText, comparisionFailureExpectedText,
testFailedEvent);
}
else if (comparisionFailureActualText == null && comparisionFailureExpectedText == null) {
testProxy.setTestFailed(localizedMessage, stackTrace, isTestError);
}
else {
testProxy.setTestFailed(localizedMessage, stackTrace, isTestError);
logProblem("Comparison failure actual and expected texts should be both null or not null.\n"
+ "Expected:\n"
+ comparisionFailureExpectedText + "\n"
+ "Actual:\n"
+ comparisionFailureActualText);
}
// fire event
fireOnTestFailed(testProxy);
}
@Override
public void onTestIgnored(@NotNull final TestIgnoredEvent testIgnoredEvent) {
final String testName = testIgnoredEvent.getName();
if (testName == null) {
logProblem("TestIgnored event: no name");
}
String ignoreComment = testIgnoredEvent.getIgnoreComment();
final String stackTrace = testIgnoredEvent.getStacktrace();
final String fullTestName = getFullTestName(testName);
SMTestProxy testProxy = getProxyByFullTestName(fullTestName);
if (testProxy == null) {
final boolean debugMode = SMTestRunnerConnectionUtil.isInDebugMode();
logProblem("Test wasn't started! " +
"TestIgnored event: name = {" + testName + "}, " +
"message = {" + ignoreComment + "}. " +
cannotFindFullTestNameMsg(fullTestName));
if (debugMode) {
return;
}
else {
// try to fix
// 1. report test opened
onTestStarted(new TestStartedEvent(testName, null));
// 2. report failure
testProxy = getProxyByFullTestName(fullTestName);
}
}
if (testProxy == null) {
return;
}
testProxy.setTestIgnored(ignoreComment, stackTrace);
// fire event
fireOnTestIgnored(testProxy);
}
@Override
public void onTestOutput(@NotNull final TestOutputEvent testOutputEvent) {
final String testName = testOutputEvent.getName();
final String text = testOutputEvent.getText();
final Key outputType = testOutputEvent.getOutputType();
final String fullTestName = getFullTestName(testName);
final SMTestProxy testProxy = getProxyByFullTestName(fullTestName);
if (testProxy == null) {
logProblem("Test wasn't started! TestOutput event: name = {" + testName + "}, " +
"outputType = " + outputType + ", " +
"text = {" + text + "}. " +
cannotFindFullTestNameMsg(fullTestName));
return;
}
testProxy.addOutput(text, outputType);
}
@Override
public void onTestsCountInSuite(final int count) {
fireOnTestsCountInSuite(count);
}
@NotNull
protected final SMTestProxy getCurrentSuite() {
final SMTestProxy currentSuite = mySuitesStack.getCurrentSuite();
if (currentSuite != null) {
return currentSuite;
}
// current suite shouldn't be null otherwise test runner isn't correct
// or may be we are in debug mode
logProblem("Current suite is undefined. Root suite will be used.");
return myTestsRootProxy;
}
protected String getFullTestName(final String testName) {
// Test name should be unique
return testName;
}
protected int getRunningTestsQuantity() {
return myRunningTestsFullNameToProxy.size();
}
@Nullable
protected SMTestProxy getProxyByFullTestName(final String fullTestName) {
return myRunningTestsFullNameToProxy.get(fullTestName);
}
@TestOnly
protected void clearInternalSuitesStack() {
mySuitesStack.clear();
}
private String cannotFindFullTestNameMsg(String fullTestName) {
return "Cant find running test for ["
+ fullTestName
+ "]. Current running tests: {"
+ dumpRunningTestsNames() + "}";
}
private StringBuilder dumpRunningTestsNames() {
final Set<String> names = myRunningTestsFullNameToProxy.keySet();
final StringBuilder namesDump = new StringBuilder();
for (String name : names) {
namesDump.append('[').append(name).append(']').append(',');
}
return namesDump;
}
/*
* Remove listeners, etc
*/
@Override
public void dispose() {
super.dispose();
if (!myRunningTestsFullNameToProxy.isEmpty()) {
final Application application = ApplicationManager.getApplication();
if (!application.isHeadlessEnvironment() && !application.isUnitTestMode()) {
logProblem("Not all events were processed! " + dumpRunningTestsNames());
}
}
myRunningTestsFullNameToProxy.clear();
mySuitesStack.clear();
}
private SMTestProxy findCurrentTestOrSuite() {
//if we can locate test - we will send output to it, otherwise to current test suite
SMTestProxy currentProxy = null;
Iterator<SMTestProxy> iterator = myRunningTestsFullNameToProxy.values().iterator();
if (iterator.hasNext()) {
//current test
currentProxy = iterator.next();
      if (iterator.hasNext()) { //if there are multiple tests running, put the output to the suite
currentProxy = null;
}
}
if (currentProxy == null) {
//current suite
//
// ProcessHandler can fire output available event before processStarted event
final SMTestProxy currentSuite = mySuitesStack.getCurrentSuite();
currentProxy = currentSuite != null ? currentSuite : myTestsRootProxy;
}
return currentProxy;
}
}
| msebire/intellij-community | platform/smRunner/src/com/intellij/execution/testframework/sm/runner/GeneralToSMTRunnerEventsConvertor.java | Java | apache-2.0 | 17,626 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Data;
using System.Data.SQLite;
using System.Data.Common;
using System.IO;
namespace GatherAll
{
class Cls_SqliteMng
{
//string m_DBName = "";
//string connStr = "";
        // Create a database file, saved in the HyData folder under the current directory
//
public void CreateDB(string dbName)
{
// string databaseFileName = System.Environment.CurrentDirectory + @"/HyData/" + dbName;
SQLiteConnection.CreateFile(dbName);
}
        // Execute a SQL statement
        // Create a table: ExecuteSql("create table HyTest(TestID TEXT)");
        // Insert some data: ExecuteSql("insert into HyTest(TestID) values('1001')");
public void ExecuteSql(string sqlStr, string strConStr)
{
//connStr = connStr1 + m_DBName + connStr;
using (DbConnection conn = new SQLiteConnection(strConStr))
{
conn.Open();
DbCommand comm = conn.CreateCommand();
comm.CommandText = sqlStr;
comm.CommandType = CommandType.Text;
comm.ExecuteNonQuery();
}
}
        //// Execute a query and return a DataSet
//private DataSet ExecDataSet(string sqlStr)
//{
// //connStr = "";
// //connStr = connStr1 + m_DBName + connStr;
// using (SQLiteConnection conn = new SQLiteConnection(sqlStr))
// {
// conn.Open();
// SQLiteCommand cmd = conn.CreateCommand();
// cmd.CommandText = sqlStr;
// cmd.CommandType = CommandType.Text;
// SQLiteDataAdapter da = new SQLiteDataAdapter(cmd);
// DataSet ds = new DataSet();
// da.Fill(ds);
// return ds;
// }
//}
}
}
| songboriceboy/GatherAllStoreInDB | GatherAll/Cls_SqliteMng.cs | C# | apache-2.0 | 2,016 |
package br.copacabana;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Level;
import javax.cache.Cache;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.springframework.web.servlet.ModelAndView;
import br.com.copacabana.cb.entities.Address;
import br.com.copacabana.cb.entities.Client;
import br.com.copacabana.cb.entities.MealOrder;
import br.com.copacabana.cb.entities.OrderedPlate;
import br.com.copacabana.cb.entities.Plate;
import br.com.copacabana.cb.entities.Restaurant;
import br.com.copacabana.cb.entities.TurnType;
import br.com.copacabana.cb.entities.WorkingHours.DayOfWeek;
import br.copacabana.order.paypal.PayPalProperties.PayPalConfKeys;
import br.copacabana.spring.AddressManager;
import br.copacabana.spring.ClientManager;
import br.copacabana.spring.ConfigurationManager;
import br.copacabana.spring.PlateManager;
import br.copacabana.spring.RestaurantManager;
import br.copacabana.usecase.control.UserActionManager;
import br.copacabana.util.TimeController;
import com.google.appengine.api.datastore.Key;
import com.google.appengine.api.datastore.KeyFactory;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.google.gson.JsonPrimitive;
/**
* @author Rafael Coutinho
*/
public class PlaceOrderController extends JsonViewController {
private String formView;
private String successView;
@Override
protected ModelAndView handleRequestInternal(HttpServletRequest request, HttpServletResponse response) throws Exception {
Map<String, Object> model = new HashMap<String, Object>();
model.put("mode", "view");
try {
Cache cache = CacheController.getCache();
if (cache.get(PayPalConfKeys.pppFixedRate.name()) == null) {
ConfigurationManager cm = new ConfigurationManager();
cache.put(PayPalConfKeys.pppFixedRate.name(), cm.getConfigurationValue(PayPalConfKeys.pppFixedRate.name()));
cache.put(PayPalConfKeys.pppPercentageValue.name(), cm.getConfigurationValue(PayPalConfKeys.pppPercentageValue.name()));
}
if (!Authentication.isUserLoggedIn(request.getSession())) {
String orderData = request.getParameter("orderData");
request.getSession().setAttribute("orderData", orderData);
model.put("forwardUrl", "/continueOrder.jsp");
UserActionManager.startOrderNotLogged(orderData, request.getSession().getId());
return new ModelAndView(getFormView(), model);
} else {
String orderData = "";
JsonObject user = Authentication.getLoggedUser(request.getSession());
String loggedUserId = user.get("entity").getAsJsonObject().get("id").getAsString();
if (request.getParameter("orderData") == null) {
orderData = (String) request.getSession().getAttribute("orderData");
} else {
orderData = request.getParameter("orderData");
}
log.log(Level.INFO, "OrderJSon: {0}", orderData);
JsonParser pa = new JsonParser();
JsonObject orderDataJson = (JsonObject) pa.parse(orderData);
ClientManager cman = new ClientManager();
Client c = cman.find(KeyFactory.stringToKey(loggedUserId), Client.class);
MealOrder mo = getMealOrder(c, orderDataJson);
request.getSession().setAttribute("clientPhone", "");
DateSerializer dateSerializer = new DateSerializer(request);
DateDeSerializer dateDeSerializer = new DateDeSerializer(request);
GsonBuilder gsonBuilder = GsonBuilderFactory.getInstance();// new
// GsonBuilder().setPrettyPrinting().serializeNulls().excludeFieldsWithoutExposeAnnotation();
gsonBuilder.registerTypeAdapter(Date.class, dateSerializer);
gsonBuilder.registerTypeAdapter(Date.class, dateDeSerializer);
gsonBuilder.registerTypeAdapter(Key.class, new KeyDeSerializer());
gsonBuilder.registerTypeAdapter(Key.class, new KeySerializer());
Gson gson = gsonBuilder.create();
model.putAll(updateModelData(mo, c, gson));
String json = gson.toJson(mo); // Or use new
json = GsonBuilderFactory.escapeString(json);
request.getSession().setAttribute("orderData", json);
UserActionManager.startOrder(json, loggedUserId, request.getSession().getId());
return new ModelAndView(getSuccessView(), model);
}
} catch (Exception e) {
log.log(Level.SEVERE, "Failed to place order.");
try {
String orderData = "";
log.log(Level.SEVERE, "Checking logged user.");
JsonObject user = Authentication.getLoggedUser(request.getSession());
if (user == null) {
log.log(Level.SEVERE, "user is not logged in.");
}
String loggedUserId = user.get("entity").getAsJsonObject().get("id").getAsString();
log.log(Level.SEVERE, "logged user id {0}", loggedUserId);
if (request.getParameter("orderData") == null) {
log.log(Level.SEVERE, "Order is not in request, checking session");
orderData = (String) request.getSession().getAttribute("orderData");
} else {
log.log(Level.SEVERE, "Order is in request");
orderData = request.getParameter("orderData");
}
if (orderData == null) {
log.log(Level.SEVERE, "Order was null!");
}
log.log(Level.SEVERE, "Order is order :" + orderData);
log.log(Level.SEVERE, "Exception was {0}.", e);
log.log(Level.SEVERE, "Error was {0}.", e.getMessage());
UserActionManager.registerMajorError(request, e, loggedUserId, request.getSession().getId(), "placing order");
} catch (Exception ex) {
log.log(Level.SEVERE, "Failed during loggin of error was {0}.", e);
UserActionManager.registerMajorError(request, e, "placing order 2");
}
throw e;
}
}
public static Map<String, Object> updateModelData(MealOrder mo, Client c, Gson gson) {
Map<String, Object> model = new HashMap<String, Object>();
RestaurantManager rman = new RestaurantManager();
Restaurant r = rman.getRestaurant(mo.getRestaurant());
Boolean b = r.getOnlyForRetrieval();
if (b != null && true == b) {
model.put("onlyForRetrieval", Boolean.TRUE);
} else {
model.put("onlyForRetrieval", Boolean.FALSE);
}
model.put("restaurantAddressKey", KeyFactory.keyToString(r.getAddress()));
model.put("clientCpf", c.getCpf());
model.put("level", c.getLevel().ordinal());
JsonObject json = new JsonObject();
ConfigurationManager cm = new ConfigurationManager();
String hasSpecificLogic = cm.getConfigurationValue("hasSpecificLogic");
model.put("noTakeAwayOrders", "false");
if (hasSpecificLogic != null && hasSpecificLogic.endsWith("true")) {
json = getSteakHouseSpecificData(mo, c, gson);
getMakisSpecificLogic(mo, c, gson, json);
getPapagaiosSpecificLogic(mo, c, gson, json);
getPizzadoroSpecificLogic(mo,c,gson,json);
if (noTakeAwayOrders(mo) == true) {
model.put("noTakeAwayOrders", "true");
}
}
model.put("hasSpecificLogic", json.toString());
if (json.get("javascript") != null && json.get("javascript").getAsString().length() > 0) {
model.put("hasSpecificLogicJavascript", json.get("javascript").getAsString());
}
Address restAddress = new AddressManager().getAddress(r.getAddress());
model.put("restaurantAddress", gson.toJson(restAddress));
return model;
}
private static boolean noTakeAwayOrders(MealOrder mo) {
ConfigurationManager cm = new ConfigurationManager();
String ids = cm.getConfigurationValue("no.takeaway.ids");
String restId = KeyFactory.keyToString(mo.getRestaurant());
if (ids.contains(restId)) {
return true;
}
return false;
}
private static void getPapagaiosSpecificLogic(MealOrder mo, Client c, Gson gson, JsonObject json) {
ConfigurationManager cm = new ConfigurationManager();
String idStr = cm.getConfigurationValue("papagaios.id");
if (idStr != null && idStr.length() > 0) {
Key k = KeyFactory.stringToKey(idStr);
if (k.equals(mo.getRestaurant())) {
json.add("javascript", new JsonPrimitive("/scripts/custom/papagaios.js"));
}
}
}
private static void getPizzadoroSpecificLogic(MealOrder mo, Client c, Gson gson, JsonObject json) {
ConfigurationManager cm = new ConfigurationManager();
String idStr = cm.getConfigurationValue("pizzadoro.id");
if (idStr != null && idStr.length() > 0) {
Key k = KeyFactory.stringToKey(idStr);
if (k.equals(mo.getRestaurant())) {
json.add("javascript", new JsonPrimitive("/scripts/custom/pizzadoro.js"));
}
}
}
private static void getMakisSpecificLogic(MealOrder mo, Client c, Gson gson, JsonObject json) {
try {
ConfigurationManager cm = new ConfigurationManager();
PlateManager pm = new PlateManager();
String makisIdStr = cm.getConfigurationValue("makis.Id");
if (makisIdStr != null && makisIdStr.length() > 0) {
Key makis = KeyFactory.stringToKey(makisIdStr);
if (makis != null && makis.equals(mo.getRestaurant())) {
String packageId = cm.getConfigurationValue("makis.package.id");
if (packageId != null && packageId.length() > 0) {
json.add("makisPackageCostId", new JsonPrimitive(packageId));
json.add("makisMsg", new JsonPrimitive(cm.getConfigurationValue("makis.msg")));
boolean isIncluded = false;
Key packageKey = KeyFactory.stringToKey(packageId);
for (Iterator<OrderedPlate> iterator = mo.getPlates().iterator(); iterator.hasNext();) {
OrderedPlate plate = (OrderedPlate) iterator.next();
if (Boolean.FALSE.equals(plate.getIsFraction()) && plate.getPlate().equals(packageKey)) {
isIncluded = true;
break;
}
}
if (isIncluded == false) {
Plate packagePlate = pm.get(packageKey);
OrderedPlate oplate = new OrderedPlate();
oplate.setName(packagePlate.getName());
oplate.setPrice(packagePlate.getPrice());
oplate.setPriceInCents(packagePlate.getPriceInCents());
oplate.setQty(1);
oplate.setPlate(packageKey);
mo.getPlates().add(oplate);
}
}
}
}
} catch (Exception e) {
log.log(Level.SEVERE, "failed to add makis specific logic", e);
}
}
private static JsonObject getSteakHouseSpecificData(MealOrder mo, Client c, Gson gson) {
JsonObject json = new JsonObject();
json.add("freeDelivery", new JsonPrimitive("false"));
try {
ConfigurationManager cm = new ConfigurationManager();
String steakIdStr = cm.getConfigurationValue("steakHouse.Id");
if (steakIdStr != null && steakIdStr.length() > 0) {
Key steak = KeyFactory.stringToKey(steakIdStr);
if (steak.equals(mo.getRestaurant())) {
if (!TimeController.getDayOfWeek().equals(DayOfWeek.SATURDAY) && !TimeController.getDayOfWeek().equals(DayOfWeek.SUNDAY)) {
if (TimeController.getCurrentTurn().equals(TurnType.LUNCH)) {
String foodCatsStr = cm.getConfigurationValue("steakHouse.FoodCats");
if (foodCatsStr != null && foodCatsStr.length() > 0) {
String[] foodCatsArray = foodCatsStr.split("\\|");
Set<Key> foodCats = new HashSet<Key>();
for (int i = 0; i < foodCatsArray.length; i++) {
if (foodCatsArray[i].length() > 0) {
foodCats.add(KeyFactory.stringToKey(foodCatsArray[i]));
}
}
List<OrderedPlate> plates = mo.getPlates();
PlateManager pm = new PlateManager();
for (Iterator iterator = plates.iterator(); iterator.hasNext();) {
OrderedPlate orderedPlate = (OrderedPlate) iterator.next();
Plate p = null;
if (Boolean.TRUE.equals(orderedPlate.getIsFraction())) {
p = pm.getPlate(orderedPlate.getFractionPlates().iterator().next());
} else {
p = pm.getPlate(orderedPlate.getPlate());
}
if (!foodCats.contains(p.getFoodCategory())) {
json.add("freeDelivery", new JsonPrimitive("false"));
return json;
}
}
json.add("freeDelivery", new JsonPrimitive("true"));
json.add("msg", new JsonPrimitive(cm.getConfigurationValue("steakHouse.msg")));
}
}
}
}
}
} catch (Exception e) {
log.log(Level.SEVERE, "Could not set up things for SteakHouse", e);
}
return json;
}
public MealOrder getMealOrder(Client c, JsonObject sessionOderData) {
MealOrder mo = new MealOrder();
mo.setClient(c);
if (c.getContact() != null) {
mo.setClientPhone(c.getContact().getPhone());
}
mo.setAddress(getAddress(sessionOderData, c));
mo.setObservation(getObservation(sessionOderData));
mo.setRestaurant(getRestKey(sessionOderData));
mo.setPlates(getPlates(sessionOderData));
return mo;
}
private Key getAddress(JsonObject sessionOderData, Client c) {
try {
if (sessionOderData.get("address") == null) {
if (c.getMainAddress() != null) {
return c.getMainAddress();
} else {
return null;
}
} else {
if (sessionOderData.get("address") != null && !sessionOderData.get("address").isJsonNull() ) {
return KeyFactory.stringToKey(sessionOderData.get("address").getAsString());
}else{
return null;
}
}
} catch (Exception e) {
log.log(Level.SEVERE, "no address da sessão havia {0}", sessionOderData.get("address"));
log.log(Level.SEVERE, "Error ao buscar endereço de cliente ou em sessão", e);
return null;
}
}
public List<OrderedPlate> getPlates(JsonObject sessionOderData) {
List<OrderedPlate> orderedPlates = new ArrayList<OrderedPlate>();
JsonArray array = sessionOderData.get("plates").getAsJsonArray();
for (int i = 0; i < array.size(); i++) {
JsonObject pjson = array.get(i).getAsJsonObject();
orderedPlates.add(getOrdered(pjson));
}
return orderedPlates;
}
private OrderedPlate getOrdered(JsonObject pjson) {
OrderedPlate oplate = new OrderedPlate();
oplate.setName(pjson.get("name").getAsString());
oplate.setPrice(pjson.get("price").getAsDouble());
oplate.setPriceInCents(Double.valueOf(pjson.get("price").getAsDouble() * 100.0).intValue());
oplate.setQty(pjson.get("qty").getAsInt());
if (pjson.get("isFraction").getAsBoolean() == true) {
oplate.setIsFraction(Boolean.TRUE);
Set<Key> fractionPlates = new HashSet<Key>();
JsonArray fractionKeys = pjson.get("fractionKeys").getAsJsonArray();
for (int i = 0; i < fractionKeys.size(); i++) {
Key fractionKey = KeyFactory.stringToKey(fractionKeys.get(i).getAsString());
fractionPlates.add(fractionKey);
}
oplate.setFractionPlates(fractionPlates);
return oplate;
} else {
String pkey = "";
if (pjson.get("plate").isJsonObject()) {
pkey = pjson.get("plate").getAsJsonObject().get("id").getAsString();
} else {
pkey = pjson.get("plate").getAsString();
}
oplate.setPlate(KeyFactory.stringToKey(pkey));
return oplate;
}
}
public Key getRestKey(JsonObject sessionOderData) {
String restKey;
if (sessionOderData.get("restaurant") != null) {
if (sessionOderData.get("restaurant").isJsonObject()) {
restKey = sessionOderData.get("restaurant").getAsJsonObject().get("id").getAsString();
} else {
restKey = sessionOderData.get("restaurant").getAsString();
}
} else {
restKey = sessionOderData.get("plates").getAsJsonArray().get(0).getAsJsonObject().get("plate").getAsJsonObject().get("value").getAsJsonObject().get("restaurant").getAsString();
}
return KeyFactory.stringToKey(restKey);
}
public String getObservation(JsonObject sessionOderData) {
return sessionOderData.get("observation").getAsString();
}
public String getFormView() {
return formView;
}
public void setFormView(String formView) {
this.formView = formView;
}
public String getSuccessView() {
return successView;
}
public void setSuccessView(String successView) {
this.successView = successView;
}
} | rafaelcoutinho/comendobemdelivery | src/br/copacabana/PlaceOrderController.java | Java | apache-2.0 | 16,224 |
current_dir = File.dirname(__FILE__)
log_level :info
log_location STDOUT
node_name "user"
client_key "#{ENV['HOME']}/.ssh/user.pem"
validation_client_name "user-validator"
validation_key "#{current_dir}/validator.pem"
chef_server_url "https://api.opscode.com/organizations/user-organization"
cache_type 'BasicFile'
cache_options(:path => "#{ENV['HOME']}/.chef/checksums" )
cookbook_path "#{current_dir}/../cookbooks"
# required to extract the right interface for knife ssh
knife[:ssh_attribute] = "ipaddress"
knife[:joyent_username] = ENV['SDC_USERNAME'] || 'user'
knife[:joyent_keyname] = ENV['SDC_CLI_KEY_ID'] || 'keyname'
knife[:joyent_keyfile] = ENV['SDC_CLI_IDENTITY'] || "#{ENV['HOME']}/.ssh/id_rsa"
knife[:joyent_api_url] = 'https://us-sw-1.api.joyentcloud.com/'
| wanelo-chef/smartos-chef-repo | .chef/knife.rb | Ruby | apache-2.0 | 874 |
using System.Threading;
using System.Threading.Tasks;
using MediatR;
namespace CoreDocker.Core.Framework.CommandQuery
{
public class MediatorCommander : ICommander
{
private readonly IMediator _mediator;
public MediatorCommander(IMediator mediator)
{
_mediator = mediator;
}
#region Implementation of ICommander
        public async Task Notify<T>(T notificationRequest, CancellationToken cancellationToken) where T : CommandNotificationBase
        {
            // forward the caller's cancellation token instead of dropping it
            await _mediator.Publish(notificationRequest, cancellationToken);
        }
        public async Task<CommandResult> Execute<T>(T commandRequest, CancellationToken cancellationToken) where T : CommandRequestBase
        {
            return await _mediator.Send(commandRequest, cancellationToken);
        }
#endregion
}
} | rolfwessels/CoreDocker | src/CoreDocker.Core/Framework/CommandQuery/MediatorCommander.cs | C# | apache-2.0 | 828 |
package hska.iwi.eShopMaster.model.businessLogic.manager.impl;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
import hska.iwi.eShopMaster.model.businessLogic.manager.CategoryManager;
import hska.iwi.eShopMaster.model.businessLogic.manager.entity.Category;
import hska.iwi.eShopMaster.model.businessLogic.manager.entity.User;
import java.util.List;
import javax.ws.rs.core.MediaType;
import org.apache.log4j.Logger;
public class CategoryManagerImpl implements CategoryManager {
private final static String BASIS_URL_CATEGORY = "http://localhost:8081/api/catalog/category/";
private final Logger logger = Logger.getLogger(CategoryManagerImpl.class);
private final ObjectMapper parser = new ObjectMapper();
private final User currentUser;
public CategoryManagerImpl(User currentUser) {
this.currentUser = currentUser;
}
@Override
public List<Category> getCategories() {
List<Category> categories = null;
try {
Client client = Client.create();
WebResource webResource = client
.resource(BASIS_URL_CATEGORY);
ClientResponse response = webResource.accept(MediaType.APPLICATION_JSON_TYPE)
.get(ClientResponse.class);
            // deserialize into a typed list; the raw List.class would yield maps, not Category objects
            categories = parser.readValue(response.getEntity(String.class), new TypeReference<List<Category>>() { });
} catch (Exception ex) {
logger.error(ex);
}
return categories;
}
@Override
public Category getCategory(int id) {
Category category = null;
try {
Client client = Client.create();
WebResource webResource = client
.resource(BASIS_URL_CATEGORY)
.path(String.valueOf(id));
ClientResponse response = webResource.accept(MediaType.APPLICATION_JSON_TYPE)
.get(ClientResponse.class);
category = parser.readValue(response.getEntity(String.class), Category.class);
} catch (Exception ex) {
logger.error(ex);
}
return category;
}
@Override
public void addCategory(String name) {
Category category = new Category(name);
try {
Client client = Client.create();
WebResource webResource = client
.resource(BASIS_URL_CATEGORY);
webResource.type(MediaType.APPLICATION_JSON_TYPE)
.accept(MediaType.APPLICATION_JSON_TYPE)
.header("usr", currentUser.getUsername())
.header("pass", currentUser.getPassword())
.post(ClientResponse.class, parser.writeValueAsString(category));
} catch (Exception ex) {
logger.error(ex);
}
}
@Override
public void delCategoryById(int id) {
try {
Client client = Client.create();
WebResource webResource = client
.resource(BASIS_URL_CATEGORY)
.path(String.valueOf(id));
webResource.accept(MediaType.APPLICATION_JSON_TYPE)
.header("usr", currentUser.getUsername())
.header("pass", currentUser.getPassword())
.delete();
} catch (Exception ex) {
logger.error(ex);
}
}
}
| Am3o/eShop | WebShopStart/src/main/java/hska/iwi/eShopMaster/model/businessLogic/manager/impl/CategoryManagerImpl.java | Java | apache-2.0 | 3,084 |
function f() {
var x=arguments[12];
} | freedot/tstolua | tests/cases/compiler/arguments.ts | TypeScript | apache-2.0 | 41 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.volume import base
from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
class VolumesActionsTest(base.BaseVolumeTest):
"""Test volume actions"""
create_default_network = True
@classmethod
def resource_setup(cls):
super(VolumesActionsTest, cls).resource_setup()
# Create a test shared volume for attach/detach tests
cls.volume = cls.create_volume()
@decorators.idempotent_id('fff42874-7db5-4487-a8e1-ddda5fb5288d')
@decorators.attr(type='smoke')
@utils.services('compute')
def test_attach_detach_volume_to_instance(self):
"""Test attaching and detaching volume to instance"""
# Create a server
server = self.create_server()
# Volume is attached and detached successfully from an instance
self.volumes_client.attach_volume(self.volume['id'],
instance_uuid=server['id'],
mountpoint='/dev/%s' %
CONF.compute.volume_device_name)
waiters.wait_for_volume_resource_status(self.volumes_client,
self.volume['id'], 'in-use')
self.volumes_client.detach_volume(self.volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
self.volume['id'], 'available')
@decorators.idempotent_id('63e21b4c-0a0c-41f6-bfc3-7c2816815599')
def test_volume_bootable(self):
"""Test setting and retrieving bootable flag of a volume"""
for bool_bootable in [True, False]:
self.volumes_client.set_bootable_volume(self.volume['id'],
bootable=bool_bootable)
fetched_volume = self.volumes_client.show_volume(
self.volume['id'])['volume']
# Get Volume information
# NOTE(masayukig): 'bootable' is "true" or "false" in the current
# cinder implementation. So we need to cast boolean values to str
# and make it lower to compare here.
self.assertEqual(str(bool_bootable).lower(),
fetched_volume['bootable'])
@decorators.idempotent_id('9516a2c8-9135-488c-8dd6-5677a7e5f371')
@utils.services('compute')
def test_get_volume_attachment(self):
"""Test getting volume attachments
Attach a volume to a server, and then retrieve volume's attachments
info.
"""
# Create a server
server = self.create_server()
# Verify that a volume's attachment information is retrieved
self.volumes_client.attach_volume(self.volume['id'],
instance_uuid=server['id'],
mountpoint='/dev/%s' %
CONF.compute.volume_device_name)
waiters.wait_for_volume_resource_status(self.volumes_client,
self.volume['id'],
'in-use')
self.addCleanup(waiters.wait_for_volume_resource_status,
self.volumes_client,
self.volume['id'], 'available')
self.addCleanup(self.volumes_client.detach_volume, self.volume['id'])
volume = self.volumes_client.show_volume(self.volume['id'])['volume']
attachment = volume['attachments'][0]
self.assertEqual('/dev/%s' %
CONF.compute.volume_device_name,
attachment['device'])
self.assertEqual(server['id'], attachment['server_id'])
self.assertEqual(self.volume['id'], attachment['id'])
self.assertEqual(self.volume['id'], attachment['volume_id'])
@decorators.idempotent_id('d8f1ca95-3d5b-44a3-b8ca-909691c9532d')
@utils.services('image')
def test_volume_upload(self):
"""Test uploading volume to create an image"""
# NOTE(gfidente): the volume uploaded in Glance comes from setUpClass,
# it is shared with the other tests. After it is uploaded in Glance,
# there is no way to delete it from Cinder, so we delete it from Glance
# using the Glance images_client and from Cinder via tearDownClass.
image_name = data_utils.rand_name(self.__class__.__name__ + '-Image')
body = self.volumes_client.upload_volume(
self.volume['id'], image_name=image_name,
disk_format=CONF.volume.disk_format)['os-volume_upload_image']
image_id = body["image_id"]
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.images_client.delete_image,
image_id)
waiters.wait_for_image_status(self.images_client, image_id, 'active')
waiters.wait_for_volume_resource_status(self.volumes_client,
self.volume['id'], 'available')
image_info = self.images_client.show_image(image_id)
self.assertEqual(image_name, image_info['name'])
self.assertEqual(CONF.volume.disk_format, image_info['disk_format'])
@decorators.idempotent_id('92c4ef64-51b2-40c0-9f7e-4749fbaaba33')
def test_reserve_unreserve_volume(self):
"""Test reserving and unreserving volume"""
# Mark volume as reserved.
self.volumes_client.reserve_volume(self.volume['id'])
# To get the volume info
body = self.volumes_client.show_volume(self.volume['id'])['volume']
self.assertIn('attaching', body['status'])
# Unmark volume as reserved.
self.volumes_client.unreserve_volume(self.volume['id'])
# To get the volume info
body = self.volumes_client.show_volume(self.volume['id'])['volume']
self.assertIn('available', body['status'])
@decorators.idempotent_id('fff74e1e-5bd3-4b33-9ea9-24c103bc3f59')
def test_volume_readonly_update(self):
"""Test updating and retrieve volume's readonly flag"""
for readonly in [True, False]:
# Update volume readonly
self.volumes_client.update_volume_readonly(self.volume['id'],
readonly=readonly)
# Get Volume information
fetched_volume = self.volumes_client.show_volume(
self.volume['id'])['volume']
# NOTE(masayukig): 'readonly' is "True" or "False" in the current
# cinder implementation. So we need to cast boolean values to str
# to compare here.
self.assertEqual(str(readonly),
fetched_volume['metadata']['readonly'])
| openstack/tempest | tempest/api/volume/test_volumes_actions.py | Python | apache-2.0 | 7,582 |
/** @file
An example program which illustrates adding and manipulating an
HTTP response MIME header:
Usage: response_header_1.so
add read_resp_header hook
get http response header
if 200, then
add mime extension header with count of zero
add mime extension header with date response was received
add "Cache-Control: public" header
else if 304, then
retrieve cached header
get old value of mime header count
increment mime header count
store mime header with new count
@section license License
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed
with this work for additional information regarding copyright
ownership. The ASF licenses this file to you under the Apache
License, Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of the
License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <time.h>
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include "ts/ts.h"
#include "ts/ink_defs.h"
#define PLUGIN_NAME "response_header_1"
static int init_buffer_status;
static char *mimehdr1_name;
static char *mimehdr2_name;
static char *mimehdr1_value;
static TSMBuffer hdr_bufp;
static TSMLoc hdr_loc;
static TSMLoc field_loc;
static TSMLoc value_loc;
static void
modify_header(TSHttpTxn txnp)
{
TSMBuffer resp_bufp;
TSMBuffer cached_bufp;
TSMLoc resp_loc;
TSMLoc cached_loc;
TSHttpStatus resp_status;
TSMLoc new_field_loc;
TSMLoc cached_field_loc;
time_t recvd_time;
const char *chkptr;
int chklength;
int num_refreshes = 0;
if (!init_buffer_status) {
return; /* caller reenables */
}
if (TSHttpTxnServerRespGet(txnp, &resp_bufp, &resp_loc) != TS_SUCCESS) {
TSError("[%s] Couldn't retrieve server response header", PLUGIN_NAME);
return; /* caller reenables */
}
/* TSqa06246/TSqa06144 */
resp_status = TSHttpHdrStatusGet(resp_bufp, resp_loc);
if (TS_HTTP_STATUS_OK == resp_status) {
TSDebug(PLUGIN_NAME, "Processing 200 OK");
TSMimeHdrFieldCreate(resp_bufp, resp_loc, &new_field_loc); /* Probably should check for errors */
TSDebug(PLUGIN_NAME, "Created new resp field with loc %p", new_field_loc);
/* copy name/values created at init
* ( "x-num-served-from-cache" ) : ( "0" )
*/
TSMimeHdrFieldCopy(resp_bufp, resp_loc, new_field_loc, hdr_bufp, hdr_loc, field_loc);
/*********** Unclear why this is needed **************/
TSMimeHdrFieldAppend(resp_bufp, resp_loc, new_field_loc);
/* Cache-Control: Public */
TSMimeHdrFieldCreate(resp_bufp, resp_loc, &new_field_loc); /* Probably should check for errors */
TSDebug(PLUGIN_NAME, "Created new resp field with loc %p", new_field_loc);
TSMimeHdrFieldAppend(resp_bufp, resp_loc, new_field_loc);
TSMimeHdrFieldNameSet(resp_bufp, resp_loc, new_field_loc, TS_MIME_FIELD_CACHE_CONTROL, TS_MIME_LEN_CACHE_CONTROL);
TSMimeHdrFieldValueStringInsert(resp_bufp, resp_loc, new_field_loc, -1, TS_HTTP_VALUE_PUBLIC, TS_HTTP_LEN_PUBLIC);
/*
* mimehdr2_name = TSstrdup( "x-date-200-recvd" ) : CurrentDateTime
*/
TSMimeHdrFieldCreate(resp_bufp, resp_loc, &new_field_loc); /* Probably should check for errors */
TSDebug(PLUGIN_NAME, "Created new resp field with loc %p", new_field_loc);
TSMimeHdrFieldAppend(resp_bufp, resp_loc, new_field_loc);
TSMimeHdrFieldNameSet(resp_bufp, resp_loc, new_field_loc, mimehdr2_name, strlen(mimehdr2_name));
recvd_time = time(NULL);
TSMimeHdrFieldValueDateInsert(resp_bufp, resp_loc, new_field_loc, recvd_time);
TSHandleMLocRelease(resp_bufp, resp_loc, new_field_loc);
TSHandleMLocRelease(resp_bufp, TS_NULL_MLOC, resp_loc);
} else if (TS_HTTP_STATUS_NOT_MODIFIED == resp_status) {
TSDebug(PLUGIN_NAME, "Processing 304 Not Modified");
/* N.B.: Protect writes to data (hash on URL + mutex: (ies)) */
/* Get the cached HTTP header */
if (TSHttpTxnCachedRespGet(txnp, &cached_bufp, &cached_loc) != TS_SUCCESS) {
TSError("[%s] STATUS 304, TSHttpTxnCachedRespGet():", PLUGIN_NAME);
TSError("[%s] Couldn't retrieve cached response header", PLUGIN_NAME);
TSHandleMLocRelease(resp_bufp, TS_NULL_MLOC, resp_loc);
return; /* Caller reenables */
}
/* Get the cached MIME field name for this HTTP header */
cached_field_loc = TSMimeHdrFieldFind(cached_bufp, cached_loc, (const char *)mimehdr1_name, strlen(mimehdr1_name));
if (TS_NULL_MLOC == cached_field_loc) {
TSError("[%s] Can't find header %s in cached document", PLUGIN_NAME, mimehdr1_name);
TSHandleMLocRelease(resp_bufp, TS_NULL_MLOC, resp_loc);
TSHandleMLocRelease(cached_bufp, TS_NULL_MLOC, cached_loc);
return; /* Caller reenables */
}
/* Get the cached MIME value for this name in this HTTP header */
chkptr = TSMimeHdrFieldValueStringGet(cached_bufp, cached_loc, cached_field_loc, -1, &chklength);
if (NULL == chkptr || !chklength) {
TSError("[%s] Could not find value for cached MIME field name %s", PLUGIN_NAME, mimehdr1_name);
TSHandleMLocRelease(resp_bufp, TS_NULL_MLOC, resp_loc);
TSHandleMLocRelease(cached_bufp, TS_NULL_MLOC, cached_loc);
TSHandleMLocRelease(cached_bufp, cached_loc, cached_field_loc);
return; /* Caller reenables */
}
TSDebug(PLUGIN_NAME, "Header field value is %s, with length %d", chkptr, chklength);
/* Get the cached MIME value for this name in this HTTP header */
/*
TSMimeHdrFieldValueUintGet(cached_bufp, cached_loc, cached_field_loc, 0, &num_refreshes);
TSDebug(PLUGIN_NAME,
"Cached header shows %d refreshes so far", num_refreshes );
num_refreshes++ ;
*/
/* txn origin server response for this transaction stored
* in resp_bufp, resp_loc
*
* Create a new MIME field/value. Cached value has been incremented.
* Insert new MIME field/value into the server response buffer,
* allow HTTP processing to continue. This will update
* (indirectly invalidates) the cached HTTP headers MIME field.
* It is apparently not necessary to update all of the MIME fields
* in the in-process response in order to have the cached response
* become invalid.
*/
    TSMimeHdrFieldCreate(resp_bufp, resp_loc, &new_field_loc); /* Probably should check for errors */
/* mimehdr1_name : TSstrdup( "x-num-served-from-cache" ) ; */
TSMimeHdrFieldAppend(resp_bufp, resp_loc, new_field_loc);
TSMimeHdrFieldNameSet(resp_bufp, resp_loc, new_field_loc, mimehdr1_name, strlen(mimehdr1_name));
TSMimeHdrFieldValueUintInsert(resp_bufp, resp_loc, new_field_loc, -1, num_refreshes);
TSHandleMLocRelease(resp_bufp, resp_loc, new_field_loc);
TSHandleMLocRelease(cached_bufp, cached_loc, cached_field_loc);
TSHandleMLocRelease(cached_bufp, TS_NULL_MLOC, cached_loc);
TSHandleMLocRelease(resp_bufp, TS_NULL_MLOC, resp_loc);
} else {
TSDebug(PLUGIN_NAME, "other response code %d", resp_status);
}
/*
* Additional 200/304 processing can go here, if so desired.
*/
  /* Caller reenables */
}
static int
modify_response_header_plugin(TSCont contp ATS_UNUSED, TSEvent event, void *edata)
{
TSHttpTxn txnp = (TSHttpTxn)edata;
switch (event) {
case TS_EVENT_HTTP_READ_RESPONSE_HDR:
TSDebug(PLUGIN_NAME, "Called back with TS_EVENT_HTTP_READ_RESPONSE_HDR");
modify_header(txnp);
TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE);
/* fall through */
default:
break;
}
return 0;
}
void
TSPluginInit(int argc, const char *argv[])
{
TSMLoc chk_field_loc;
TSPluginRegistrationInfo info;
info.plugin_name = PLUGIN_NAME;
info.vendor_name = "Apache Software Foundation";
info.support_email = "[email protected]";
if (TSPluginRegister(&info) != TS_SUCCESS) {
TSError("[%s] Plugin registration failed", PLUGIN_NAME);
}
init_buffer_status = 0;
if (argc > 1) {
TSError("[%s] usage: %s", PLUGIN_NAME, argv[0]);
TSError("[%s] warning: too many args %d", PLUGIN_NAME, argc);
TSError("[%s] warning: ignoring unused arguments beginning with %s", PLUGIN_NAME, argv[1]);
}
/*
* The following code sets up an "init buffer" containing an extension header
* and its initial value. This will be the same for all requests, so we try
* to be efficient and do all of the work here rather than on a per-transaction
* basis.
*/
hdr_bufp = TSMBufferCreate();
TSMimeHdrCreate(hdr_bufp, &hdr_loc);
mimehdr1_name = TSstrdup("x-num-served-from-cache");
mimehdr1_value = TSstrdup("0");
/* Create name here and set DateTime value when o.s.
* response 200 is received
*/
mimehdr2_name = TSstrdup("x-date-200-recvd");
TSDebug(PLUGIN_NAME, "Inserting header %s with value %s into init buffer", mimehdr1_name, mimehdr1_value);
TSMimeHdrFieldCreate(hdr_bufp, hdr_loc, &field_loc); /* Probably should check for errors */
TSMimeHdrFieldAppend(hdr_bufp, hdr_loc, field_loc);
TSMimeHdrFieldNameSet(hdr_bufp, hdr_loc, field_loc, mimehdr1_name, strlen(mimehdr1_name));
TSMimeHdrFieldValueStringInsert(hdr_bufp, hdr_loc, field_loc, -1, mimehdr1_value, strlen(mimehdr1_value));
TSDebug(PLUGIN_NAME, "init buffer hdr, field and value locs are %p, %p and %p", hdr_loc, field_loc, value_loc);
init_buffer_status = 1;
TSHttpHookAdd(TS_HTTP_READ_RESPONSE_HDR_HOOK, TSContCreate(modify_response_header_plugin, NULL));
/*
* The following code demonstrates how to extract the field_loc from the header.
* In this plugin, the init buffer and thus field_loc never changes. Code
* similar to this may be used to extract header fields from any buffer.
*/
if (TS_NULL_MLOC == (chk_field_loc = TSMimeHdrFieldGet(hdr_bufp, hdr_loc, 0))) {
TSError("[%s] Couldn't retrieve header field from init buffer", PLUGIN_NAME);
TSError("[%s] Marking init buffer as corrupt; no more plugin processing", PLUGIN_NAME);
init_buffer_status = 0;
/* bail out here and reenable transaction */
} else {
if (field_loc != chk_field_loc) {
TSError("[%s] Retrieved buffer field loc is %p when it should be %p", PLUGIN_NAME, chk_field_loc, field_loc);
}
}
}
| persiaAziz/trafficserver | example/response_header_1/response_header_1.c | C | apache-2.0 | 10,717 |
import App from '../containers/App';
import { PageNotFound } from '../components';
import homeRoute from '../features/home/route';
import taggrRoute from '../features/taggr/route';
const routes = [{
path: '/',
component: App,
childRoutes: [
homeRoute,
taggrRoute,
{ path: '*', name: 'Page not found', component: PageNotFound },
],
}];
// Handle isIndex property of route config:
// 1. remove the first child with isIndex=true from childRoutes
// 2. assign it to the indexRoute property of the parent.
function handleIndexRoute(route) {
if (!route.childRoutes || !route.childRoutes.length) {
return;
}
route.childRoutes = route.childRoutes.filter(child => { // eslint-disable-line
if (child.isIndex) {
/* istanbul ignore next */
if (process.env.NODE_ENV === 'dev' && route.indexRoute) {
console.error('More than one index route: ', route);
}
/* istanbul ignore else */
if (!route.indexRoute) {
delete child.path; // eslint-disable-line
route.indexRoute = child; // eslint-disable-line
return false;
}
}
return true;
});
route.childRoutes.forEach(handleIndexRoute);
}
routes.forEach(handleIndexRoute);
export default routes;
| thehig/taggr | src/common/routeConfig.js | JavaScript | apache-2.0 | 1,246 |
/**
* Created by txs on 2016/10/17.
*/
public class Student {
String name;
int grade;
@Override
public String toString() {
String temp = "";
temp += "name: " + name + "\n";
temp += "grade: " + grade + "\n";
return temp;
}
@Override
public boolean equals(Object obj) {
if(this==obj) return true;
boolean r = false;
if(obj instanceof Student){
Student temp = (Student)obj;
if(this.name.equals(temp.name)
&& this.grade == temp.grade)
r = true;
}
return r;
}
}
| txs72/BUPTJava | slides/06/overrding/Student.java | Java | apache-2.0 | 625 |
# Arduino Debug
## Install
### 1. Install Arduino IDE (free)
www.arduino.cc > download
### 2. Install Microsoft Visual Studio 2015 Community Edition (free)
* get vs2015.com_chs.iso
* select C++ from the available VS2015 setup options.
>It requires Visual Studio C++ to be installed before Arduino projects can be created or opened.
### 3. Install plugin
https://visualstudiogallery.msdn.microsoft.com/069a905d-387d-4415-bc37-665a5ac9caba
## Use
### Visual Studio 2015 Community
1. Open a file (Ctrl + O) and select an Arduino source file.
2. Add a breakpoint by clicking the line number.
3. Add watch expressions by adding actions like {x} and {y} (see the sketch below).
4. Run the debugger.
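
A minimal sketch for trying the steps above — this is only an illustration, assuming the Visual Micro plugin linked earlier; the variables `x` and `y` are placeholders for whatever your own sketch computes:

```cpp
// Minimal Arduino sketch for exercising breakpoints and watch expressions.
// x and y exist only so the {x} and {y} actions above have something to show.
int x = 0;
int y = 0;

void setup() {
  Serial.begin(9600);  // the debugger typically communicates over the serial port
}

void loop() {
  x++;          // set a breakpoint here and watch {x}
  y = x * 2;    // watch {y} here
  delay(1000);  // slow the loop so breakpoint hits are easy to follow
}
```

With this running under the debugger, each pass through `loop()` should report the current values of `x` and `y` at the breakpoints.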
| usedtolove/ros-study | docs/2.Intermediate/04.arduino_debug.md | Markdown | apache-2.0 | 647 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>Uses of Class io.permazen.util.NavigableSetPager (Permazen 4.1.9-SNAPSHOT API)</title>
<link rel="stylesheet" type="text/css" href="../../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class io.permazen.util.NavigableSetPager (Permazen 4.1.9-SNAPSHOT API)";
}
}
catch(err) {
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../io/permazen/util/NavigableSetPager.html" title="class in io.permazen.util">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../index-all.html">Index</a></li>
<li><a href="../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../index.html?io/permazen/util/class-use/NavigableSetPager.html" target="_top">Frames</a></li>
<li><a href="NavigableSetPager.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="Uses of Class io.permazen.util.NavigableSetPager" class="title">Uses of Class<br>io.permazen.util.NavigableSetPager</h2>
</div>
<div class="classUseContainer">No usage of io.permazen.util.NavigableSetPager</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../io/permazen/util/NavigableSetPager.html" title="class in io.permazen.util">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../index-all.html">Index</a></li>
<li><a href="../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../index.html?io/permazen/util/class-use/NavigableSetPager.html" target="_top">Frames</a></li>
<li><a href="NavigableSetPager.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2022. All rights reserved.</small></p>
</body>
</html>
| permazen/permazen | site/apidocs/io/permazen/util/class-use/NavigableSetPager.html | HTML | apache-2.0 | 4,415 |
using System;
using System.IO;
using System.Reflection;
using Moq;
using NUnit.Framework;
using Weald.Service;
namespace Weald.Tests
{
[TestFixture]
public class VisualSvnServerInfoProviderTests
{
private string _tempFilePath;
[SetUp]
public void SetUp()
{
_tempFilePath = Path.GetTempFileName();
}
[TearDown]
public void TearDown()
{
try
{
File.Delete(_tempFilePath);
}
catch
{
}
}
[Test]
public void NonExistentServiceExecutableMeansNothingWorks()
{
var mockServerPathProvider = new Mock<IProvideVisualSvnServerPaths>();
mockServerPathProvider.Setup(x => x.ServiceExePath)
.Returns(Guid.NewGuid().ToString());
var mockWebConfigProvider = new Mock<IProvideWebConfiguration>();
mockWebConfigProvider.Setup(x => x.GetValue("SvnServerAlias"))
.Returns("Foo");
var serverInfo = new VisualSvnServerInfoProvider(mockServerPathProvider.Object, mockWebConfigProvider.Object);
Assert.IsFalse(serverInfo.IsVisualSvnServerInstalled);
Assert.IsNullOrEmpty(serverInfo.RepoStoragePath);
Assert.IsNullOrEmpty(serverInfo.SvnLookExePath);
}
[Test]
public void CanGetNormalizedRepoStoragePath()
{
File.WriteAllLines(_tempFilePath, new[] { "FOO", "#BAR", " SVNParentPath \"E:/svn/repos/\"", " BAZ" });
var mockServerPathProvider = new Mock<IProvideVisualSvnServerPaths>();
mockServerPathProvider.Setup(x => x.ServiceExePath)
.Returns(Assembly.GetExecutingAssembly().Location);
mockServerPathProvider.Setup(x => x.ServiceConfigFilePath)
.Returns(_tempFilePath);
mockServerPathProvider.Setup(x => x.ServiceBinDirectory)
.Returns("C:\\Foo");
var mockWebConfigProvider = new Mock<IProvideWebConfiguration>();
mockWebConfigProvider.Setup(x => x.GetValue("SvnServerAlias"))
.Returns("Foo");
var serverInfo = new VisualSvnServerInfoProvider(mockServerPathProvider.Object, mockWebConfigProvider.Object);
Assert.IsTrue(serverInfo.IsVisualSvnServerInstalled);
Assert.IsNotNullOrEmpty(serverInfo.RepoStoragePath);
Assert.AreEqual(@"e:\svn\repos", serverInfo.RepoStoragePath.ToLowerInvariant());
}
}
}
| devopsonwindows/Weald | Weald.Tests/VisualSvnServerInfoProviderTests.cs | C# | apache-2.0 | 2,676 |