code
stringlengths 3
1.05M
| repo_name
stringlengths 4
116
| path
stringlengths 3
942
| language
stringclasses 30
values | license
stringclasses 15
values | size
int32 3
1.05M
|
---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.dataformat.univocity;
import com.univocity.parsers.common.ParsingContext;
import com.univocity.parsers.common.processor.RowProcessor;
/**
 * Row processor that captures the header row so that the unmarshaller can
 * map column values to their header names.
 */
final class HeaderRowProcessor implements RowProcessor {
    private String[] headers;

    /**
     * Invoked by the parser before any row is processed; resets the cached headers.
     *
     * @param context Parsing context
     */
    @Override
    public void processStarted(ParsingContext context) {
        headers = null;
    }

    /**
     * Invoked for every parsed row; captures the headers from the parsing
     * context the first time it is called and ignores subsequent rows.
     *
     * @param row     Processed row
     * @param context Parsing context
     */
    @Override
    public void rowProcessed(String[] row, ParsingContext context) {
        if (headers != null) {
            return;
        }
        headers = context.headers();
    }

    /**
     * Invoked by the parser once processing has finished; resets the cached headers.
     *
     * @param context Parsing context
     */
    @Override
    public void processEnded(ParsingContext context) {
        headers = null;
    }

    /**
     * Returns the headers captured from the parsing context, or {@code null}
     * when no row has been processed yet.
     *
     * @return the headers
     */
    public String[] getHeaders() {
        return headers;
    }
}
| nikhilvibhav/camel | components/camel-univocity-parsers/src/main/java/org/apache/camel/dataformat/univocity/HeaderRowProcessor.java | Java | apache-2.0 | 2,080 |
// ----------------------------------------------------------------------------------
//
// Copyright Microsoft Corporation
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------------
using Microsoft.Azure.Commands.ScenarioTest.SqlTests;
using Microsoft.Azure.ServiceManagemenet.Common.Models;
using Microsoft.WindowsAzure.Commands.ScenarioTest;
using Xunit;
using Xunit.Abstractions;
namespace Microsoft.Azure.Commands.Sql.Test.ScenarioTests
{
/// <summary>
/// Scenario tests covering CRUD operations for SQL database
/// Transparent Data Encryption (TDE).
/// </summary>
public class TransparentDataEncryptionCrudTests : SqlTestsBase
{
    public TransparentDataEncryptionCrudTests(ITestOutputHelper output)
    {
        // Route SDK tracing output into the xunit test log.
        var interceptor = new XunitTracingInterceptor(output);
        XunitTracingInterceptor.AddToContext(interceptor);
    }

    /// <summary>
    /// Runs the PowerShell scenario test that updates TDE settings on a database.
    /// </summary>
    [Fact]
    [Trait(Category.AcceptanceType, Category.CheckIn)]
    public void TestDatabaseTransparentDataEncryptionUpdate()
    {
        RunPowerShellTest("Test-UpdateTransparentDataEncryption");
    }

    /// <summary>
    /// Runs the PowerShell scenario test that reads TDE settings from a database.
    /// </summary>
    [Fact]
    [Trait(Category.AcceptanceType, Category.CheckIn)]
    public void TestDatabaseTransparentDataEncryptionGet()
    {
        RunPowerShellTest("Test-GetTransparentDataEncryption");
    }
}
}
| hovsepm/azure-powershell | src/ResourceManager/Sql/Commands.Sql.Test/ScenarioTests/TransparentDataEncryptionCrudTests.cs | C# | apache-2.0 | 1,743 |
/**
* Appcelerator Titanium Mobile
* Copyright (c) 2009-2010 by Appcelerator, Inc. All Rights Reserved.
* Licensed under the terms of the Apache Public License
* Please see the LICENSE included with this distribution for details.
*/
#import <Foundation/Foundation.h>
/* You would expect these macros to be used in the header files, but because they can generate checking code, they should be in the .m files instead.*/
/* This declares that the class is publically viewable, and that should be considered a subclass of the closest superclass that also marked with a TI_PUBLIC_CLASS */
/* To speed lookup, this will produce a method on the module to generate an instance.*/
/* TI_PUBLIC_CLASS(moduleName, className)
 * Expands to a category on <moduleName>Module declaring and implementing a
 * create<className>: factory method. The factory allocates a
 * Ti<moduleName><className>Proxy, initialised with the module's execution
 * context and the JS-supplied args, and returns it autoreleased (pre-ARC
 * code). Comments are kept outside the macro bodies on purpose: a // comment
 * before a trailing backslash would swallow the line continuation.
 */
#define TI_PUBLIC_CLASS(moduleName, className) \
@class moduleName##Module; \
@interface moduleName \
##Module(className##_generation) \
- (id)create##className : (id)args; \
@end \
@implementation moduleName \
##Module(className##_generation) \
- (TiProxy *)create##className : (id)args \
{ \
TiProxy *result = [[Ti##moduleName##className##Proxy alloc] _initWithPageContext:[self executionContext] args:args]; \
return [result autorelease]; \
} \
@end
/* Debug builds: TI_PUBLIC_METHOD opens a checked wrapper method that counts
 * the incoming args; TI_PUBLIC_METHOD_END_ARGS closes it and forwards to the
 * real implementation (methodName_CONTINUE:). Release builds compile the
 * checks away and END_ARGS declares the method directly. */
#ifdef DEBUG
#define TI_PUBLIC_METHOD(methodName, returnType) \
-(returnType)methodName : (id)args \
{ \
int argCount = [args count];
/* Declares and (debug-only) type-checks positional argument <argName> taken
 * from the args array; argCheck is an arbitrary extra validation statement.
 * NOTE(review): the guard `argCount < argPosition` looks inverted — it reads
 * the argument only when there are FEWER args than the position — and the
 * inner `argType *argName` redeclaration shadows the outer variable, so the
 * fetched value never escapes the if-block. Confirm intent before relying on
 * this debug-only path. */
#define TI_PUBLIC_METHOD_ARG_OBJECT(argPosition, argName, argType, argOptional, argCheck) \
argType *argName = nil; \
if (argCount < argPosition) { \
argType *argName = [(NSArray *)args objectAtIndex:argPosition]; \
if (![argName isKindOfClass:[argType class]]) { \
[self throwException:TiExceptionInvalidType \
subreason: \
[NSString stringWithFormat:@"argument #%d (%s) needs to be of type %s, but was %@ instead.", \
argPosition, #argName, #argType, [argName class]] \
location:CODELOCATION]; \
} \
argCheck; \
} else if (!argOptional) { \
[self throwException:TiExceptionNotEnoughArguments \
subreason:[NSString stringWithFormat:@"argument #%d (%s) was missing and is not optional", argPosition, #argName] \
location:CODELOCATION]; \
}
/* Closes the debug wrapper: non-void methods forward to methodName_CONTINUE:,
 * which the following declaration line opens for the caller's real body. */
#define TI_PUBLIC_METHOD_END_ARGS(methodName, returnType) \
if (![@"void" isEqualToString:@"" #returnType]) { \
return [self methodName##_CONTINUE:args]; \
} \
} \
-(returnType)methodName##_CONTINUE : (id)args
#else
/* Release builds: the open/check macros expand to nothing... */
#define TI_PUBLIC_METHOD(methodName, returnType) \
//No-op
#define TI_PUBLIC_METHOD_ARG_OBJECT(argPosition, argName, argType, argOptional, argCheck) \
//No-op
/* ...and END_ARGS simply declares the method itself. */
#define TI_PUBLIC_METHOD_END_ARGS(methodName, returnType) \
-(returnType)methodName : (id)args
#endif //Debug
| mano-mykingdom/titanium_mobile | iphone/TitaniumKit/TitaniumKit/Sources/API/TiPublicAPI.h | C | apache-2.0 | 5,224 |
A Docker swarm cluster with Heat
==============================
These [Heat][] templates will deploy an *N*-node [swarm][] cluster,
where *N* is the value of the `number_of_nodes` parameter you
specify when creating the stack.
[heat]: https://wiki.openstack.org/wiki/Heat
[swarm]: https://github.com/docker/swarm/
## Requirements
### OpenStack
These templates will work with the Juno version of Heat.
### Guest image
These templates will work with either CentOS Atomic Host or Fedora 21
Atomic.
## Creating the stack
First, you must create a swarm token, which is used to uniquely identify
the cluster to the global discovery service. This can be done by issuing
a create call to the swarm CLI. Alternatively, if you have access to
Docker you can use the dockerswarm/swarm image.
$ swarm create
afeb445bcb2f573aeb8ff3a199785f45
$ docker run dockerswarm/swarm create
d8cdfe5128af6e1075b34aa06ff1cc2c
Create an environment file `local.yaml` with parameters specific to
your environment:
parameters:
ssh_key_name: apmelton
external_network: 028d70dd-67b8-4901-8bdd-0c62b06cce2d
dns_nameserver: 192.168.200.1
server_image: fedora-21-atomic-3
discovery_url: token://d8cdfe5128af6e1075b34aa06ff1cc2c
And then create the stack, referencing that environment file:
heat stack-create -f swarm.yaml -e local.yaml my-swarm-cluster
You must provide values for:
- `ssh_key_name`
- `external_network`
- `server_image`
- `discovery_url`
## Interacting with Swarm
The Docker CLI interacts with the cluster through the swarm manager
listening on port 2376.
You can get the ip address of the swarm manager using the `heat
output-show` command:
$ heat output-show my-swarm-cluster swarm_manager
"192.168.200.86"
Provide the Docker CLI with the address for the swarm manager.
$ docker -H tcp://192.168.200.86:2376 info
Containers: 4
Nodes: 3
swarm-manager: 10.0.0.1:2375
swarm-node1: 10.0.0.2:2375
swarm-node2: 10.0.0.3:2375
## Testing
You can test the swarm cluster with the Docker CLI by running a container.
In the example below, a container is spawned in the cluster to ping 8.8.8.8.
$ docker -H tcp://192.168.200.86:2376 run -i cirros /bin/ping -c 4 8.8.8.8
PING 8.8.8.8 (8.8.8.8): 56 data bytes
64 bytes from 8.8.8.8: seq=0 ttl=127 time=40.749 ms
64 bytes from 8.8.8.8: seq=1 ttl=127 time=46.264 ms
64 bytes from 8.8.8.8: seq=2 ttl=127 time=42.808 ms
64 bytes from 8.8.8.8: seq=3 ttl=127 time=42.270 ms
--- 8.8.8.8 ping statistics ---
4 packets transmitted, 4 packets received, 0% packet loss
round-trip min/avg/max = 40.749/43.022/46.264 ms
## License
Copyright 2014 Lars Kellogg-Stedman <[email protected]>
Copyright 2015 Rackspace Hosting
Licensed under the Apache License, Version 2.0 (the "License");
you may not use these files except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| ddepaoli3/magnum | magnum/templates/docker-swarm/README.md | Markdown | apache-2.0 | 3,308 |
/*jshint unused: false */
/*global beforeEach, afterEach */
/*global describe, it, expect, jasmine */
/*global runs, spyOn, waitsFor, waits */
/*global window, eb, loadFixtures, document */
/*global $, _, d3*/
/*global describeInterface, describeIntegeration*/
/*global ArangoAdapter*/
////////////////////////////////////////////////////////////////////////////////
/// @brief Graph functionality
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Michael Hackstein
/// @author Copyright 2011-2013, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
(function () {
"use strict";
describe('Arango Adapter', function () {
describeInterface(new ArangoAdapter([], [], {}, {
nodeCollection: "",
edgeCollection: ""
}));
// Shared integration suite: builds an ArangoAdapter whose AJAX layer is
// faked so no ArangoDB server is required by the tests.
describeIntegeration(function() {
  // Fake every $.ajax call the adapter issues during the integration run.
  spyOn($, "ajax").andCallFake(function(req) {
    var node1 = {_id: 1},
        node2 = {_id: 2},
        edge = {_id: "1-2", _from: 1, _to: 2};
    switch(req.type) {
      case "DELETE":
        req.success();
        break;
      case "POST":
        if (req.url.match(/_api\/cursor$/)) {
          // Traversal query: answer with a fixed two-node path 1 -> 2.
          req.success({result:
            [
              [{
                vertex: node1,
                path: {
                  edges: [],
                  vertices: [
                    node1
                  ]
                }
              },{
                vertex: node2,
                path: {
                  edges: [
                    edge
                  ],
                  vertices: [
                    node1,
                    node2
                  ]
                }
              }]
            ]});
        } else if (req.url.match(/_api\/edge/)) {
          // Edge creation: pretend edge "1-2" was stored.
          req.success({_id: "1-2"});
        } else {
          // Document creation: pretend node 1 was stored without error.
          req.success({
            _id: 1,
            error: false
          });
        }
        break;
      default:
        // Any other verb succeeds with no payload.
        req.success();
    }
  });
  return new ArangoAdapter([], [], {}, {
    nodeCollection: "",
    edgeCollection: "",
    prioList: ["foo", "bar", "baz"]
  });
});
// Suite-wide state and helpers. nodes/edges are the arrays the adapter
// mutates; mockCollection simulates ArangoDB document/edge collections as
// nested plain objects keyed by collection id.
var adapter,
  nodes,
  edges,
  viewer,
  arangodb = "http://localhost:8529",
  nodesCollection,
  altNodesCollection,
  edgesCollection,
  altEdgesCollection,
  mockCollection,
  callbackCheck,
  // Generic async-completion flag setter used with waitsFor().
  checkCallbackFunction = function(test) {
    callbackCheck = true;
  },
  // All nodes the adapter has collapsed into community nodes.
  getCommunityNodes = function() {
    return _.filter(nodes, function(n) {
      return n._isCommunity;
    });
  },
  getCommunityNodesIds = function() {
    return _.pluck(getCommunityNodes(), "_id");
  },
  // Lookup a node in the adapter's node list by its _id.
  nodeWithID = function(id) {
    return $.grep(nodes, function(e){
      return e._id === id;
    })[0];
  },
  edgeWithSourceAndTargetId = function(sourceId, targetId) {
    return $.grep(edges, function(e){
      return e.source._id === sourceId
      && e.target._id === targetId;
    })[0];
  },
  // Assertion helpers: presence/absence of nodes and edges in adapter state.
  existNode = function(id) {
    var node = nodeWithID(id);
    expect(node).toBeDefined();
    expect(node._id).toEqual(id);
  },
  notExistNode = function(id) {
    var node = nodeWithID(id);
    expect(node).toBeUndefined();
  },
  existEdge = function(source, target) {
    var edge = edgeWithSourceAndTargetId(source, target);
    expect(edge).toBeDefined();
    expect(edge.source._id).toEqual(source);
    expect(edge.target._id).toEqual(target);
  },
  notExistEdge = function(source, target) {
    var edge = edgeWithSourceAndTargetId(source, target);
    expect(edge).toBeUndefined();
  },
  existNodes = function(ids) {
    _.each(ids, existNode);
  },
  notExistNodes = function(ids) {
    _.each(ids, notExistNode);
  },
  // Insert an edge document into the mock store under
  // mockCollection[col][from][to]; returns the generated _id.
  insertEdge = function (collectionID, from, to, cont) {
    var key = String(Math.floor(Math.random()*100000)),
      id = collectionID + "/" + key;
    cont = cont || {};
    mockCollection[collectionID] = mockCollection[collectionID] || {};
    mockCollection[collectionID][from] = mockCollection[collectionID][from] || {};
    cont._id = id;
    cont._key = key;
    cont._rev = key;
    cont._from = from;
    cont._to = to;
    mockCollection[collectionID][from][to] = cont;
    return id;
  },
  // Insert a vertex document into the mock store; returns the generated _id.
  insertNode = function (collectionID, nodeId, cont) {
    var key = String(Math.floor(Math.random()*100000)),
      id = collectionID + "/" + key;
    cont = cont || {};
    mockCollection[collectionID] = mockCollection[collectionID] || {};
    cont.id = nodeId;
    cont._id = id;
    cont._key = key;
    cont._rev = key;
    mockCollection[collectionID][id] = cont;
    return id;
  },
  readEdge = function (collectionID, from, to) {
    return mockCollection[collectionID][from._id][to._id];
  },
  readNode = function (collectionID, id) {
    return mockCollection[collectionID][id];
  },
  // Build a traversal-result entry for the single-edge path from -> to,
  // mimicking ArangoDB's TRAVERSAL() output shape.
  constructPath = function(colNodes, colEdges, from, to) {
    var obj = {},
      src = readNode(colNodes, from),
      tar = readNode(colNodes, to);
    obj.vertex = tar;
    obj.path = {
      edges: [
        readEdge(colEdges, src, tar)
      ],
      vertices: [
        src,
        tar
      ]
    };
    return obj;
  };
// Fresh adapter state and mock store before every spec; also installs a
// custom matcher verifying that every node got numeric x/y coordinates.
beforeEach(function() {
  nodes = [];
  edges = [];
  mockCollection = {};
  // Minimal graph-viewer stub: the adapter only needs cleanUp().
  viewer = {
    cleanUp: function(){}
  };
  nodesCollection = "TestNodes321";
  edgesCollection = "TestEdges321";
  altNodesCollection = "TestNodes654";
  altEdgesCollection = "TestEdges654";
  this.addMatchers({
    toHaveCorrectCoordinates: function() {
      var list = this.actual,
        evil;
      _.each(list, function(n) {
        // Remember the first node whose coordinates are not numbers.
        if (isNaN(n.x) || isNaN(n.y)) {
          evil = n;
        }
      });
      this.message = function() {
        return "Expected " + JSON.stringify(evil) + " to contain Numbers as X and Y.";
      };
      return evil === undefined;
    }
  });
});
// Invariant checked after every spec: all nodes must have valid coordinates.
afterEach(function() {
  expect(nodes).toHaveCorrectCoordinates();
});
// Constructor argument validation: each missing required option must raise
// a descriptive exception; a fully specified config must not throw.
it('should throw an error if no nodes are given', function() {
  expect(
    function() {
      var t = new ArangoAdapter();
    }
  ).toThrow("The nodes have to be given.");
});
it('should throw an error if no edges are given', function() {
  expect(
    function() {
      var t = new ArangoAdapter([]);
    }
  ).toThrow("The edges have to be given.");
});
it('should throw an error if a reference to the graph viewer is not given', function() {
  expect(
    function() {
      var t = new ArangoAdapter([], []);
    }
  ).toThrow("A reference to the graph viewer has to be given.");
});
it('should throw an error if no nodeCollection or graph is given', function() {
  expect(
    function() {
      var t = new ArangoAdapter([], [], viewer, {
        edgeCollection: ""
      });
    }
  ).toThrow("The nodeCollection or a graphname has to be given.");
});
it('should throw an error if no edgeCollection or graph is given', function() {
  expect(
    function() {
      var t = new ArangoAdapter([], [], viewer, {
        nodeCollection: ""
      });
    }
  ).toThrow("The edgeCollection or a graphname has to be given.");
});
it('should not throw an error if everything is given', function() {
  expect(
    function() {
      var t = new ArangoAdapter([], [], viewer, {
        nodeCollection: "",
        edgeCollection: ""
      });
    }
  ).not.toThrow();
});
// When no host is configured, requests must use relative URLs (no "http").
it('should automatically determine the host of not given', function() {
  var adapter = new ArangoAdapter(
    nodes,
    edges,
    viewer,
    {
      nodeCollection: nodesCollection,
      edgeCollection: edgesCollection,
      width: 100,
      height: 40
    }
  ),
  args,
  host;
  spyOn($, "ajax");
  adapter.createNode({}, function() {});
  // Inspect the request object passed to the most recent $.ajax call.
  args = $.ajax.mostRecentCall.args[0];
  expect(args.url).not.toContain("http");
});
// Construction must wire up a NodeReducer instance...
it('should create a nodeReducer instance', function() {
  spyOn(window, "NodeReducer");
  var adapter = new ArangoAdapter(
    nodes,
    edges,
    viewer,
    {
      nodeCollection: nodesCollection,
      edgeCollection: edgesCollection,
      width: 100,
      height: 40
    }
  );
  expect(window.NodeReducer).wasCalledWith();
});
// ...and a ModularityJoiner wrapped as a web worker.
it('should create the ModularityJoiner as a worker', function() {
  spyOn(window, "WebWorkerWrapper");
  var adapter = new ArangoAdapter(
    nodes,
    edges,
    viewer,
    {
      nodeCollection: nodesCollection,
      edgeCollection: edgesCollection,
      width: 100,
      height: 40
    }
  );
  expect(window.WebWorkerWrapper).wasCalledWith(
    window.ModularityJoiner,
    jasmine.any(Function)
  );
});
describe('setup correctly', function() {
var traversalQuery,
filterQuery,
childrenQuery,
loadGraph,
requests,
mockWrapper,
workerCB;
beforeEach(function() {
var self = this,
apibase = "_api/",
apiCursor = apibase + 'cursor';
self.fakeReducerBucketRequest = function() {};
mockWrapper = {};
mockWrapper.call = function() {};
spyOn(window, "NodeReducer").andCallFake(function() {
return {
bucketNodes: function(toSort, numBuckets) {
return self.fakeReducerBucketRequest(toSort, numBuckets);
}
};
});
spyOn(window, "WebWorkerWrapper").andCallFake(function(c, cb) {
workerCB = cb;
return {
call: function() {
mockWrapper.call.apply(
mockWrapper,
Array.prototype.slice.call(arguments)
);
}
};
});
adapter = new ArangoAdapter(
nodes,
edges,
viewer,
{
nodeCollection: nodesCollection,
edgeCollection: edgesCollection,
width: 100,
height: 40
}
);
traversalQuery = function(id, nods, edgs, undirected) {
var dir;
if (undirected === true) {
dir = "any";
} else {
dir = "outbound";
}
return JSON.stringify({
query: "RETURN TRAVERSAL(@@nodes, @@edges, @id, @dir,"
+ " {strategy: \"depthfirst\",maxDepth: 1,paths: true})",
bindVars: {
id: id,
"@nodes": nods,
dir: dir,
"@edges": edgs
}
});
};
filterQuery = function(v, nods, edgs, undirected) {
var dir;
if (undirected === true) {
dir = "any";
} else {
dir = "outbound";
}
return JSON.stringify({
query: "FOR n IN @@nodes FILTER n.id == @value"
+ " RETURN TRAVERSAL(@@nodes, @@edges, n._id, @dir,"
+ " {strategy: \"depthfirst\",maxDepth: 1,paths: true})",
bindVars: {
value: v,
"@nodes": nods,
dir: dir,
"@edges": edgs
}
});
};
childrenQuery = function(id, nods, edgs) {
return JSON.stringify({
query: "FOR u IN @@nodes FILTER u._id == @id"
+ " LET g = ( FOR l in @@edges FILTER l._from == u._id RETURN 1 )"
+ " RETURN length(g)",
bindVars: {
id: id,
"@nodes": nods,
"@edges": edgs
}
});
};
loadGraph = function(vars) {
var nid = vars.id,
ncol = vars["@nodes"],
ecol = vars["@edges"],
res = [],
inner = [],
first = {},
node1 = readNode(ncol, nid);
res.push(inner);
first.vertex = node1;
first.path = {
edges: [],
vertices: [
node1
]
};
inner.push(first);
if (mockCollection[ecol][nid] !== undefined) {
_.each(mockCollection[ecol][nid], function(val, key) {
inner.push(constructPath(ncol, ecol, nid, key));
});
}
return res;
};
requests = {};
requests.cursor = function(data) {
return {
type: 'POST',
url: apiCursor,
data: data,
contentType: 'application/json',
dataType: 'json',
success: jasmine.any(Function),
error: jasmine.any(Function),
processData: false
};
};
requests.node = function(col) {
var read = apibase + "document?collection=" + col,
write = apibase + "document/",
base = {
cache: false,
dataType: "json",
contentType: "application/json",
processData: false,
success: jasmine.any(Function),
error: jasmine.any(Function)
};
return {
create: function(data) {
return $.extend(base, {url: read, type: "POST", data: JSON.stringify(data)});
},
patch: function(id, data) {
return $.extend(base, {url: write + id, type: "PUT", data: JSON.stringify(data)});
},
del: function(id) {
return $.extend(base, {url: write + id, type: "DELETE"});
}
};
};
requests.edge = function(col) {
var create = apibase + "edge?collection=" + col,
base = {
cache: false,
dataType: "json",
contentType: "application/json",
processData: false,
success: jasmine.any(Function),
error: jasmine.any(Function)
};
return {
create: function(from, to, data) {
return $.extend(base, {
url: create + "&from=" + from + "&to=" + to,
type: "POST",
data: JSON.stringify(data)
});
}
};
};
});
it('should offer lists of available collections', function() {
var collections = [],
sys1 = {id: "1", name: "_sys1", status: 3, type: 2},
sys2 = {id: "2", name: "_sys2", status: 2, type: 2},
doc1 = {id: "3", name: "doc1", status: 3, type: 2},
doc2 = {id: "4", name: "doc2", status: 2, type: 2},
doc3 = {id: "5", name: "doc3", status: 3, type: 2},
edge1 = {id: "6", name: "edge1", status: 3, type: 3},
edge2 = {id: "7", name: "edge2", status: 2, type: 3};
collections.push(sys1);
collections.push(sys2);
collections.push(doc1);
collections.push(doc2);
collections.push(doc3);
collections.push(edge1);
collections.push(edge2);
spyOn($, "ajax").andCallFake(function(request) {
request.success({collections: collections});
});
adapter.getCollections(function(docs, edge) {
expect(docs).toContain("doc1");
expect(docs).toContain("doc2");
expect(docs).toContain("doc3");
expect(docs.length).toEqual(3);
expect(edge).toContain("edge1");
expect(edge).toContain("edge2");
expect(edge.length).toEqual(2);
});
});
it('should be able to load a tree node from '
+ 'ArangoDB by internal _id attribute', function() {
var c0, c1, c2, c3, c4;
runs(function() {
spyOn($, "ajax").andCallFake(function(request) {
var vars = JSON.parse(request.data).bindVars;
if (vars !== undefined) {
request.success({result: loadGraph(vars)});
}
});
c0 = insertNode(nodesCollection, 0);
c1 = insertNode(nodesCollection, 1);
c2 = insertNode(nodesCollection, 2);
c3 = insertNode(nodesCollection, 3);
c4 = insertNode(nodesCollection, 4);
insertEdge(edgesCollection, c0, c1);
insertEdge(edgesCollection, c0, c2);
insertEdge(edgesCollection, c0, c3);
insertEdge(edgesCollection, c0, c4);
callbackCheck = false;
adapter.loadNodeFromTreeById(c0, checkCallbackFunction);
});
waitsFor(function() {
return callbackCheck;
}, 1000);
runs(function() {
existNodes([c0, c1, c2, c3, c4]);
expect(nodes.length).toEqual(5);
expect($.ajax).toHaveBeenCalledWith(
requests.cursor(traversalQuery(c0, nodesCollection, edgesCollection))
);
});
});
it('should map loadNode to loadByID', function() {
spyOn(adapter, "loadNodeFromTreeById");
adapter.loadNode("a", "b");
expect(adapter.loadNodeFromTreeById).toHaveBeenCalledWith("a", "b");
});
it('should be able to load a tree node from ArangoDB'
+ ' by internal attribute and value', function() {
var c0, c1, c2, c3, c4;
runs(function() {
spyOn($, "ajax").andCallFake(function(request) {
var vars = JSON.parse(request.data).bindVars;
if (vars !== undefined) {
vars.id = c0;
request.success({result: loadGraph(vars)});
}
});
c0 = insertNode(nodesCollection, 0);
c1 = insertNode(nodesCollection, 1);
c2 = insertNode(nodesCollection, 2);
c3 = insertNode(nodesCollection, 3);
c4 = insertNode(nodesCollection, 4);
insertEdge(edgesCollection, c0, c1);
insertEdge(edgesCollection, c0, c2);
insertEdge(edgesCollection, c0, c3);
insertEdge(edgesCollection, c0, c4);
callbackCheck = false;
adapter.loadNodeFromTreeByAttributeValue("id", 0, checkCallbackFunction);
});
waitsFor(function() {
return callbackCheck;
}, 1000);
runs(function() {
existNodes([c0, c1, c2, c3, c4]);
expect(nodes.length).toEqual(5);
expect($.ajax).toHaveBeenCalledWith(
requests.cursor(filterQuery(0, nodesCollection, edgesCollection))
);
});
});
it('should callback with proper errorcode if no results are found', function() {
var dummy = {
cb: function() {}
};
spyOn(dummy, "cb");
spyOn($, "ajax").andCallFake(function(request) {
request.success({result: []});
});
adapter.loadNode("node", dummy.cb);
expect(dummy.cb).wasCalledWith({
errorCode: 404
});
});
it('should be able to request the number of children centrality', function() {
var c0,
children;
runs(function() {
c0 = insertNode(nodesCollection, 0);
spyOn($, "ajax").andCallFake(function(request) {
request.success({result: [4]});
});
callbackCheck = false;
adapter.requestCentralityChildren(c0, function(count) {
callbackCheck = true;
children = count;
});
});
waitsFor(function() {
return callbackCheck;
});
runs(function() {
expect(children).toEqual(4);
expect($.ajax).toHaveBeenCalledWith(
requests.cursor(childrenQuery(c0, nodesCollection, edgesCollection))
);
});
});
it('should encapsulate all attributes of nodes and edges in _data', function() {
var c0, c1, e1_2;
runs(function() {
spyOn($, "ajax").andCallFake(function(request) {
var vars = JSON.parse(request.data).bindVars;
if (vars !== undefined) {
request.success({result: loadGraph(vars)});
}
});
c0 = insertNode(nodesCollection, 0, {name: "Alice", age: 42});
c1 = insertNode(nodesCollection, 1, {name: "Bob", age: 1337});
e1_2 = insertEdge(edgesCollection, c0, c1, {label: "knows"});
callbackCheck = false;
adapter.loadNodeFromTreeById(c0, checkCallbackFunction);
});
waitsFor(function() {
return callbackCheck;
}, 1000);
runs(function() {
expect(nodes[0]._data).toEqual({
_id: c0,
_key: jasmine.any(String),
_rev: jasmine.any(String),
id: 0,
name: "Alice",
age: 42
});
expect(nodes[1]._data).toEqual({
_id: c1,
_key: jasmine.any(String),
_rev: jasmine.any(String),
id: 1,
name: "Bob",
age: 1337
});
expect(edges[0]._data).toEqual({
_id: e1_2,
_from: c0,
_to: c1,
_key: jasmine.any(String),
_rev: jasmine.any(String),
label: "knows"
});
expect($.ajax).toHaveBeenCalledWith(
requests.cursor(traversalQuery(c0, nodesCollection, edgesCollection))
);
});
});
it('should be able to switch to different collections', function() {
var c0, c1, e1_2, insertedId;
runs(function() {
spyOn($, "ajax").andCallFake(function(request) {
var vars = JSON.parse(request.data).bindVars;
if (vars !== undefined) {
request.success({result: loadGraph(vars)});
} else {
request.success({
error: false,
_id: "TestNodes654/myNewNode",
_key: "myNewNode",
_rev: "1234"
});
}
});
c0 = insertNode(altNodesCollection, 0);
c1 = insertNode(altNodesCollection, 1);
e1_2 = insertEdge(altEdgesCollection, c0, c1);
adapter.changeToCollections(altNodesCollection, altEdgesCollection);
callbackCheck = false;
adapter.loadNode(c0, checkCallbackFunction);
});
waitsFor(function() {
return callbackCheck;
}, 1000);
runs(function() {
existNodes([c0, c1]);
expect(nodes.length).toEqual(2);
expect($.ajax).toHaveBeenCalledWith(
requests.cursor(traversalQuery(c0, altNodesCollection, altEdgesCollection))
);
callbackCheck = false;
adapter.createNode({}, function(node) {
insertedId = node._id;
callbackCheck = true;
});
});
waitsFor(function() {
return callbackCheck;
}, 1500);
runs(function() {
existNode(insertedId);
expect($.ajax).toHaveBeenCalledWith(
requests.node(altNodesCollection).create({})
);
});
});
it('should be able to switch to different collections and change to directed', function() {
runs(function() {
spyOn($, "ajax");
adapter.changeToCollections(altNodesCollection, altEdgesCollection, false);
adapter.loadNode("42");
expect($.ajax).toHaveBeenCalledWith(
requests.cursor(traversalQuery("42", altNodesCollection, altEdgesCollection, false))
);
});
});
it('should be able to switch to different collections'
+ ' and change to undirected', function() {
runs(function() {
spyOn($, "ajax");
adapter.changeToCollections(altNodesCollection, altEdgesCollection, true);
adapter.loadNode("42");
expect($.ajax).toHaveBeenCalledWith(
requests.cursor(traversalQuery("42", altNodesCollection, altEdgesCollection, true))
);
});
});
it('should add at most the upper bound of children in one step', function() {
var inNodeCol, callNodes;
runs(function() {
var addNNodes = function(n) {
var i = 0,
res = [];
for (i = 0; i < n; i++) {
res.push(insertNode(nodesCollection, i));
}
return res;
},
connectToAllButSelf = function(source, ns) {
_.each(ns, function(target) {
if (source !== target) {
insertEdge(edgesCollection, source, target);
}
});
};
inNodeCol = addNNodes(21);
connectToAllButSelf(inNodeCol[0], inNodeCol);
adapter.setChildLimit(5);
spyOn($, "ajax").andCallFake(function(request) {
var vars = JSON.parse(request.data).bindVars;
if (vars !== undefined) {
request.success({result: loadGraph(vars)});
}
});
spyOn(this, "fakeReducerBucketRequest").andCallFake(function(ns) {
var i = 0,
res = [],
pos;
callNodes = ns;
for (i = 0; i < 5; i++) {
pos = i*4;
res.push({
reason: {
type: "similar",
example: ns[pos]
},
nodes: ns.slice(pos, pos + 4)
});
}
return res;
});
callbackCheck = false;
adapter.loadNodeFromTreeById(inNodeCol[0], checkCallbackFunction);
});
waitsFor(function() {
return callbackCheck;
}, 1000);
runs(function() {
var callNodesIds = _.map(callNodes, function(n) {
return n._id;
});
expect(this.fakeReducerBucketRequest).toHaveBeenCalledWith(
jasmine.any(Array),
5
);
expect(callNodesIds).toEqual(inNodeCol.slice(1));
expect(nodes.length).toEqual(6);
expect(getCommunityNodes().length).toEqual(5);
});
});
it('should not bucket existing nodes', function() {
var lastCallWith, n0, n1, n2, n3, n4, n5, n6;
runs(function() {
var connectToAllButSelf = function(source, ns) {
_.each(ns, function(target) {
if (source !== target) {
insertEdge(edgesCollection, source, target);
}
});
};
n0 = insertNode(nodesCollection, 0);
n1 = insertNode(nodesCollection, 1);
n2 = insertNode(nodesCollection, 2);
n3 = insertNode(nodesCollection, 3);
n4 = insertNode(nodesCollection, 4);
n5 = insertNode(nodesCollection, 5);
n6 = insertNode(nodesCollection, 6);
connectToAllButSelf(n0, [n1, n2, n3]);
insertEdge(edgesCollection, n1, n0);
insertEdge(edgesCollection, n1, n2);
insertEdge(edgesCollection, n1, n4);
insertEdge(edgesCollection, n1, n5);
insertEdge(edgesCollection, n1, n6);
adapter.setChildLimit(2);
spyOn($, "ajax").andCallFake(function(request) {
var vars = JSON.parse(request.data).bindVars;
if (vars !== undefined) {
request.success({result: loadGraph(vars)});
}
});
spyOn(this, "fakeReducerBucketRequest").andCallFake(function(ns) {
lastCallWith = _.pluck(ns, "_id");
return [
{
reason: {
type: "similar",
example: ns[0]
},
nodes: [ns[0]]
},
{
reason: {
type: "similar",
example: ns[1]
},
nodes: [ns[1], ns[2]]
}
];
});
callbackCheck = false;
adapter.loadNodeFromTreeById(n0, checkCallbackFunction);
});
waitsFor(function() {
return callbackCheck;
}, 1000);
runs(function() {
expect(lastCallWith).toEqual([n1, n2, n3]);
expect(getCommunityNodes().length).toEqual(1);
callbackCheck = false;
adapter.loadNodeFromTreeById(n1, checkCallbackFunction);
});
waitsFor(function() {
return callbackCheck;
}, 1000);
runs(function() {
expect(lastCallWith).toEqual([n4, n5, n6]);
expect(getCommunityNodes().length).toEqual(2);
});
});
// Spec: with a child limit of 5, buckets holding a single node must stay
// plain nodes; only the one multi-node bucket becomes a community node.
// Fix: removed the unused locals `callNodes` and `pos`.
it('should not replace single nodes by communities', function() {
  var inNodeCol;
  runs(function() {
    // Insert n sequentially-numbered nodes and return their ids.
    var addNNodes = function(n) {
      var i = 0,
        res = [];
      for (i = 0; i < n; i++) {
        res.push(insertNode(nodesCollection, i));
      }
      return res;
    },
    connectToAllButSelf = function(source, ns) {
      _.each(ns, function(target) {
        if (source !== target) {
          insertEdge(edgesCollection, source, target);
        }
      });
    };
    inNodeCol = addNNodes(7);
    connectToAllButSelf(inNodeCol[0], inNodeCol);
    adapter.setChildLimit(5);
    // Answer AQL cursor requests from the in-memory test graph.
    spyOn($, "ajax").andCallFake(function(request) {
      var vars = JSON.parse(request.data).bindVars;
      if (vars !== undefined) {
        request.success({result: loadGraph(vars)});
      }
    });
    // Fake bucketing: four single-node buckets plus one two-node bucket.
    spyOn(this, "fakeReducerBucketRequest").andCallFake(function(ns) {
      var i = 0,
        res = [];
      for (i = 0; i < 4; i++) {
        res.push({
          reason: {
            type: "similar",
            example: ns[i]
          },
          nodes: [ns[i]]
        });
      }
      res.push({
        reason: {
          type: "similar",
          example: ns[4]
        },
        nodes: [ns[4], ns[5]]
      });
      return res;
    });
    callbackCheck = false;
    adapter.loadNodeFromTreeById(inNodeCol[0], checkCallbackFunction);
  });
  waitsFor(function() {
    return callbackCheck;
  }, 1000);
  runs(function() {
    expect(this.fakeReducerBucketRequest).toHaveBeenCalledWith(
      jasmine.any(Array),
      5
    );
    // 4 singles + 1 community + the start node = 6 displayed nodes.
    expect(nodes.length).toEqual(6);
    expect(getCommunityNodes().length).toEqual(1);
  });
});
describe('that has already loaded one graph', function() {
var c0, c1, c2, c3, c4, c5, c6, c7,
fakeResult, spyHook;
// Shared fixture: stubs $.ajax so cursor (AQL) requests are answered from
// the in-memory test graph via loadGraph, and all other requests return the
// test-controlled fakeResult. An optional spyHook lets individual specs
// intercept a request before the default handling.
beforeEach(function() {
  runs(function() {
    spyOn($, "ajax").andCallFake(function(request) {
      if (spyHook !== undefined) {
        if(!spyHook(request)) {
          return;
        }
      }
      // URLs ending in "cursor" are query requests; everything else is a
      // document API call answered with fakeResult.
      if (request.url.indexOf("cursor", request.url.length - "cursor".length) !== -1) {
        var vars = JSON.parse(request.data).bindVars;
        if (vars !== undefined) {
          request.success({result: loadGraph(vars)});
        }
      } else {
        request.success(fakeResult);
      }
    });
    // Star-shaped test graph: c0 -> c1..c4 and c1 -> c5..c7.
    c0 = insertNode(nodesCollection, 0);
    c1 = insertNode(nodesCollection, 1);
    c2 = insertNode(nodesCollection, 2);
    c3 = insertNode(nodesCollection, 3);
    c4 = insertNode(nodesCollection, 4);
    c5 = insertNode(nodesCollection, 5);
    c6 = insertNode(nodesCollection, 6);
    c7 = insertNode(nodesCollection, 7);
    insertEdge(edgesCollection, c0, c1);
    insertEdge(edgesCollection, c0, c2);
    insertEdge(edgesCollection, c0, c3);
    insertEdge(edgesCollection, c0, c4);
    insertEdge(edgesCollection, c1, c5);
    insertEdge(edgesCollection, c1, c6);
    insertEdge(edgesCollection, c1, c7);
    callbackCheck = false;
    adapter.loadNodeFromTreeById(c0, checkCallbackFunction);
    // Custom matchers that issue synchronous document GETs against the
    // backing database to check persistence of ids/attributes.
    this.addMatchers({
      toBeStoredPermanently: function() {
        var id = this.actual,
          res = false;
        $.ajax({
          type: "GET",
          url: arangodb + "/_api/document/" + id,
          contentType: "application/json",
          processData: false,
          async: false,
          success: function(data) {
            res = true;
          },
          error: function(data) {
            try {
              var temp = JSON.parse(data);
              throw "[" + temp.errorNum + "] " + temp.errorMessage;
            }
            catch (e) {
              throw "Undefined ERROR";
            }
          }
        });
        return res;
      },
      toNotBeStoredPermanently: function() {
        var id = this.actual,
          res = false;
        $.ajax({
          type: "GET",
          url: arangodb + "/_api/document/" + id,
          contentType: "application/json",
          processData: false,
          async: false,
          success: function(data) {
          },
          error: function(data) {
            // Only a 404 proves the document is really gone.
            if (data.status === 404) {
              res = true;
            }
          }
        });
        return res;
      },
      toHavePermanentAttributeWithValue: function(attribute, value) {
        var id = this.actual,
          res = false;
        $.ajax({
          type: "GET",
          url: arangodb + "/_api/document/" + id,
          contentType: "application/json",
          processData: false,
          async: false,
          success: function(data) {
            if (data[attribute] === value) {
              res = true;
            }
          },
          error: function(data) {
          }
        });
        return res;
      }
    });
  });
  waitsFor(function() {
    return callbackCheck;
  });
  runs(function() {
    callbackCheck = false;
  });
});
// Loading a second start node must merge into the already-loaded graph
// without duplicating nodes, and issue a new traversal query for c1.
it('should be able to add nodes from another query', function() {
  runs(function() {
    adapter.loadNodeFromTreeById(c1, checkCallbackFunction);
  });
  waitsFor(function() {
    return callbackCheck;
  }, 1000);
  runs(function() {
    existNodes([c0, c1, c2, c3, c4, c5, c6, c7]);
    expect(nodes.length).toEqual(8);
    expect($.ajax).toHaveBeenCalledWith(
      requests.cursor(traversalQuery(c1, nodesCollection, edgesCollection))
    );
  });
});
// Patching a node must update the local copy and send a PATCH request.
it('should be able to change a value of one node permanently', function() {
  var toPatch;
  runs(function() {
    fakeResult = {hello: "world"};
    toPatch = nodeWithID(c0);
    adapter.patchNode(toPatch, {hello: "world"}, checkCallbackFunction);
  });
  waitsFor(function() {
    return callbackCheck;
  }, 1000);
  runs(function() {
    expect(toPatch._data.hello).toEqual("world");
    expect($.ajax).toHaveBeenCalledWith(
      requests.node(nodesCollection).patch(c0, fakeResult)
    );
  });
});
// Same as above, but patching an edge document.
it('should be able to change a value of one edge permanently', function() {
  var toPatch;
  runs(function() {
    fakeResult = {hello: "world"};
    toPatch = edgeWithSourceAndTargetId(c0, c1);
    adapter.patchEdge(toPatch, {hello: "world"}, checkCallbackFunction);
  });
  waitsFor(function() {
    return callbackCheck;
  }, 1000);
  runs(function() {
    expect(toPatch._data.hello).toEqual("world");
    expect($.ajax).toHaveBeenCalledWith(
      requests.node(edgesCollection).patch(toPatch._id, fakeResult)
    );
  });
});
// Deleting an edge must issue a DELETE request and remove it locally.
it('should be able to remove an edge permanently', function() {
  var toDelete;
  runs(function() {
    fakeResult = "";
    toDelete = edgeWithSourceAndTargetId(c0, c4);
    existEdge(c0, c4);
    adapter.deleteEdge(toDelete, checkCallbackFunction);
  });
  waitsFor(function() {
    return callbackCheck;
  }, 1000);
  runs(function() {
    expect($.ajax).toHaveBeenCalledWith(
      requests.node(edgesCollection).del(toDelete._id)
    );
    notExistEdge(c0, c4);
  });
});
// Creating a node must POST to the collection and register the id the
// server (here: fakeResult) returned.
it('should be able to add a node permanently', function() {
  var insertedId;
  runs(function() {
    fakeResult = {
      error: false,
      _id: "TestNodes123/MyNode",
      _rev: "1234",
      _key: "MyNode"
    };
    adapter.createNode({}, function(node) {
      insertedId = node._id;
      callbackCheck = true;
    });
  });
  waitsFor(function() {
    return callbackCheck;
  }, 1000);
  runs(function() {
    expect($.ajax).toHaveBeenCalledWith(
      requests.node(nodesCollection).create({})
    );
    existNode(insertedId);
  });
});
// Exceeding the node limit while loading must invoke the community
// reducer ("getCommunity") with the limit and the focused node.
it('should trigger the reducer if too many nodes are added', function() {
  runs(function() {
    adapter.setNodeLimit(6);
    spyOn(mockWrapper, "call").andCallFake(function(name) {
      workerCB({
        data: {
          cmd: name,
          result: [c0]
        }
      });
    });
    adapter.loadNodeFromTreeById(c1, checkCallbackFunction);
    expect(mockWrapper.call).toHaveBeenCalledWith("getCommunity", 6, c1);
  });
});
describe('checking community nodes', function() {
// Raising the limit above the current node count must not run the reducer.
it('should not trigger the reducer if the limit is set large enough', function() {
  spyOn(mockWrapper, "call").andCallFake(function(name) {
    workerCB({
      data: {
        cmd: name,
        result: [c0]
      }
    });
  });
  adapter.setNodeLimit(10);
  expect(mockWrapper.call).not.toHaveBeenCalled();
});
// Shrinking the limit below the current node count must run the reducer.
it('should trigger the reducer if the limit is set too small', function() {
  spyOn(mockWrapper, "call").andCallFake(function(name) {
    workerCB({
      data: {
        cmd: name,
        result: [c0]
      }
    });
  });
  adapter.setNodeLimit(2);
  expect(mockWrapper.call).toHaveBeenCalledWith("getCommunity", 2);
});
// The reducer result [c0, c1, c2] must collapse into one community node,
// with the edges into the collapsed set rewired onto that community.
it('should create a community node if limit is set too small', function() {
  runs(function() {
    callbackCheck = false;
    spyOn(mockWrapper, "call").andCallFake(function(name) {
      workerCB({
        data: {
          cmd: name,
          result: [c0, c1, c2]
        }
      });
    });
    adapter.setNodeLimit(2, checkCallbackFunction);
  });
  waitsFor(function() {
    return callbackCheck;
  }, 1000);
  runs(function() {
    var commId = getCommunityNodesIds()[0];
    notExistNodes([c0, c1, c2]);
    existNode(commId);
    existNodes([c3, c4]);
    expect(nodes.length).toEqual(3);
    existEdge(commId, c3);
    existEdge(commId, c4);
    expect(edges.length).toEqual(2);
  });
});
// Loading past the limit must likewise collapse the reducer result.
it('should create a community node if too many nodes are added', function() {
  runs(function() {
    adapter.setNodeLimit(6);
    spyOn(mockWrapper, "call").andCallFake(function(name) {
      workerCB({
        data: {
          cmd: name,
          result: [c0, c1, c2, c3]
        }
      });
    });
    adapter.loadNodeFromTreeById(c1, checkCallbackFunction);
  });
  waitsFor(function() {
    return callbackCheck;
  }, 1000);
  runs(function() {
    var commId = getCommunityNodesIds()[0];
    notExistNodes([c0, c1, c2, c3]);
    existNode(commId);
    existNodes([c4, c5, c6, c7]);
    expect(nodes.length).toEqual(5);
    existEdge(commId, c4);
    existEdge(commId, c5);
    existEdge(commId, c6);
    existEdge(commId, c7);
    expect(edges.length).toEqual(4);
  });
});
describe('expanding after a while', function() {
// Builds a diamond-like graph on fresh "vertices"/"edges" collections,
// forces [v1, v3, v4] into a community, then expands it again and checks
// that the visible nodes/edges are unchanged by the round trip.
it('should connect edges of internal nodes accordingly', function() {
  var commNode, called, counterCallback,
    v0, v1, v2, v3, v4,
    e0_1, e0_2, e1_3, e1_4, e2_3, e2_4;
  runs(function() {
    var v = "vertices",
      e = "edges";
    // Reset shared display state from earlier specs.
    nodes.length = 0;
    edges.length = 0;
    v0 = insertNode(v, 0);
    v1 = insertNode(v, 1);
    v2 = insertNode(v, 2);
    v3 = insertNode(v, 3);
    v4 = insertNode(v, 4);
    e0_1 = insertEdge(e, v0, v1);
    e0_2 = insertEdge(e, v0, v2);
    e1_3 = insertEdge(e, v1, v3);
    e1_4 = insertEdge(e, v1, v4);
    e2_3 = insertEdge(e, v2, v3);
    e2_4 = insertEdge(e, v2, v4);
    called = 0;
    counterCallback = function() {
      called++;
    };
    spyOn(mockWrapper, "call").andCallFake(function(name) {
      workerCB({
        data: {
          cmd: name,
          result: [v1, v3, v4]
        }
      });
    });
    adapter.setNodeLimit(3);
    adapter.changeToCollections(v, e);
    adapter.loadNode(v0, counterCallback);
    adapter.loadNode(v1, counterCallback);
  });
  waitsFor(function() {
    return called === 2;
  }, 1000);
  runs(function() {
    // Third load exceeds the limit of 3 and triggers the community.
    adapter.loadNode(v2, counterCallback);
    commNode = getCommunityNodes()[0];
  });
  waitsFor(function() {
    return called === 3;
  }, 1000);
  runs(function() {
    var commId = commNode._id;
    // Check start condition
    existNodes([commId, v0, v2]);
    expect(nodes.length).toEqual(3);
    existEdge(v0, v2);
    existEdge(v0, commId);
    existEdge(v2, commId);
    expect(edges.length).toEqual(4);
    adapter.setNodeLimit(20);
    adapter.expandCommunity(commNode, counterCallback);
  });
  waitsFor(function() {
    return called === 4;
  }, 1000);
  runs(function() {
    var commId = commNode._id;
    // Expansion keeps the same outer view; the community is only flagged.
    existNodes([commId, v0, v2]);
    expect(nodes.length).toEqual(3);
    existEdge(v0, v2);
    existEdge(v0, commId);
    existEdge(v2, commId);
    expect(edges.length).toEqual(4);
    expect(commNode._expanded).toBeTruthy();
  });
});
// Same setup as above; after expansion, verifies the in/out degree
// counters of every vertex (looked up inside the community if needed).
it('set inbound and outboundcounter correctly', function() {
  var commNode, called, counterCallback,
    v0, v1, v2, v3, v4,
    e0_1, e0_2, e1_3, e1_4, e2_3, e2_4;
  runs(function() {
    var v = "vertices",
      e = "edges";
    nodes.length = 0;
    edges.length = 0;
    v0 = insertNode(v, 0);
    v1 = insertNode(v, 1);
    v2 = insertNode(v, 2);
    v3 = insertNode(v, 3);
    v4 = insertNode(v, 4);
    e0_1 = insertEdge(e, v0, v1);
    e0_2 = insertEdge(e, v0, v2);
    e1_3 = insertEdge(e, v1, v3);
    e1_4 = insertEdge(e, v1, v4);
    e2_3 = insertEdge(e, v2, v3);
    e2_4 = insertEdge(e, v2, v4);
    called = 0;
    counterCallback = function() {
      called++;
    };
    spyOn(mockWrapper, "call").andCallFake(function(name) {
      workerCB({
        data: {
          cmd: name,
          result: [v1, v3, v4]
        }
      });
    });
    adapter.setNodeLimit(3);
    adapter.changeToCollections(v, e);
    adapter.loadNode(v0, counterCallback);
    adapter.loadNode(v1, counterCallback);
  });
  waitsFor(function() {
    return called === 2;
  }, 1000);
  runs(function() {
    adapter.loadNode(v2, counterCallback);
    commNode = getCommunityNodes()[0];
  });
  waitsFor(function() {
    return called === 3;
  }, 1000);
  runs(function() {
    adapter.setNodeLimit(20);
    adapter.expandCommunity(commNode, counterCallback);
  });
  waitsFor(function() {
    return called === 4;
  }, 1000);
  runs(function() {
    // Looks up a node either in the display or inside the community.
    var checkNodeWithInAndOut = function(id, inbound, outbound) {
      var n = nodeWithID(id) || commNode.getNode(id);
      expect(n._outboundCounter).toEqual(outbound);
      expect(n._inboundCounter).toEqual(inbound);
    };
    checkNodeWithInAndOut(v0, 0, 2);
    checkNodeWithInAndOut(v1, 1, 2);
    checkNodeWithInAndOut(v2, 1, 2);
    checkNodeWithInAndOut(v3, 2, 0);
    checkNodeWithInAndOut(v4, 2, 0);
    expect(commNode._outboundCounter).toEqual(0);
    expect(commNode._inboundCounter).toEqual(3);
  });
});
});
describe('that displays a community node already', function() {
var firstCommId,
  fakeResult;
// Fixture: with a node limit of 7, loading c1 makes the reducer collapse
// fakeResult ([c0, c2]) into a first community node whose id is captured.
beforeEach(function() {
  runs(function() {
    callbackCheck = false;
    adapter.setNodeLimit(7);
    fakeResult = [c0, c2];
    spyOn(mockWrapper, "call").andCallFake(function(name) {
      workerCB({
        data: {
          cmd: name,
          result: fakeResult
        }
      });
    });
    adapter.loadNodeFromTreeById(c1, checkCallbackFunction);
  });
  waitsFor(function() {
    return callbackCheck;
  });
  runs(function() {
    firstCommId = getCommunityNodesIds()[0];
  });
});
// With room under the new limit, expansion keeps a single community and
// its rewired edges.
it('should expand a community if enough space is available', function() {
  runs(function() {
    adapter.setNodeLimit(10);
    callbackCheck = false;
    adapter.expandCommunity(nodeWithID(firstCommId), checkCallbackFunction);
  });
  waitsFor(function() {
    return callbackCheck;
  });
  runs(function() {
    expect(getCommunityNodes().length).toEqual(1);
    existNodes([firstCommId, c1, c3, c4, c5, c6, c7]);
    existEdge(firstCommId, c1);
    existEdge(firstCommId, c3);
    existEdge(firstCommId, c4);
  });
});
// Without room, expanding one community forces the reducer to fold
// other nodes ([c1, c7]) into a second community.
it('should expand a community and join another '
  + 'one if not enough space is available', function() {
  runs(function() {
    fakeResult = [c1, c7];
    callbackCheck = false;
    adapter.expandCommunity(nodeWithID(firstCommId), checkCallbackFunction);
  });
  waitsFor(function() {
    return callbackCheck;
  }, 1000);
  runs(function() {
    var commId = getCommunityNodesIds()[0],
      newCommId = getCommunityNodesIds()[1];
    expect(getCommunityNodes().length).toEqual(2);
    existNodes([commId, c3, c4, c5, c6, newCommId]);
    notExistNodes([c0, c1, c2, c7]);
    existEdge(commId, c3);
    existEdge(commId, c4);
    existEdge(commId, newCommId);
    existEdge(newCommId, c5);
    existEdge(newCommId, c6);
  });
});
// Lowering the limit again must fold further nodes into a new community
// while keeping the first one intact.
it('should join another community if space is further reduced', function() {
  runs(function() {
    fakeResult = [c1, c7];
    callbackCheck = false;
    adapter.setNodeLimit(6, checkCallbackFunction);
  });
  waitsFor(function() {
    return callbackCheck;
  });
  runs(function() {
    expect(getCommunityNodes().length).toEqual(2);
    var ids = getCommunityNodesIds(),
      newCommId;
    // The id ordering is not guaranteed; pick the one that is not first.
    if (firstCommId === ids[0]) {
      newCommId = ids[1];
    } else {
      newCommId = ids[0];
    }
    existNodes([c3, c4, c5, c6, firstCommId, newCommId]);
    notExistNodes([c0, c1, c2, c7]);
    existEdge(firstCommId, c3);
    existEdge(firstCommId, c4);
    existEdge(firstCommId, newCommId);
    existEdge(newCommId, c5);
    existEdge(newCommId, c6);
  });
});
// An edge pointing at a node hidden inside a community must be rendered
// as an edge to the community node.
it('should connect edges to internal nodes', function() {
  runs(function() {
    insertEdge(edgesCollection, c3, c0);
    adapter.setNodeLimit(20);
    callbackCheck = false;
    adapter.loadNode(c3, checkCallbackFunction);
  });
  waitsFor(function() {
    return callbackCheck;
  }, 1000);
  runs(function() {
    existEdge(c3, firstCommId);
  });
});
});
});
describe('that has loaded several queries', function() {
var c8, c9, e2_8;
// Fixture: extend the base graph with c8/c9 and load c2 as a second query
// so two overlapping result sets are displayed.
beforeEach(function() {
  runs(function() {
    c8 = insertNode(nodesCollection, 8);
    c9 = insertNode(nodesCollection, 9);
    e2_8 = insertEdge(edgesCollection, c2, c8);
    insertEdge(edgesCollection, c3, c8);
    insertEdge(edgesCollection, c3, c9);
    callbackCheck = false;
    adapter.loadNodeFromTreeById(c2, checkCallbackFunction);
  });
  waitsFor(function() {
    return callbackCheck;
  });
  runs(function() {
    callbackCheck = false;
  });
});
// Nodes present in both query results must appear exactly once.
it('should not add a node to the list twice', function() {
  runs(function() {
    adapter.loadNodeFromTreeById(c3, checkCallbackFunction);
  });
  waitsFor(function() {
    return callbackCheck;
  }, 1000);
  runs(function() {
    existNodes([c0, c1, c2, c3, c4, c8, c9]);
    expect(nodes.length).toEqual(7);
  });
});
// Creating an edge must POST to the edge collection and yield a local
// edge object mirroring the server response (fakeResult).
it('should be able to add an edge permanently', function() {
  var insertedId,
    source,
    target,
    insertedEdge;
  runs(function() {
    source = nodeWithID(c0);
    target = nodeWithID(c8);
    fakeResult = {
      _id: edgesCollection + "/123",
      _key: "123",
      _rev: "123",
      _from: source._id,
      _to: target._id
    };
    adapter.createEdge({source: source, target: target}, function(edge) {
      insertedId = edge._id;
      callbackCheck = true;
      insertedEdge = edge;
    });
  });
  waitsFor(function() {
    return callbackCheck;
  }, 1000);
  runs(function() {
    expect($.ajax).toHaveBeenCalledWith(
      requests.edge(edgesCollection).create(source._id, target._id, {})
    );
    existEdge(source._id, target._id);
    expect(insertedEdge).toEqual({
      source: source,
      target: target,
      _id: insertedId,
      _data: {
        _id: insertedId,
        _from: source._id,
        _to: target._id,
        _rev: jasmine.any(String),
        _key: jasmine.any(String)
      }
    });
  });
});
// Deleting a node must also delete its connected edges; spyHook feeds the
// adapter the edge query result listing e2_8.
it('should be able to remove a node and all connected edges permanently', function() {
  var toDelete;
  runs(function() {
    spyHook = function(request) {
      if (request.data !== undefined) {
        request.success({result: [
          {_id: e2_8}
        ]});
        return false;
      }
      return true;
    };
    fakeResult = "";
    toDelete = nodeWithID(c2);
    adapter.deleteNode(toDelete, checkCallbackFunction);
  });
  // No callback exists for the cascaded edge deletions; just wait.
  waits(2000);
  runs(function() {
    expect($.ajax).toHaveBeenCalledWith(
      requests.node(nodesCollection).del(toDelete._id)
    );
    notExistNode(c2);
    expect($.ajax).toHaveBeenCalledWith(
      requests.node(edgesCollection).del(e2_8)
    );
    notExistEdge(c2, c8);
  });
});
});
});
describe('displaying only parts of the graph', function() {
// Deleting a node must also delete edges that were never displayed:
// only s0 is loaded, yet all three edges touching toDel are removed.
it('should be able to remove a node and all '
  + 'connected edges including not visible ones', function() {
  var s0, s1, t0, toDel,
    s0_toDel, s1_toDel, toDel_t0;
  runs(function() {
    callbackCheck = false;
    s0 = insertNode(nodesCollection, 0);
    s1 = insertNode(nodesCollection, 1);
    t0 = insertNode(nodesCollection, 2);
    toDel = insertNode(nodesCollection, 3);
    s0_toDel = insertEdge(edgesCollection, s0, toDel);
    s1_toDel = insertEdge(edgesCollection, s1, toDel);
    toDel_t0 = insertEdge(edgesCollection, toDel, t0);
    var loaded = false,
      fakeResult = "";
    spyOn($, "ajax").andCallFake(function(request) {
      if (request.url.indexOf("cursor", request.url.length - "cursor".length) !== -1) {
        // First cursor request loads the graph; later cursor requests are
        // the edge lookup for the deletion and return all three edge ids.
        if (!loaded) {
          var vars = JSON.parse(request.data).bindVars;
          if (vars !== undefined) {
            loaded = true;
            request.success({result: loadGraph(vars)});
          }
        } else {
          request.success({result: [
            {
              _id: s0_toDel
            },{
              _id: s1_toDel
            },{
              _id: toDel_t0
            }
          ]});
        }
      } else {
        request.success(fakeResult);
      }
    });
    adapter.loadNodeFromTreeById(s0, checkCallbackFunction);
  });
  waitsFor(function() {
    return callbackCheck;
  }, 1000);
  runs(function() {
    callbackCheck = false;
    adapter.deleteNode(nodeWithID(toDel), checkCallbackFunction);
  });
  // Wait 2 seconds as no handle for the deletion of edges exists.
  waits(2000);
  runs(function() {
    notExistNodes([toDel, s1, t0]);
    existNode(s0);
    notExistEdge(s0, toDel);
    notExistEdge(s1, toDel);
    notExistEdge(toDel, t0);
    expect($.ajax).toHaveBeenCalledWith(
      requests.node(nodesCollection).del(toDel)
    );
    expect($.ajax).toHaveBeenCalledWith(
      requests.node(edgesCollection).del(s0_toDel)
    );
    expect($.ajax).toHaveBeenCalledWith(
      requests.node(edgesCollection).del(s1_toDel)
    );
    expect($.ajax).toHaveBeenCalledWith(
      requests.node(edgesCollection).del(toDel_t0)
    );
    // Check if counter is set correctly
    expect(nodeWithID(s0)._outboundCounter).toEqual(0);
  });
});
});
});
});
}());
| morsdatum/ArangoDB | js/apps/system/aardvark/test/specs/graphViewer/specAdapter/arangoAdapterSpec.js | JavaScript | apache-2.0 | 62,481 |
package v1
import (
v1 "github.com/openshift/api/security/v1"
rest "k8s.io/client-go/rest"
)
// PodSecurityPolicySelfSubjectReviewsGetter has a method to return a PodSecurityPolicySelfSubjectReviewInterface.
// A group's client should implement this interface.
type PodSecurityPolicySelfSubjectReviewsGetter interface {
	PodSecurityPolicySelfSubjectReviews(namespace string) PodSecurityPolicySelfSubjectReviewInterface
}

// PodSecurityPolicySelfSubjectReviewInterface has methods to work with PodSecurityPolicySelfSubjectReview resources.
type PodSecurityPolicySelfSubjectReviewInterface interface {
	// Create submits a review and returns the server's representation of it.
	Create(*v1.PodSecurityPolicySelfSubjectReview) (*v1.PodSecurityPolicySelfSubjectReview, error)
	PodSecurityPolicySelfSubjectReviewExpansion
}

// podSecurityPolicySelfSubjectReviews implements PodSecurityPolicySelfSubjectReviewInterface
type podSecurityPolicySelfSubjectReviews struct {
	client rest.Interface // REST client used to issue the requests
	ns     string         // namespace the reviews are scoped to
}
// newPodSecurityPolicySelfSubjectReviews returns a PodSecurityPolicySelfSubjectReviews
// scoped to the given namespace and backed by the client's RESTClient.
func newPodSecurityPolicySelfSubjectReviews(c *SecurityV1Client, namespace string) *podSecurityPolicySelfSubjectReviews {
	reviews := &podSecurityPolicySelfSubjectReviews{}
	reviews.client = c.RESTClient()
	reviews.ns = namespace
	return reviews
}
// Create takes the representation of a podSecurityPolicySelfSubjectReview and creates it. Returns the server's representation of the podSecurityPolicySelfSubjectReview, and an error, if there is any.
func (c *podSecurityPolicySelfSubjectReviews) Create(podSecurityPolicySelfSubjectReview *v1.PodSecurityPolicySelfSubjectReview) (result *v1.PodSecurityPolicySelfSubjectReview, err error) {
	result = &v1.PodSecurityPolicySelfSubjectReview{}
	// Build and execute a namespaced POST, decoding the response into result.
	err = c.client.Post().
		Namespace(c.ns).
		Resource("podsecuritypolicyselfsubjectreviews").
		Body(podSecurityPolicySelfSubjectReview).
		Do().
		Into(result)
	return
}
| rhamilto/origin | vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/podsecuritypolicyselfsubjectreview.go | GO | apache-2.0 | 1,856 |
// Strip a single trailing slash from a page path. The root path (`/`)
// and an undefined path are returned unchanged.
const normalizePagePath = path => {
  if (path === undefined) {
    return path
  }
  if (path === `/`) {
    return `/`
  }
  return path.endsWith(`/`) ? path.slice(0, -1) : path
}

export default normalizePagePath
| BigBoss424/portfolio | v8/development/node_modules/gatsby/cache-dir/normalize-page-path.js | JavaScript | apache-2.0 | 209 |
/*
* Copyright 2015 Open Networking Laboratory
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.pcepio.exceptions;
/**
* Custom Exception for PCEP IO.
*/
/**
 * Custom Exception for PCEP IO.
 * <p>
 * Optionally carries the PCEP error type and error value associated with
 * the parse failure; both default to zero when not supplied.
 */
public class PcepParseException extends Exception {

    private static final long serialVersionUID = 7960991379951448423L;

    // PCEP error type/value for this failure; 0 means "not set".
    private byte errType = 0;
    private byte errValue = 0;

    /**
     * Creates a new exception without any detail.
     */
    public PcepParseException() {
    }

    /**
     * Creates a new exception with a detail message and an underlying cause.
     *
     * @param message the detail of exception in string
     * @param cause underlying cause of the error
     */
    public PcepParseException(final String message, final Throwable cause) {
        super(message, cause);
    }

    /**
     * Creates a new exception with a detail message.
     *
     * @param message the detail of exception in string
     */
    public PcepParseException(final String message) {
        super(message);
    }

    /**
     * Creates a new exception carrying a PCEP error type and error value.
     *
     * @param errType error type of pcep
     * @param errValue error value of pcep
     */
    public PcepParseException(final byte errType, final byte errValue) {
        this.errType = errType;
        this.errValue = errValue;
    }

    /**
     * Creates a new exception from an underlying cause.
     *
     * @param cause underlying cause of the error
     */
    public PcepParseException(final Throwable cause) {
        super(cause);
    }

    /**
     * Returns error type for this exception.
     *
     * @return ErrorType
     */
    public byte getErrorType() {
        return this.errType;
    }

    /**
     * Returns error value for this exception.
     *
     * @return ErrorValue
     */
    public byte getErrorValue() {
        return this.errValue;
    }
}
| planoAccess/clonedONOS | protocols/pcep/pcepio/src/main/java/org/onosproject/pcepio/exceptions/PcepParseException.java | Java | apache-2.0 | 2,415 |
package sti
import (
"errors"
"fmt"
"io"
"path/filepath"
"reflect"
"regexp/syntax"
"strings"
"testing"
"github.com/openshift/source-to-image/pkg/api"
"github.com/openshift/source-to-image/pkg/build"
"github.com/openshift/source-to-image/pkg/docker"
s2ierr "github.com/openshift/source-to-image/pkg/errors"
"github.com/openshift/source-to-image/pkg/ignore"
"github.com/openshift/source-to-image/pkg/scm/downloaders/empty"
"github.com/openshift/source-to-image/pkg/scm/downloaders/file"
gitdownloader "github.com/openshift/source-to-image/pkg/scm/downloaders/git"
"github.com/openshift/source-to-image/pkg/scm/git"
"github.com/openshift/source-to-image/pkg/test"
testfs "github.com/openshift/source-to-image/pkg/test/fs"
"github.com/openshift/source-to-image/pkg/util/fs"
)
// FakeSTI is a test double for the STI build phases. It records which
// phase methods were invoked and returns injected results/errors, so
// tests can drive and observe the builder without a real Docker daemon.
type FakeSTI struct {
	CleanupCalled          bool
	PrepareCalled          bool
	SetupRequired          []string
	SetupOptional          []string
	SetupError             error
	ExistsCalled           bool
	ExistsError            error
	BuildRequest           *api.Config
	BuildResult            *api.Result
	DownloadError          error
	SaveArtifactsCalled    bool
	SaveArtifactsError     error
	FetchSourceCalled      bool
	FetchSourceError       error
	ExecuteCommand         string
	ExecuteUser            string
	ExecuteError           error
	ExpectedError          bool
	LayeredBuildCalled     bool
	LayeredBuildError      error
	PostExecuteDestination string
	PostExecuteContainerID string
	PostExecuteError       error
}

// newFakeBaseSTI returns an STI whose collaborators are all fakes.
func newFakeBaseSTI() *STI {
	return &STI{
		config:    &api.Config{},
		result:    &api.Result{},
		docker:    &docker.FakeDocker{},
		installer: &test.FakeInstaller{},
		git:       &test.FakeGit{},
		fs:        &testfs.FakeFileSystem{},
		tar:       &test.FakeTar{},
	}
}

// newFakeSTI returns an STI wired so that the single FakeSTI f acts as
// preparer, artifacts handler, script handler, garbage collector and
// layered-build delegate, letting one fake observe every phase.
func newFakeSTI(f *FakeSTI) *STI {
	s := &STI{
		config:        &api.Config{},
		result:        &api.Result{},
		docker:        &docker.FakeDocker{},
		runtimeDocker: &docker.FakeDocker{},
		installer:     &test.FakeInstaller{},
		git:           &test.FakeGit{},
		fs:            &testfs.FakeFileSystem{},
		tar:           &test.FakeTar{},
		preparer:      f,
		ignorer:       &ignore.DockerIgnorer{},
		artifacts:     f,
		scripts:       f,
		garbage:       f,
		layered:       &FakeDockerBuild{f},
	}
	s.source = &gitdownloader.Clone{Git: s.git, FileSystem: s.fs}
	return s
}
// Cleanup records that cleanup ran.
func (f *FakeSTI) Cleanup(*api.Config) {
	f.CleanupCalled = true
}

// Prepare records the call and advertises the scripts the build needs:
// assemble/run as required, save-artifacts as optional.
func (f *FakeSTI) Prepare(*api.Config) error {
	f.PrepareCalled = true
	f.SetupRequired = []string{api.Assemble, api.Run}
	f.SetupOptional = []string{api.SaveArtifacts}
	return nil
}

// Exists records the call and always reports that a previous image exists.
func (f *FakeSTI) Exists(*api.Config) bool {
	f.ExistsCalled = true
	return true
}

// Request returns the canned build config.
func (f *FakeSTI) Request() *api.Config {
	return f.BuildRequest
}

// Result returns the canned build result.
func (f *FakeSTI) Result() *api.Result {
	return f.BuildResult
}

// Save records the call and returns the injected error, if any.
func (f *FakeSTI) Save(*api.Config) error {
	f.SaveArtifactsCalled = true
	return f.SaveArtifactsError
}

// fetchSource returns the injected fetch error.
func (f *FakeSTI) fetchSource() error {
	return f.FetchSourceError
}

// Download returns the injected download error and no source info.
func (f *FakeSTI) Download(*api.Config) (*git.SourceInfo, error) {
	return nil, f.DownloadError
}

// Execute records the command and user it was asked to run.
func (f *FakeSTI) Execute(command string, user string, r *api.Config) error {
	f.ExecuteCommand = command
	f.ExecuteUser = user
	return f.ExecuteError
}

// wasExpectedError returns the injected flag regardless of the text.
func (f *FakeSTI) wasExpectedError(text string) bool {
	return f.ExpectedError
}

// PostExecute records the container id and destination it was given.
func (f *FakeSTI) PostExecute(id, destination string) error {
	f.PostExecuteContainerID = id
	f.PostExecuteDestination = destination
	return f.PostExecuteError
}

// FakeDockerBuild stubs the layered builder on top of a FakeSTI.
type FakeDockerBuild struct {
	*FakeSTI
}

// Build records that a layered build was attempted and returns the
// injected layered-build error.
func (f *FakeDockerBuild) Build(*api.Config) (*api.Result, error) {
	f.LayeredBuildCalled = true
	return &api.Result{}, f.LayeredBuildError
}
// TestDefaultSource verifies that a "." source is handled by the local
// file downloader and that Config.Source stays set.
func TestDefaultSource(t *testing.T) {
	config := &api.Config{
		Source:       git.MustParse("."),
		DockerConfig: &api.DockerConfig{Endpoint: "unix:///var/run/docker.sock"},
	}
	client, err := docker.NewEngineAPIClient(config.DockerConfig)
	if err != nil {
		t.Fatal(err)
	}
	sti, err := New(client, config, fs.NewFileSystem(), build.Overrides{})
	if err != nil {
		t.Fatal(err)
	}
	if config.Source == nil {
		t.Errorf("Config.Source not set: %v", config.Source)
	}
	if _, ok := sti.source.(*file.File); !ok || sti.source == nil {
		t.Errorf("Source interface not set: %#v", sti.source)
	}
}

// TestEmptySource verifies that a nil source falls back to the no-op
// downloader and leaves Config.Source untouched.
func TestEmptySource(t *testing.T) {
	config := &api.Config{
		Source:       nil,
		DockerConfig: &api.DockerConfig{Endpoint: "unix:///var/run/docker.sock"},
	}
	client, err := docker.NewEngineAPIClient(config.DockerConfig)
	if err != nil {
		t.Fatal(err)
	}
	sti, err := New(client, config, fs.NewFileSystem(), build.Overrides{})
	if err != nil {
		t.Fatal(err)
	}
	if config.Source != nil {
		t.Errorf("Config.Source unexpectantly changed: %v", config.Source)
	}
	if _, ok := sti.source.(*empty.Noop); !ok || sti.source == nil {
		t.Errorf("Source interface not set: %#v", sti.source)
	}
}

// TestOverrides verifies that a Downloader override replaces the default
// source downloader on the constructed builder.
func TestOverrides(t *testing.T) {
	fd := &FakeSTI{}
	client, err := docker.NewEngineAPIClient(&api.DockerConfig{Endpoint: "unix:///var/run/docker.sock"})
	if err != nil {
		t.Fatal(err)
	}
	sti, err := New(client,
		&api.Config{
			DockerConfig: &api.DockerConfig{Endpoint: "unix:///var/run/docker.sock"},
		},
		fs.NewFileSystem(),
		build.Overrides{
			Downloader: fd,
		},
	)
	if err != nil {
		t.Fatal(err)
	}
	if sti.source != fd {
		t.Errorf("Override of downloader not set: %#v", sti)
	}
}
// TestBuild runs the build once non-incrementally and once incrementally,
// checking the scripts configured, the Exists/Save calls and that the
// assemble script is what gets executed.
func TestBuild(t *testing.T) {
	incrementalTest := []bool{false, true}
	for _, incremental := range incrementalTest {
		fh := &FakeSTI{
			BuildRequest: &api.Config{Incremental: incremental},
			BuildResult:  &api.Result{},
		}

		builder := newFakeSTI(fh)
		builder.Build(&api.Config{Incremental: incremental})

		// Verify the right scripts were configed
		if !reflect.DeepEqual(fh.SetupRequired, []string{api.Assemble, api.Run}) {
			t.Errorf("Unexpected required scripts configed: %#v", fh.SetupRequired)
		}
		if !reflect.DeepEqual(fh.SetupOptional, []string{api.SaveArtifacts}) {
			t.Errorf("Unexpected optional scripts configed: %#v", fh.SetupOptional)
		}

		// Verify that Exists was called
		if !fh.ExistsCalled {
			t.Errorf("Exists was not called.")
		}

		// Verify that Save was called for an incremental build
		if incremental && !fh.SaveArtifactsCalled {
			t.Errorf("Save artifacts was not called for an incremental build")
		}

		// Verify that Execute was called with the right script
		if fh.ExecuteCommand != api.Assemble {
			t.Errorf("Unexpected execute command: %s", fh.ExecuteCommand)
		}
	}
}

// TestLayeredBuild verifies that a missing-requirements failure of the
// assemble step (errMissingRequirements) falls back to a layered build.
func TestLayeredBuild(t *testing.T) {
	fh := &FakeSTI{
		BuildRequest: &api.Config{
			BuilderImage: "testimage",
		},
		BuildResult: &api.Result{
			BuildInfo: api.BuildInfo{
				Stages: []api.StageInfo{},
			},
		},
		ExecuteError:  errMissingRequirements,
		ExpectedError: true,
	}
	builder := newFakeSTI(fh)
	builder.Build(&api.Config{BuilderImage: "testimage"})
	// Verify layered build
	if !fh.LayeredBuildCalled {
		t.Errorf("Layered build was not called.")
	}
}

// TestBuildErrorExecute verifies that an unexpected execute error is
// propagated to the caller instead of triggering a layered build.
func TestBuildErrorExecute(t *testing.T) {
	fh := &FakeSTI{
		BuildRequest: &api.Config{
			BuilderImage: "testimage",
		},
		BuildResult:   &api.Result{},
		ExecuteError:  errors.New("ExecuteError"),
		ExpectedError: false,
	}
	builder := newFakeSTI(fh)
	_, err := builder.Build(&api.Config{BuilderImage: "testimage"})
	if err == nil || err.Error() != "ExecuteError" {
		t.Errorf("An error was expected, but got different %v", err)
	}
}
// TestWasExpectedError checks isMissingRequirements against container
// error strings that indicate a missing tar or /bin/sh in the image.
func TestWasExpectedError(t *testing.T) {
	type expErr struct {
		text     string
		expected bool
	}

	tests := []expErr{
		{ // 0 - tar error
			text:     `/bin/sh: tar: not found`,
			expected: true,
		},
		{ // 1 - tar error
			text:     `/bin/sh: tar: command not found`,
			expected: true,
		},
		{ // 2 - /bin/sh error
			text:     `exec: "/bin/sh": stat /bin/sh: no such file or directory`,
			expected: true,
		},
		{ // 3 - non container error
			text:     "other error",
			expected: false,
		},
	}

	for i, ti := range tests {
		result := isMissingRequirements(ti.text)
		if result != ti.expected {
			t.Errorf("(%d) Unexpected result: %v. Expected: %v", i, result, ti.expected)
		}
	}
}

// testBuildHandler builds an STI with fake collaborators suitable for the
// post-execute tests and runs initPostExecutorSteps on it.
func testBuildHandler() *STI {
	s := &STI{
		docker:            &docker.FakeDocker{},
		incrementalDocker: &docker.FakeDocker{},
		installer:         &test.FakeInstaller{},
		git:               &test.FakeGit{},
		fs:                &testfs.FakeFileSystem{ExistsResult: map[string]bool{filepath.FromSlash("a-repo-source"): true}},
		tar:               &test.FakeTar{},
		config:            &api.Config{},
		result:            &api.Result{},
		callbackInvoker:   &test.FakeCallbackInvoker{},
	}
	s.source = &gitdownloader.Clone{Git: s.git, FileSystem: s.fs}
	s.initPostExecutorSteps()

	return s
}
// TestPostExecute covers the commit step across the matrix of
// tagged/untagged x incremental/non-incremental x with/without a previous
// image, checking the committed command, the tag, and that the previous
// image is only removed for incremental builds that had one.
func TestPostExecute(t *testing.T) {
	type postExecuteTest struct {
		tag              string
		incremental      bool
		previousImageID  string
		scriptsFromImage bool
	}
	testCases := []postExecuteTest{
		// 0: tagged, incremental, without previous image
		{"test/tag", true, "", true},
		// 1: tagged, incremental, with previous image
		{"test/tag", true, "test-image", true},
		// 2: tagged, no incremental, without previous image
		{"test/tag", false, "", true},
		// 3: tagged, no incremental, with previous image
		{"test/tag", false, "test-image", true},
		// 4: no tag, incremental, without previous image
		{"", true, "", false},
		// 5: no tag, incremental, with previous image
		{"", true, "test-image", false},
		// 6: no tag, no incremental, without previous image
		{"", false, "", false},
		// 7: no tag, no incremental, with previous image
		{"", false, "test-image", false},
	}

	for i, tc := range testCases {
		bh := testBuildHandler()
		containerID := "test-container-id"
		bh.result.Messages = []string{"one", "two"}
		bh.config.Tag = tc.tag
		bh.config.Incremental = tc.incremental
		dh := bh.docker.(*docker.FakeDocker)
		if tc.previousImageID != "" {
			bh.config.RemovePreviousImage = true
			bh.incremental = tc.incremental
			bh.docker.(*docker.FakeDocker).GetImageIDResult = tc.previousImageID
		}
		if tc.scriptsFromImage {
			bh.scriptsURL = map[string]string{api.Run: "image:///usr/libexec/s2i/run"}
		}
		err := bh.PostExecute(containerID, "cmd1")
		if err != nil {
			t.Errorf("(%d) Unexpected error from postExecute: %v", i, err)
		}
		// Ensure CommitContainer was called with the right parameters
		expectedCmd := []string{"cmd1/scripts/" + api.Run}
		if tc.scriptsFromImage {
			expectedCmd = []string{"/usr/libexec/s2i/" + api.Run}
		}
		if !reflect.DeepEqual(dh.CommitContainerOpts.Command, expectedCmd) {
			t.Errorf("(%d) Unexpected commit container command: %#v, expected %q", i, dh.CommitContainerOpts.Command, expectedCmd)
		}
		if dh.CommitContainerOpts.Repository != tc.tag {
			t.Errorf("(%d) Unexpected tag committed, expected %q, got %q", i, tc.tag, dh.CommitContainerOpts.Repository)
		}
		// Ensure image removal when incremental and previousImageID present
		if tc.incremental && tc.previousImageID != "" {
			if dh.RemoveImageName != "test-image" {
				t.Errorf("(%d) Previous image was not removed: %q", i, dh.RemoveImageName)
			}
		} else {
			if dh.RemoveImageName != "" {
				t.Errorf("(%d) Unexpected image removed: %s", i, dh.RemoveImageName)
			}
		}
	}
}
// TestExists verifies Exists only reports an incremental build when the
// incremental flag is set, the previous image can be pulled, AND the
// save-artifacts script is installed; it also checks the script path probed.
func TestExists(t *testing.T) {
type incrementalTest struct {
// incremental flag was passed
incremental bool
// previous image existence
previousImage bool
// script installed
scriptInstalled bool
// expected result
expected bool
}
tests := []incrementalTest{
// 0-1: incremental, no image, no matter what with scripts
{true, false, false, false},
{true, false, true, false},
// 2: incremental, previous image, no scripts
{true, true, false, false},
// 3: incremental, previous image, scripts installed
{true, true, true, true},
// 4-7: no incremental build - should always return false no matter what other flags are
{false, false, false, false},
{false, false, true, false},
{false, true, false, false},
{false, true, true, false},
}
for i, ti := range tests {
bh := testBuildHandler()
bh.config.WorkingDir = "/working-dir"
bh.config.Incremental = ti.incremental
bh.config.BuilderPullPolicy = api.PullAlways
bh.installedScripts = map[string]bool{api.SaveArtifacts: ti.scriptInstalled}
bh.incrementalDocker.(*docker.FakeDocker).PullResult = ti.previousImage
bh.config.DockerConfig = &api.DockerConfig{Endpoint: "http://localhost:4243"}
incremental := bh.Exists(bh.config)
if incremental != ti.expected {
t.Errorf("(%d) Unexpected incremental result: %v. Expected: %v",
i, incremental, ti.expected)
}
if ti.incremental && ti.previousImage && ti.scriptInstalled {
// If the fake filesystem recorded no existence checks, there is
// nothing further to assert for this case.
if len(bh.fs.(*testfs.FakeFileSystem).ExistsFile) == 0 {
continue
}
scriptChecked := bh.fs.(*testfs.FakeFileSystem).ExistsFile[0]
expectedScript := "/working-dir/upload/scripts/save-artifacts"
if scriptChecked != expectedScript {
t.Errorf("(%d) Unexpected script checked. Actual: %s. Expected: %s",
i, scriptChecked, expectedScript)
}
}
}
}
// TestSaveArtifacts verifies Save creates the artifacts upload directory, runs
// the container from the configured tag, and extracts the artifacts tar into
// the expected directory.
func TestSaveArtifacts(t *testing.T) {
bh := testBuildHandler()
bh.config.WorkingDir = "/working-dir"
bh.config.Tag = "image/tag"
fakeFS := bh.fs.(*testfs.FakeFileSystem)
fd := bh.docker.(*docker.FakeDocker)
th := bh.tar.(*test.FakeTar)
err := bh.Save(bh.config)
if err != nil {
t.Errorf("Unexpected error when saving artifacts: %v", err)
}
expectedArtifactDir := "/working-dir/upload/artifacts"
if filepath.ToSlash(fakeFS.MkdirDir) != expectedArtifactDir {
t.Errorf("Mkdir was not called with the expected directory: %s",
fakeFS.MkdirDir)
}
if fd.RunContainerOpts.Image != bh.config.Tag {
t.Errorf("Unexpected image sent to RunContainer: %s",
fd.RunContainerOpts.Image)
}
if filepath.ToSlash(th.ExtractTarDir) != expectedArtifactDir || th.ExtractTarReader == nil {
t.Errorf("ExtractTar was not called with the expected parameters.")
}
}
// TestSaveArtifactsCustomTag verifies that when IncrementalFromTag is set,
// Save pulls artifacts from that custom tag rather than the build tag.
func TestSaveArtifactsCustomTag(t *testing.T) {
bh := testBuildHandler()
bh.config.WorkingDir = "/working-dir"
bh.config.IncrementalFromTag = "custom/tag"
bh.config.Tag = "image/tag"
fakeFS := bh.fs.(*testfs.FakeFileSystem)
fd := bh.docker.(*docker.FakeDocker)
th := bh.tar.(*test.FakeTar)
err := bh.Save(bh.config)
if err != nil {
t.Errorf("Unexpected error when saving artifacts: %v", err)
}
expectedArtifactDir := "/working-dir/upload/artifacts"
if filepath.ToSlash(fakeFS.MkdirDir) != expectedArtifactDir {
t.Errorf("Mkdir was not called with the expected directory: %s",
fakeFS.MkdirDir)
}
// The container must be started from the incremental tag, not config.Tag.
if fd.RunContainerOpts.Image != bh.config.IncrementalFromTag {
t.Errorf("Unexpected image sent to RunContainer: %s",
fd.RunContainerOpts.Image)
}
if filepath.ToSlash(th.ExtractTarDir) != expectedArtifactDir || th.ExtractTarReader == nil {
t.Errorf("ExtractTar was not called with the expected parameters.")
}
}
// TestSaveArtifactsRunError verifies Save's error propagation when the
// container run fails: a plain error is returned as-is, a ContainerError is
// wrapped in a SaveArtifactsError, and a tar extraction error takes
// precedence when present.
func TestSaveArtifactsRunError(t *testing.T) {
tests := []error{
fmt.Errorf("Run error"),
s2ierr.NewContainerError("", -1, ""),
}
expected := []error{
tests[0],
s2ierr.NewSaveArtifactsError("", "", tests[1]),
}
// test with tar extract error or not
tarError := []bool{true, false}
for i := range tests {
for _, te := range tarError {
bh := testBuildHandler()
fd := bh.docker.(*docker.FakeDocker)
th := bh.tar.(*test.FakeTar)
fd.RunContainerError = tests[i]
if te {
th.ExtractTarError = fmt.Errorf("tar error")
}
err := bh.Save(bh.config)
if !te && err != expected[i] {
t.Errorf("Unexpected error returned from saveArtifacts: %v", err)
} else if te && err != th.ExtractTarError {
t.Errorf("Expected tar error. Got %v", err)
}
}
}
}
// TestSaveArtifactsErrorBeforeStart verifies that an error raised before the
// container starts is returned unwrapped from Save.
func TestSaveArtifactsErrorBeforeStart(t *testing.T) {
bh := testBuildHandler()
fd := bh.docker.(*docker.FakeDocker)
expected := fmt.Errorf("run error")
fd.RunContainerError = expected
fd.RunContainerErrorBeforeStart = true
err := bh.Save(bh.config)
if err != expected {
t.Errorf("Unexpected error returned from saveArtifacts: %v", err)
}
}
// TestSaveArtifactsExtractError verifies that a tar extraction failure is
// propagated unchanged from Save.
func TestSaveArtifactsExtractError(t *testing.T) {
bh := testBuildHandler()
th := bh.tar.(*test.FakeTar)
expected := fmt.Errorf("extract error")
th.ExtractTarError = expected
err := bh.Save(bh.config)
if err != expected {
t.Errorf("Unexpected error returned from saveArtifacts: %v", err)
}
}
// TestFetchSource verifies that source download clones the configured
// repository into the expected upload directory and, when a ref (URL
// fragment) is specified, checks out that ref in the clone.
func TestFetchSource(t *testing.T) {
	type fetchTest struct {
		refSpecified     bool
		checkoutExpected bool
	}
	tests := []fetchTest{
		// 0: no ref in the source URL, so no checkout should occur.
		{
			refSpecified:     false,
			checkoutExpected: false,
		},
		// 1: a ref is specified, so a checkout of that ref is expected.
		{
			refSpecified:     true,
			checkoutExpected: true,
		},
	}
	for testNum, ft := range tests {
		bh := testBuildHandler()
		gh := bh.git.(*test.FakeGit)
		bh.config.WorkingDir = "/working-dir"
		bh.config.Source = git.MustParse("a-repo-source")
		if ft.refSpecified {
			bh.config.Source.URL.Fragment = "a-branch"
		}
		expectedTargetDir := "/working-dir/upload/src"
		_, e := bh.source.Download(bh.config)
		if e != nil {
			t.Errorf("Unexpected error %v [%d]", e, testNum)
		}
		// BUG FIX: the failure message previously reported the expected
		// value as "a-source-repo-source", which is not what is compared.
		if gh.CloneSource.StringNoFragment() != "a-repo-source" {
			t.Errorf("Clone was not called with the expected source. Got %s, expected %s [%d]", gh.CloneSource, "a-repo-source", testNum)
		}
		if filepath.ToSlash(gh.CloneTarget) != expectedTargetDir {
			t.Errorf("Unexpected target directory for clone operation. Got %s, expected %s [%d]", gh.CloneTarget, expectedTargetDir, testNum)
		}
		if ft.checkoutExpected {
			if gh.CheckoutRef != "a-branch" {
				t.Errorf("Checkout was not called with the expected branch. Got %s, expected %s [%d]", gh.CheckoutRef, "a-branch", testNum)
			}
			if filepath.ToSlash(gh.CheckoutRepo) != expectedTargetDir {
				t.Errorf("Unexpected target repository for checkout operation. Got %s, expected %s [%d]", gh.CheckoutRepo, expectedTargetDir, testNum)
			}
		}
	}
}
// TestPrepareOK verifies the happy path of Prepare: the working directory is
// created, all expected subdirectories are made, and required/optional
// scripts are handed to the installer.
func TestPrepareOK(t *testing.T) {
rh := newFakeSTI(&FakeSTI{})
rh.SetScripts([]string{api.Assemble, api.Run}, []string{api.SaveArtifacts})
rh.fs.(*testfs.FakeFileSystem).WorkingDirResult = "/working-dir"
err := rh.Prepare(rh.config)
if err != nil {
t.Errorf("An error occurred setting up the config handler: %v", err)
}
if !rh.fs.(*testfs.FakeFileSystem).WorkingDirCalled {
t.Errorf("Working directory was not created.")
}
var expected []string
for _, dir := range workingDirs {
expected = append(expected, filepath.FromSlash("/working-dir/"+dir))
}
mkdirs := rh.fs.(*testfs.FakeFileSystem).MkdirAllDir
if !reflect.DeepEqual(mkdirs, expected) {
t.Errorf("Unexpected set of MkdirAll calls: %#v", mkdirs)
}
// Scripts[0] holds the required scripts, Scripts[1] the optional ones.
scripts := rh.installer.(*test.FakeInstaller).Scripts
if !reflect.DeepEqual(scripts[0], []string{api.Assemble, api.Run}) {
t.Errorf("Unexpected set of required scripts: %#v", scripts[0])
}
if !reflect.DeepEqual(scripts[1], []string{api.SaveArtifacts}) {
t.Errorf("Unexpected set of optional scripts: %#v", scripts[1])
}
}
// TestPrepareErrorCreatingWorkingDir verifies Prepare surfaces a failure to
// create the working directory.
func TestPrepareErrorCreatingWorkingDir(t *testing.T) {
rh := newFakeSTI(&FakeSTI{})
rh.fs.(*testfs.FakeFileSystem).WorkingDirError = errors.New("WorkingDirError")
err := rh.Prepare(rh.config)
if err == nil || err.Error() != "WorkingDirError" {
t.Errorf("An error was expected for WorkingDir, but got different: %v", err)
}
}
// TestPrepareErrorMkdirAll verifies Prepare surfaces a failure to create the
// working subdirectories.
func TestPrepareErrorMkdirAll(t *testing.T) {
rh := newFakeSTI(&FakeSTI{})
rh.fs.(*testfs.FakeFileSystem).MkdirAllError = errors.New("MkdirAllError")
err := rh.Prepare(rh.config)
if err == nil || err.Error() != "MkdirAllError" {
t.Errorf("An error was expected for MkdirAll, but got different: %v", err)
}
}
// TestPrepareErrorRequiredDownloadAndInstall verifies Prepare fails when the
// installer cannot download a required script.
func TestPrepareErrorRequiredDownloadAndInstall(t *testing.T) {
rh := newFakeSTI(&FakeSTI{})
rh.SetScripts([]string{api.Assemble, api.Run}, []string{api.SaveArtifacts})
rh.installer.(*test.FakeInstaller).Error = fmt.Errorf("%v", api.Assemble)
err := rh.Prepare(rh.config)
if err == nil || err.Error() != api.Assemble {
t.Errorf("An error was expected for required DownloadAndInstall, but got different: %v", err)
}
}
// TestPrepareErrorOptionalDownloadAndInstall verifies a failed optional
// script download does not fail Prepare.
func TestPrepareErrorOptionalDownloadAndInstall(t *testing.T) {
rh := newFakeSTI(&FakeSTI{})
rh.SetScripts([]string{api.Assemble, api.Run}, []string{api.SaveArtifacts})
err := rh.Prepare(rh.config)
if err != nil {
t.Errorf("Unexpected error when downloading optional scripts: %v", err)
}
}
// TestPrepareUseCustomRuntimeArtifacts verifies that a user-supplied runtime
// artifacts mapping is preserved verbatim through Prepare.
func TestPrepareUseCustomRuntimeArtifacts(t *testing.T) {
expectedMapping := filepath.FromSlash("/src") + ":dst"
builder := newFakeSTI(&FakeSTI{})
config := builder.config
config.RuntimeImage = "my-app"
config.RuntimeArtifacts.Set(expectedMapping)
if err := builder.Prepare(config); err != nil {
t.Fatalf("Prepare() unexpectedly failed with error: %v", err)
}
if actualMapping := config.RuntimeArtifacts.String(); actualMapping != expectedMapping {
t.Errorf("Prepare() shouldn't change mapping, but it was modified from %v to %v", expectedMapping, actualMapping)
}
}
// TestPrepareFailForEmptyRuntimeArtifacts verifies Prepare fails with a
// descriptive error when a runtime image is set but the image declares no
// assemble-input files and the user supplied no mapping.
func TestPrepareFailForEmptyRuntimeArtifacts(t *testing.T) {
builder := newFakeSTI(&FakeSTI{})
fakeDocker := builder.docker.(*docker.FakeDocker)
fakeDocker.AssembleInputFilesResult = ""
config := builder.config
config.RuntimeImage = "my-app"
if len(config.RuntimeArtifacts) > 0 {
t.Fatalf("RuntimeArtifacts must be empty by default")
}
err := builder.Prepare(config)
if err == nil {
t.Errorf("Prepare() should fail but it didn't")
} else if expectedError := "no runtime artifacts to copy"; !strings.Contains(err.Error(), expectedError) {
t.Errorf("Prepare() should fail with error that contains text %q but failed with error: %q", expectedError, err)
}
}
// TestPrepareRuntimeArtifactsValidation verifies Prepare rejects invalid
// src:dst mappings (relative source, absolute destination, destination
// escaping via "..") whether the mapping comes from the user or the image.
func TestPrepareRuntimeArtifactsValidation(t *testing.T) {
testCases := []struct {
mapping string
expectedError string
}{
{
mapping: "src:dst",
expectedError: "source must be an absolute path",
},
{
mapping: "/src:/dst",
expectedError: "destination must be a relative path",
},
{
mapping: "/src:../dst",
expectedError: "destination cannot start with '..'",
},
}
for _, testCase := range testCases {
// Each mapping is validated both when set by the user and when
// reported by the runtime image's assemble-input files.
for _, mappingFromUser := range []bool{true, false} {
builder := newFakeSTI(&FakeSTI{})
config := builder.config
config.RuntimeImage = "my-app"
if mappingFromUser {
config.RuntimeArtifacts.Set(testCase.mapping)
} else {
fakeDocker := builder.docker.(*docker.FakeDocker)
fakeDocker.AssembleInputFilesResult = testCase.mapping
}
err := builder.Prepare(config)
if err == nil {
t.Errorf("Prepare() should fail but it didn't")
} else if !strings.Contains(err.Error(), testCase.expectedError) {
t.Errorf("Prepare() should fail to validate mapping %q with error that contains text %q but failed with error: %q", testCase.mapping, testCase.expectedError, err)
}
}
}
}
// TestPrepareSetRuntimeArtifacts verifies Prepare adopts the runtime image's
// assemble-input files as the artifacts mapping (";"-separated entries become
// ","-separated) when the user supplied none.
func TestPrepareSetRuntimeArtifacts(t *testing.T) {
for _, mapping := range []string{filepath.FromSlash("/src") + ":dst", filepath.FromSlash("/src1") + ":dst1;" + filepath.FromSlash("/src1") + ":dst1"} {
expectedMapping := strings.Replace(mapping, ";", ",", -1)
builder := newFakeSTI(&FakeSTI{})
fakeDocker := builder.docker.(*docker.FakeDocker)
fakeDocker.AssembleInputFilesResult = mapping
config := builder.config
config.RuntimeImage = "my-app"
if len(config.RuntimeArtifacts) > 0 {
t.Fatalf("RuntimeArtifacts must be empty by default")
}
if err := builder.Prepare(config); err != nil {
t.Fatalf("Prepare() unexpectedly failed with error: %v", err)
}
if actualMapping := config.RuntimeArtifacts.String(); actualMapping != expectedMapping {
t.Errorf("Prepare() shouldn't change mapping, but it was modified from %v to %v", expectedMapping, actualMapping)
}
}
}
// TestPrepareDownloadAssembleRuntime verifies Prepare downloads the
// assemble-runtime script via the runtime installer when a runtime image is
// configured.
func TestPrepareDownloadAssembleRuntime(t *testing.T) {
installer := &test.FakeInstaller{}
builder := newFakeSTI(&FakeSTI{})
builder.runtimeInstaller = installer
builder.optionalRuntimeScripts = []string{api.AssembleRuntime}
config := builder.config
config.RuntimeImage = "my-app"
config.RuntimeArtifacts.Set("/src:dst")
if err := builder.Prepare(config); err != nil {
t.Fatalf("Prepare() unexpectedly failed with error: %v", err)
}
if len(installer.Scripts) != 1 || installer.Scripts[0][0] != api.AssembleRuntime {
t.Errorf("Prepare() should download %q script but it downloaded %v", api.AssembleRuntime, installer.Scripts)
}
}
// TestExecuteOK verifies the happy path of Execute: the upload directory is
// tarred and streamed to the container, RunContainer receives the expected
// image/user/command/environment, and the post-executor is invoked with the
// resulting container ID and command.
func TestExecuteOK(t *testing.T) {
rh := newFakeBaseSTI()
pe := &FakeSTI{}
rh.postExecutor = pe
rh.config.WorkingDir = "/working-dir"
rh.config.BuilderImage = "test/image"
rh.config.BuilderPullPolicy = api.PullAlways
rh.config.Environment = api.EnvironmentList{
api.EnvironmentSpec{
Name: "Key1",
Value: "Value1",
},
api.EnvironmentSpec{
Name: "Key2",
Value: "Value2",
},
}
expectedEnv := []string{"Key1=Value1", "Key2=Value2"}
th := rh.tar.(*test.FakeTar)
th.CreateTarResult = "/working-dir/test.tar"
fd := rh.docker.(*docker.FakeDocker)
fd.RunContainerContainerID = "1234"
fd.RunContainerCmd = []string{"one", "two"}
err := rh.Execute("test-command", "foo", rh.config)
if err != nil {
t.Errorf("Unexpected error returned: %v", err)
}
// Take a snapshot of the fake tar state recorded during Execute.
th = rh.tar.(*test.FakeTar).Copy()
if th.CreateTarBase != "" {
t.Errorf("Unexpected tar base directory: %s", th.CreateTarBase)
}
if filepath.ToSlash(th.CreateTarDir) != "/working-dir/upload" {
t.Errorf("Unexpected tar directory: %s", th.CreateTarDir)
}
fh, ok := rh.fs.(*testfs.FakeFileSystem)
if !ok {
t.Fatalf("Unable to convert %v to FakeFilesystem", rh.fs)
}
if fh.OpenFile != "" {
t.Fatalf("Unexpected file opened: %s", fh.OpenFile)
}
if fh.OpenFileResult != nil {
t.Errorf("Tar file was opened.")
}
ro := fd.RunContainerOpts
if ro.User != "foo" {
t.Errorf("Expected user to be foo, got %q", ro.User)
}
if ro.Image != rh.config.BuilderImage {
t.Errorf("Unexpected Image passed to RunContainer")
}
// Input must be streamed (piped) rather than read from a file on disk.
if _, ok := ro.Stdin.(*io.PipeReader); !ok {
t.Errorf("Unexpected input stream: %#v", ro.Stdin)
}
if ro.PullImage {
t.Errorf("PullImage is true for RunContainer, should be false")
}
if ro.Command != "test-command" {
t.Errorf("Unexpected command passed to RunContainer: %s",
ro.Command)
}
if pe.PostExecuteContainerID != "1234" {
t.Errorf("PostExecutor not called with expected ID: %s",
pe.PostExecuteContainerID)
}
if !reflect.DeepEqual(ro.Env, expectedEnv) {
t.Errorf("Unexpected container environment passed to RunContainer: %v, should be %v", ro.Env, expectedEnv)
}
if !reflect.DeepEqual(pe.PostExecuteDestination, "test-command") {
t.Errorf("PostExecutor not called with expected command: %s", pe.PostExecuteDestination)
}
}
// TestExecuteRunContainerError verifies that a RunContainer failure is
// returned unchanged from Execute.
func TestExecuteRunContainerError(t *testing.T) {
rh := newFakeSTI(&FakeSTI{})
fd := rh.docker.(*docker.FakeDocker)
runContainerError := fmt.Errorf("an error")
fd.RunContainerError = runContainerError
err := rh.Execute("test-command", "", rh.config)
if err != runContainerError {
t.Errorf("Did not get expected error, got %v", err)
}
}
// TestExecuteErrorCreateTarFile verifies that a failure to create the upload
// tar aborts Execute with that error.
func TestExecuteErrorCreateTarFile(t *testing.T) {
rh := newFakeSTI(&FakeSTI{})
rh.tar.(*test.FakeTar).CreateTarError = errors.New("CreateTarError")
err := rh.Execute("test-command", "", rh.config)
if err == nil || err.Error() != "CreateTarError" {
t.Errorf("An error was expected for CreateTarFile, but got different: %#v", err)
}
}
// TestCleanup verifies the default cleaner removes the working directory
// unless PreserveWorkingDir is set.
func TestCleanup(t *testing.T) {
rh := newFakeBaseSTI()
rh.config.WorkingDir = "/working-dir"
preserve := []bool{false, true}
for _, p := range preserve {
rh.config.PreserveWorkingDir = p
// Fresh fake filesystem per iteration so RemoveDirName is isolated.
rh.fs = &testfs.FakeFileSystem{}
rh.garbage = build.NewDefaultCleaner(rh.fs, rh.docker)
rh.garbage.Cleanup(rh.config)
removedDir := rh.fs.(*testfs.FakeFileSystem).RemoveDirName
if p && removedDir != "" {
t.Errorf("Expected working directory to be preserved, but it was removed.")
} else if !p && removedDir == "" {
t.Errorf("Expected working directory to be removed, but it was preserved.")
}
}
}
// TestNewWithInvalidExcludeRegExp verifies that New rejects a config whose
// ExcludeRegExp is not a valid regular expression, returning the underlying
// *syntax.Error with the missing-parenthesis code.
func TestNewWithInvalidExcludeRegExp(t *testing.T) {
	_, err := New(nil, &api.Config{
		DockerConfig:  docker.GetDefaultDockerConfig(),
		ExcludeRegExp: "(",
	}, nil, build.Overrides{})
	// BUG FIX: the original guard (`ok && ...`) silently passed when err was
	// nil or not a *syntax.Error at all; the test must fail in those cases.
	syntaxErr, ok := err.(*syntax.Error)
	if !ok || syntaxErr.Code != syntax.ErrMissingParen {
		t.Errorf("expected regexp compilation error, got %v", err)
	}
}
| wjiangjay/origin | vendor/github.com/openshift/source-to-image/pkg/build/strategies/sti/sti_test.go | GO | apache-2.0 | 28,181 |
/*
* Copyright 2015 Open Networking Laboratory
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.store.service;
/**
 * Builder for constructing distributed {@code AtomicCounter} instances.
 */
public interface AtomicCounterBuilder {
/**
 * Sets the name for the atomic counter.
 * <p>
 * Each atomic counter is identified by a unique name.
 * </p>
 * <p>
 * Note: This is a mandatory parameter.
 * </p>
 *
 * @param name name of the atomic counter
 * @return this AtomicCounterBuilder
 */
AtomicCounterBuilder withName(String name);
/**
 * Creates this counter on the partition that spans the entire cluster.
 * <p>
 * When partitioning is disabled, the counter state will be
 * ephemeral and does not survive a full cluster restart.
 * </p>
 * <p>
 * Note: By default partitions are enabled.
 * </p>
 * @return this AtomicCounterBuilder
 */
AtomicCounterBuilder withPartitionsDisabled();
/**
 * Disables the metering service that gathers usage and performance
 * metrics for this counter. By default metering is enabled and usage
 * data is collected.
 *
 * @return this AtomicCounterBuilder
 */
AtomicCounterBuilder withMeteringDisabled();
/**
 * Builds an AtomicCounter based on the configuration options
 * supplied to this builder.
 *
 * @return new AtomicCounter
 * @throws java.lang.RuntimeException if a mandatory parameter is missing
 */
AtomicCounter build();
/**
 * Builds an AsyncAtomicCounter based on the configuration options
 * supplied to this builder.
 *
 * @return new AsyncAtomicCounter
 * @throws java.lang.RuntimeException if a mandatory parameter is missing
 */
AsyncAtomicCounter buildAsyncCounter();
}
| jinlongliu/onos | core/api/src/main/java/org/onosproject/store/service/AtomicCounterBuilder.java | Java | apache-2.0 | 2,273 |
<!doctype html>
<html ⚡>
<head>
<meta charset="utf-8">
<title>Forms Examples in AMP</title>
<link rel="canonical" href="amps.html" >
<meta name="viewport" content="width=device-width,minimum-scale=1,initial-scale=1">
<style amp-boilerplate>body{-webkit-animation:-amp-start 8s steps(1,end) 0s 1 normal both;-moz-animation:-amp-start 8s steps(1,end) 0s 1 normal both;-ms-animation:-amp-start 8s steps(1,end) 0s 1 normal both;animation:-amp-start 8s steps(1,end) 0s 1 normal both}@-webkit-keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}@-moz-keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}@-ms-keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}@-o-keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}@keyframes -amp-start{from{visibility:hidden}to{visibility:visible}}</style><noscript><style amp-boilerplate>body{-webkit-animation:none;-moz-animation:none;-ms-animation:none;animation:none}</style></noscript>
<script async src="https://cdn.ampproject.org/v0.js"></script>
<script async custom-element="amp-form" src="https://cdn.ampproject.org/v0/amp-form-0.1.js"></script>
<script async custom-template="amp-mustache" src="https://cdn.ampproject.org/v0/amp-mustache-latest.js"></script>
<script async custom-element="amp-bind" src="https://cdn.ampproject.org/v0/amp-bind-0.1.js"></script>
</head>
<body>
<!-- Buttons mutate amp-bind state; bound [text] spans below reflect it. -->
<p>Submit the form and then press a button to change the text in the below template</p>
<button on="tap:AMP.setState({selected: 1})">1</button>
<button on="tap:AMP.setState({selected: 2})">2</button>
<button on="tap:AMP.setState({selected: 3})">3</button>
<button on="tap:AMP.setState({selected: 4})">4</button>
<p [text]="'$' + selectedRange">$0</p>
<!-- FIX: explicitly close the paragraph instead of relying on implicit <p> closure. -->
<p>Color Selection: <span [text]="colorSelection || 'red'">red</span></p>
<h4>Enter your name and email.</h4>
<button on="tap:AMP.setState({nameVar: 'Alice', emailVar: '[email protected]'}), myForm.submit">
Set name/email with amp-bind and submit (chained action)
</button>
<form method="post"
action-xhr="/form/echo-json/post"
target="_blank"
id="myForm">
<fieldset>
<label>
<span>Your name</span>
<input type="text" name="name" id="name1" [value]="nameVar" required>
</label>
<label>
<span>Your email</span>
<input type="email" name="email" id="email1" [value]="emailVar" required>
</label>
<label>
<span>Your choice</span>
<select on="change:AMP.setState({colorSelection: event.value})">
<option value="red">red</option>
<option value="green">green</option>
<option value="blue">blue</option>
<option value="yellow">yellow</option>
</select>
</label>
<label>
<span>Your offer</span>
$100
<input type="range" min="100" max="200" step="10" on="change:AMP.setState({selectedRange: event.value})"/>
$200
</label>
<input type="submit" value="Subscribe">
</fieldset>
<div submit-success>
<template type="amp-mustache">
Success! Thanks {{name}} for entering your email: {{email}}
<p>You have selected: <span [text]="selected ? selected : 'No selection'">No selection</span></p>
</template>
</div>
<div submit-error>
<template type="amp-mustache">
Error! Failure to register: {{name}} : {{email}}
<p>You have selected: <span [text]="selected ? selected : 'No selection'">No selection</span></p>
</template>
</div>
</form>
<h2 class="sample-heading">Form validation with amp-bind input values</h2>
<button on="tap:AMP.setState({txt: 'hello'})">
set txt
</button>
<button on="tap:AMP.setState({txt: ''})">
clear
</button>
<form method="post" action-xhr="/form/echo-json/post" target="_blank"
custom-validation-reporting="show-all-on-submit">
<input type="text" name="name" placeholder="Name..." /><br />
<span visible-when-invalid="valueMissing"
validation-for="message">MISSING VALUE</span>
<textarea id="message" required [text]="txt" name="message"></textarea><br />
<input type="submit" value="Submit" />
<div submit-success>
<template type="amp-mustache">
Success! Thanks {{name}} for submitting this message: {{message}}
</template>
</div>
</form>
</body>
</html>
| lannka/amphtml | examples/bind/forms.amp.html | HTML | apache-2.0 | 4,571 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for ResNet50 under graph execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.resnet50 import resnet50
from tensorflow.contrib.summary import summary_test_util
def data_format():
  """Returns the image data format to use, preferring NCHW on GPU."""
  if tf.test.is_gpu_available():
    return 'channels_first'
  return 'channels_last'
def image_shape(batch_size):
  """Returns the 224x224 RGB input shape for `batch_size` images.

  The channel axis position follows data_format().
  """
  if data_format() != 'channels_first':
    return [batch_size, 224, 224, 3]
  return [batch_size, 3, 224, 224]
def random_batch(batch_size):
  """Builds a random batch of images and one-hot labels.

  Args:
    batch_size: number of examples in the batch.

  Returns:
    Tuple of (images, one_hot_labels) as float32 numpy arrays; labels are
    one-hot over 1000 classes.
  """
  num_classes = 1000
  images = np.random.rand(*image_shape(batch_size)).astype(np.float32)
  labels = np.random.randint(
      low=0, high=num_classes, size=[batch_size]).astype(np.int32)
  one_hot = np.zeros((batch_size, num_classes)).astype(np.float32)
  one_hot[np.arange(batch_size), labels] = 1.
  return images, one_hot
class ResNet50GraphTest(tf.test.TestCase):
  # Graph-mode functional tests for the ResNet50 model.
  def testApply(self):
    """Runs inference and checks the logits have shape [batch, 1000]."""
    # Use small batches for tests because the OSS version runs
    # in constrained GPU environment with 1-2GB of memory.
    batch_size = 8
    with tf.Graph().as_default():
      images = tf.placeholder(tf.float32, image_shape(None))
      model = resnet50.ResNet50(data_format())
      predictions = model(images, training=False)
      init = tf.global_variables_initializer()
      with tf.Session() as sess:
        sess.run(init)
        np_images, _ = random_batch(batch_size)
        out = sess.run(predictions, feed_dict={images: np_images})
        self.assertAllEqual([batch_size, 1000], out.shape)
  def testTrainWithSummary(self):
    """Runs one training step and verifies a 'loss' summary event is written."""
    with tf.Graph().as_default():
      images = tf.placeholder(tf.float32, image_shape(None), name='images')
      labels = tf.placeholder(tf.float32, [None, 1000], name='labels')
      tf.train.get_or_create_global_step()
      logdir = tempfile.mkdtemp()
      with tf.contrib.summary.always_record_summaries():
        with tf.contrib.summary.create_file_writer(
            logdir, max_queue=0,
            name='t0').as_default():
          model = resnet50.ResNet50(data_format())
          logits = model(images, training=True)
          loss = tf.losses.softmax_cross_entropy(
              logits=logits, onehot_labels=labels)
          tf.contrib.summary.scalar(name='loss', tensor=loss)
          optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
          train_op = optimizer.minimize(loss)
      init = tf.global_variables_initializer()
      # 321 is the expected number of variables for this ResNet50 graph.
      self.assertEqual(321, len(tf.global_variables()))
      # Use small batches for tests because the OSS version runs
      # in constrained GPU environment with 1-2GB of memory.
      batch_size = 2
      with tf.Session() as sess:
        sess.run(init)
        sess.run(tf.contrib.summary.summary_writer_initializer_op())
        np_images, np_labels = random_batch(batch_size)
        sess.run([train_op, tf.contrib.summary.all_summary_ops()],
                 feed_dict={images: np_images, labels: np_labels})
        events = summary_test_util.events_from_logdir(logdir)
        self.assertEqual(len(events), 2)
        self.assertEqual(events[1].summary.value[0].tag, 'loss')
class ResNet50Benchmarks(tf.test.Benchmark):
  # Graph-mode throughput benchmarks for ResNet50 inference and training.
  def _report(self, label, start, num_iters, batch_size):
    """Reports average step time and examples/sec since `start`."""
    avg_time = (time.time() - start) / num_iters
    dev = 'gpu' if tf.test.is_gpu_available() else 'cpu'
    name = 'graph_%s_%s_batch_%d_%s' % (label, dev, batch_size, data_format())
    extras = {'examples_per_sec': batch_size / avg_time}
    self.report_benchmark(
        iters=num_iters, wall_time=avg_time, name=name, extras=extras)
  def benchmark_graph_apply(self):
    """Benchmarks forward-pass (inference) throughput."""
    with tf.Graph().as_default():
      images = tf.placeholder(tf.float32, image_shape(None))
      model = resnet50.ResNet50(data_format())
      predictions = model(images, training=False)
      init = tf.global_variables_initializer()
      batch_size = 64
      with tf.Session() as sess:
        sess.run(init)
        np_images, _ = random_batch(batch_size)
        num_burn, num_iters = (3, 30)
        # Warm-up iterations are excluded from timing.
        for _ in range(num_burn):
          sess.run(predictions, feed_dict={images: np_images})
        start = time.time()
        for _ in range(num_iters):
          # Comparison with the eager execution benchmark in resnet50_test.py
          # isn't entirely fair as the time here includes the cost of copying
          # the feeds from CPU memory to GPU.
          sess.run(predictions, feed_dict={images: np_images})
        self._report('apply', start, num_iters, batch_size)
  def benchmark_graph_train(self):
    """Benchmarks one-step training throughput at several batch sizes."""
    for batch_size in [16, 32, 64]:
      with tf.Graph().as_default():
        np_images, np_labels = random_batch(batch_size)
        # A repeated single-batch dataset avoids feed_dict copy overhead.
        dataset = tf.data.Dataset.from_tensors((np_images, np_labels)).repeat()
        images, labels = tf.compat.v1.data.make_one_shot_iterator(
            dataset).get_next()
        model = resnet50.ResNet50(data_format())
        logits = model(images, training=True)
        loss = tf.losses.softmax_cross_entropy(
            logits=logits, onehot_labels=labels)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
        train_op = optimizer.minimize(loss)
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
          sess.run(init)
          (num_burn, num_iters) = (5, 10)
          for _ in range(num_burn):
            sess.run(train_op)
          start = time.time()
          for _ in range(num_iters):
            sess.run(train_op)
          self._report('train', start, num_iters, batch_size)
if __name__ == '__main__':
tf.test.main()
| ghchinoy/tensorflow | tensorflow/contrib/eager/python/examples/resnet50/resnet50_graph_test.py | Python | apache-2.0 | 6,382 |
/*
Copyright (c) 2004-2010, The Dojo Foundation All Rights Reserved.
Available via Academic Free License >= 2.1 OR the modified BSD license.
see: http://dojotoolkit.org/license for details
*/
if(!dojo._hasResource["dojox.string.sprintf"]){
dojo._hasResource["dojox.string.sprintf"]=true;
dojo.provide("dojox.string.sprintf");
dojo.require("dojox.string.tokenize");
dojox.string.sprintf = function(format, filler){
	// summary:
	//		Formats `format` printf-style, substituting the remaining
	//		arguments, and returns the resulting string.
	var substitutions = [];
	for(var i = 1; i < arguments.length; i++){
		substitutions.push(arguments[i]);
	}
	var formatter = new dojox.string.sprintf.Formatter(format);
	return formatter.format.apply(formatter, substitutions);
};
dojox.string.sprintf.Formatter = function(format){
	// summary:
	//		Compiles a printf-style format string into a reusable token list.
	//		Literal chunks stay as strings; placeholders become descriptor
	//		objects produced by _parseDelim.
	// Removed an unused local array ("var _6=[]") left over from minification.
	this._mapped = false;
	this._format = format;
	this._tokens = dojox.string.tokenize(format, this._re, this._parseDelim, this);
};
dojo.extend(dojox.string.sprintf.Formatter,{_re:/\%(?:\(([\w_]+)\)|([1-9]\d*)\$)?([0 +\-\#]*)(\*|\d+)?(\.)?(\*|\d+)?[hlL]?([\%scdeEfFgGiouxX])/g,_parseDelim:function(_7,_8,_9,_a,_b,_c,_d){
if(_7){
this._mapped=true;
}
return {mapping:_7,intmapping:_8,flags:_9,_minWidth:_a,period:_b,_precision:_c,specifier:_d};
// Conversion-specifier table for the minified dojox.string.sprintf engine.
// Keys are printf specifier characters; each entry describes how to render
// the matching argument. Entries with "extend" inherit the named entry's
// fields lazily (mixed in by format() the first time the token compiles).
},_specifiers:{b:{base:2,isInt:true},o:{base:8,isInt:true},x:{base:16,isInt:true},X:{extend:["x"],toUpper:true},d:{base:10,isInt:true},i:{extend:["d"]},u:{extend:["d"],isUnsigned:true},c:{setArg:function(_e){
// %c: numeric arguments are converted to the character with that code.
if(!isNaN(_e.arg)){
var _f=parseInt(_e.arg);
if(_f<0||_f>127){
throw new Error("invalid character code passed to %c in sprintf");
}
_e.arg=isNaN(_f)?""+_f:String.fromCharCode(_f);
}
}},s:{setMaxWidth:function(_10){
// %s: a "." precision acts as a maximum width (truncation) for strings.
_10.maxWidth=(_10.period==".")?_10.precision:-1;
}},e:{isDouble:true,doubleNotation:"e"},E:{extend:["e"],toUpper:true},f:{isDouble:true,doubleNotation:"f"},F:{extend:["f"]},g:{isDouble:true,doubleNotation:"g"},G:{extend:["g"],toUpper:true}},format:function(_11){
// Renders the pre-parsed token stream (this._tokens) against the supplied
// arguments. _11 is a mapping object when the format uses named mappings;
// otherwise positional arguments are consumed via the _12 cursor.
if(this._mapped&&typeof _11!="object"){
throw new Error("format requires a mapping");
}
var str="";
var _12=0;
for(var i=0,_13;i<this._tokens.length;i++){
_13=this._tokens[i];
if(typeof _13=="string"){
// Literal chunk between conversions: copied through unchanged.
str+=_13;
}else{
if(this._mapped){
if(typeof _11[_13.mapping]=="undefined"){
throw new Error("missing key "+_13.mapping);
}
_13.arg=_11[_13.mapping];
}else{
if(_13.intmapping){
// Explicit argument index ("%2$d" style). Note: "var" here re-declares
// the outer _12 cursor, which is the same variable under function-scoped
// var hoisting — intentional, not a fresh binding.
var _12=parseInt(_13.intmapping)-1;
}
if(_12>=arguments.length){
throw new Error("got "+arguments.length+" printf arguments, insufficient for '"+this._format+"'");
}
_13.arg=arguments[_12++];
}
if(!_13.compiled){
// First use of this token: normalize flags/width/precision once and
// cache the result on the token object (the "compile" step).
_13.compiled=true;
_13.sign="";
_13.zeroPad=false;
_13.rightJustify=false;
_13.alternative=false;
var _14={};
for(var fi=_13.flags.length;fi--;){
var _15=_13.flags.charAt(fi);
_14[_15]=true;
switch(_15){
case " ":
_13.sign=" ";
break;
case "+":
_13.sign="+";
break;
case "0":
// "-" beats "0", as in C printf.
_13.zeroPad=(_14["-"])?false:true;
break;
case "-":
_13.rightJustify=true;
_13.zeroPad=false;
break;
case "#":
_13.alternative=true;
break;
default:
throw Error("bad formatting flag '"+_13.flags.charAt(fi)+"'");
}
}
_13.minWidth=(_13._minWidth)?parseInt(_13._minWidth):0;
_13.maxWidth=-1;
_13.toUpper=false;
_13.isUnsigned=false;
_13.isInt=false;
_13.isDouble=false;
_13.precision=1;
if(_13.period=="."){
if(_13._precision){
_13.precision=parseInt(_13._precision);
}else{
_13.precision=0;
}
}
var _16=this._specifiers[_13.specifier];
if(typeof _16=="undefined"){
throw new Error("unexpected specifier '"+_13.specifier+"'");
}
if(_16.extend){
// Resolve "extend" inheritance once, mutating the shared table entry so
// later tokens see the flattened version.
dojo.mixin(_16,this._specifiers[_16.extend]);
delete _16.extend;
}
dojo.mixin(_13,_16);
}
if(typeof _13.setArg=="function"){
_13.setArg(_13);
}
if(typeof _13.setMaxWidth=="function"){
_13.setMaxWidth(_13);
}
if(_13._minWidth=="*"){
// "*" width: consume the next positional argument as the field width.
if(this._mapped){
throw new Error("* width not supported in mapped formats");
}
_13.minWidth=parseInt(arguments[_12++]);
if(isNaN(_13.minWidth)){
throw new Error("the argument for * width at position "+_12+" is not a number in "+this._format);
}
if(_13.minWidth<0){
// A negative "*" width means left-justify, as in C printf.
_13.rightJustify=true;
_13.minWidth=-_13.minWidth;
}
}
if(_13._precision=="*"&&_13.period=="."){
// "*" precision: likewise consumed from the argument list.
if(this._mapped){
throw new Error("* precision not supported in mapped formats");
}
_13.precision=parseInt(arguments[_12++]);
if(isNaN(_13.precision)){
throw Error("the argument for * precision at position "+_12+" is not a number in "+this._format);
}
if(_13.precision<0){
_13.precision=1;
_13.period="";
}
}
if(_13.isInt){
// An explicit precision disables zero padding for integers (C behavior).
if(_13.period=="."){
_13.zeroPad=false;
}
this.formatInt(_13);
}else{
if(_13.isDouble){
if(_13.period!="."){
_13.precision=6;
}
this.formatDouble(_13);
}
}
this.fitField(_13);
str+=""+_13.arg;
}
}
return str;
// formatInt: renders an integral argument into _17.arg according to the
// compiled token (base, sign, zero padding, "#" alternative form).
// _zeros10/_spaces10 are padding chunks consumed by zeroPad/spacePad below.
},_zeros10:"0000000000",_spaces10:" ",formatInt:function(_17){
var i=parseInt(_17.arg);
if(!isFinite(i)){
// parseInt produced NaN/Infinity: tolerate that only for genuine numbers
// (e.g. NaN itself); anything else means the caller passed a non-integer.
if(typeof _17.arg!="number"){
throw new Error("format argument '"+_17.arg+"' not an integer; parseInt returned "+i);
}
i=0;
}
// Emulate 32-bit unsigned wrap-around for %u and for non-decimal bases.
if(i<0&&(_17.isUnsigned||_17.base!=10)){
i=4294967295+i+1;
}
if(i<0){
// Pad the magnitude first so the minus sign lands outside the zeros.
_17.arg=(-i).toString(_17.base);
this.zeroPad(_17);
_17.arg="-"+_17.arg;
}else{
_17.arg=i.toString(_17.base);
// Zero with an explicit zero precision prints as the empty string,
// matching C printf's "%.0d" of 0.
if(!i&&!_17.precision){
_17.arg="";
}else{
this.zeroPad(_17);
}
if(_17.sign){
_17.arg=_17.sign+_17.arg;
}
}
if(_17.base==16){
// "#" adds the 0x prefix; digit case follows the specifier (x vs X).
if(_17.alternative){
_17.arg="0x"+_17.arg;
}
_17.arg=_17.toUpper?_17.arg.toUpperCase():_17.arg.toLowerCase();
}
if(_17.base==8){
// "#" guarantees a leading 0 for octal output.
if(_17.alternative&&_17.arg.charAt(0)!="0"){
_17.arg="0"+_17.arg;
}
}
// formatDouble: renders a floating point argument into _18.arg using the
// token's doubleNotation ("e" exponential, "f" fixed, "g" shortest), then
// normalizes the exponent and applies the "#" alternative form and sign.
},formatDouble:function(_18){
var f=parseFloat(_18.arg);
if(!isFinite(f)){
if(typeof _18.arg!="number"){
throw new Error("format argument '"+_18.arg+"' not a float; parseFloat returned "+f);
}
f=0;
}
switch(_18.doubleNotation){
case "e":
_18.arg=f.toExponential(_18.precision);
break;
case "f":
_18.arg=f.toFixed(_18.precision);
break;
case "g":
// %g: use exponential for very small magnitudes, significant-digit
// notation otherwise; without "#", strip trailing zeros as C does.
if(Math.abs(f)<0.0001){
_18.arg=f.toExponential(_18.precision>0?_18.precision-1:_18.precision);
}else{
_18.arg=f.toPrecision(_18.precision);
}
if(!_18.alternative){
_18.arg=_18.arg.replace(/(\..*[^0])0*/,"$1");
_18.arg=_18.arg.replace(/\.0*e/,"e").replace(/\.0$/,"");
}
break;
default:
throw new Error("unexpected double notation '"+_18.doubleNotation+"'");
}
// Force a two-digit exponent ("e+5" -> "e+05") like C printf.
_18.arg=_18.arg.replace(/e\+(\d)$/,"e+0$1").replace(/e\-(\d)$/,"e-0$1");
// Opera emits ".5" instead of "0.5"; normalize.
if(dojo.isOpera){
_18.arg=_18.arg.replace(/^\./,"0.");
}
// "#" guarantees a decimal point even for integral values.
if(_18.alternative){
_18.arg=_18.arg.replace(/^(\d+)$/,"$1.");
_18.arg=_18.arg.replace(/^(\d+)e/,"$1.e");
}
if(f>=0&&_18.sign){
_18.arg=_18.sign+_18.arg;
}
_18.arg=_18.toUpper?_18.arg.toUpperCase():_18.arg.toLowerCase();
// zeroPad: pads _19.arg with zeros up to _1a characters (defaults to the
// token's precision). Pads on the right instead when right-justified.
},zeroPad:function(_19,_1a){
_1a=(arguments.length==2)?_1a:_19.precision;
if(typeof _19.arg!="string"){
_19.arg=""+_19.arg;
}
// Append whole 10-character chunks first, then the exact remainder.
var _1b=_1a-10;
while(_19.arg.length<_1b){
_19.arg=(_19.rightJustify)?_19.arg+this._zeros10:this._zeros10+_19.arg;
}
var pad=_1a-_19.arg.length;
_19.arg=(_19.rightJustify)?_19.arg+this._zeros10.substring(0,pad):this._zeros10.substring(0,pad)+_19.arg;
},fitField:function(_1c){
if(_1c.maxWidth>=0&&_1c.arg.length>_1c.maxWidth){
return _1c.arg.substring(0,_1c.maxWidth);
}
if(_1c.zeroPad){
this.zeroPad(_1c,_1c.minWidth);
return;
}
this.spacePad(_1c);
// spacePad: like zeroPad but pads with spaces up to the field width
// (_1e defaults to the token's minWidth).
},spacePad:function(_1d,_1e){
_1e=(arguments.length==2)?_1e:_1d.minWidth;
if(typeof _1d.arg!="string"){
_1d.arg=""+_1d.arg;
}
// Append whole 10-character chunks first, then the exact remainder.
var _1f=_1e-10;
while(_1d.arg.length<_1f){
_1d.arg=(_1d.rightJustify)?_1d.arg+this._spaces10:this._spaces10+_1d.arg;
}
var pad=_1e-_1d.arg.length;
_1d.arg=(_1d.rightJustify)?_1d.arg+this._spaces10.substring(0,pad):this._spaces10.substring(0,pad)+_1d.arg;
}});
}
| ontoden/ontobee | public/js/dojox/string/sprintf.js | JavaScript | apache-2.0 | 7,128 |
/*
Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
The MySQL Connector/J is licensed under the terms of the GPLv2
<http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most MySQL Connectors.
There are special exceptions to the terms and conditions of the GPLv2 as it is applied to
this software, see the FLOSS License Exception
<http://www.mysql.com/about/legal/licensing/foss-exception.html>.
This program is free software; you can redistribute it and/or modify it under the terms
of the GNU General Public License as published by the Free Software Foundation; version 2
of the License.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this
program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth
Floor, Boston, MA 02110-1301 USA
*/
package com.mysql.jdbc;
import java.util.Map;
/**
 * Value holder for per-query result set metadata the driver caches: the
 * per-column Field descriptors, the name-to-index lookup maps and the
 * ResultSetMetaData built from them. Fields are package-visible (no
 * setters here) so other classes in com.mysql.jdbc fill them in directly.
 */
public class CachedResultSetMetaData {
/** Map column names (and all of their permutations) to column indices */
Map<String, Integer> columnNameToIndex = null;
/** Cached Field info */
Field[] fields;
/** Map of fully-specified column names to column indices */
Map<String, Integer> fullColumnNameToIndex = null;
/** Cached ResultSetMetaData */
java.sql.ResultSetMetaData metadata;
/** @return the column-name to column-index map, or null if not yet built */
public Map<String, Integer> getColumnNameToIndex() {
return columnNameToIndex;
}
/** @return the cached per-column Field descriptors */
public Field[] getFields() {
return fields;
}
/** @return the fully-qualified-name to column-index map, or null if not yet built */
public Map<String, Integer> getFullColumnNameToIndex() {
return fullColumnNameToIndex;
}
/** @return the cached ResultSetMetaData instance */
public java.sql.ResultSetMetaData getMetadata() {
return metadata;
}
}
} | suthat/signal | vendor/mysql-connector-java-5.1.26/src/com/mysql/jdbc/CachedResultSetMetaData.java | Java | apache-2.0 | 1,923 |
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_INSTALLER_UTIL_CONDITIONAL_WORK_ITEM_LIST_H_
#define CHROME_INSTALLER_UTIL_CONDITIONAL_WORK_ITEM_LIST_H_
#pragma once
#include "chrome/installer/util/work_item_list.h"
#include "base/file_path.h"
#include "base/memory/scoped_ptr.h"
// A WorkItemList subclass that permits conditionally executing a set of
// WorkItems.
class ConditionalWorkItemList : public WorkItemList {
public:
// Takes ownership of |condition| (held in the scoped_ptr below).
explicit ConditionalWorkItemList(Condition* condition);
virtual ~ConditionalWorkItemList();
// If condition_->ShouldRun() returns true, then execute the items in this
// list and return true iff they all succeed. If condition_->ShouldRun()
// returns false, does nothing and returns true.
virtual bool Do();
// Does a rollback of the items (if any) that were run in Do.
virtual void Rollback();
protected:
// Pointer to a Condition that is used to determine whether to run this
// WorkItemList.
scoped_ptr<Condition> condition_;
};
// Pre-defined conditions:
//------------------------------------------------------------------------------
// Condition that gates a ConditionalWorkItemList on a file system path.
// NOTE(review): ShouldRun() is implemented in the .cc; from the class name
// it presumably returns true when |key_path_| exists — confirm there.
class ConditionRunIfFileExists : public WorkItem::Condition {
public:
explicit ConditionRunIfFileExists(const FilePath& key_path)
: key_path_(key_path) {}
bool ShouldRun() const;
private:
FilePath key_path_;
};
// Condition class that inverts the ShouldRun result of another Condition.
// This class assumes ownership of original_condition.
// Condition class that inverts the ShouldRun result of another Condition.
// This class assumes ownership of original_condition.
class Not : public WorkItem::Condition {
public:
explicit Not(WorkItem::Condition* original_condition)
: original_condition_(original_condition) {}
// Returns the negation of the wrapped condition's ShouldRun().
bool ShouldRun() const;
private:
scoped_ptr<WorkItem::Condition> original_condition_;
};
#endif // CHROME_INSTALLER_UTIL_CONDITIONAL_WORK_ITEM_LIST_H_
| aYukiSekiguchi/ACCESS-Chromium | chrome/installer/util/conditional_work_item_list.h | C | bsd-3-clause | 1,912 |
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/http/http_auth_gssapi_posix.h"
#include <limits>
#include <string>
#include "base/base64.h"
#include "base/files/file_path.h"
#include "base/format_macros.h"
#include "base/logging.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_restrictions.h"
#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/http/http_auth_challenge_tokenizer.h"
// These are defined for the GSSAPI library:
// Paraphrasing the comments from gssapi.h:
// "The implementation must reserve static storage for a
// gss_OID_desc object for each constant. That constant
// should be initialized to point to that gss_OID_desc."
// These are encoded using ASN.1 BER encoding.
namespace {
// Standard GSSAPI name-type OIDs (see RFC 2744 Appendix A), stored as
// statically allocated gss_OID_desc values with BER-encoded contents.
static gss_OID_desc GSS_C_NT_USER_NAME_VAL = {
10,
const_cast<char*>("\x2a\x86\x48\x86\xf7\x12\x01\x02\x01\x01")
};
static gss_OID_desc GSS_C_NT_MACHINE_UID_NAME_VAL = {
10,
const_cast<char*>("\x2a\x86\x48\x86\xf7\x12\x01\x02\x01\x02")
};
static gss_OID_desc GSS_C_NT_STRING_UID_NAME_VAL = {
10,
const_cast<char*>("\x2a\x86\x48\x86\xf7\x12\x01\x02\x01\x03")
};
static gss_OID_desc GSS_C_NT_HOSTBASED_SERVICE_X_VAL = {
6,
const_cast<char*>("\x2b\x06\x01\x05\x06\x02")
};
static gss_OID_desc GSS_C_NT_HOSTBASED_SERVICE_VAL = {
10,
const_cast<char*>("\x2a\x86\x48\x86\xf7\x12\x01\x02\x01\x04")
};
static gss_OID_desc GSS_C_NT_ANONYMOUS_VAL = {
6,
const_cast<char*>("\x2b\x06\01\x05\x06\x03")
};
static gss_OID_desc GSS_C_NT_EXPORT_NAME_VAL = {
6,
const_cast<char*>("\x2b\x06\x01\x05\x06\x04")
};
} // namespace
// Heimdal >= 1.4 will define the following as preprocessor macros.
// To avoid conflicting declarations, we have to undefine these.
#undef GSS_C_NT_USER_NAME
#undef GSS_C_NT_MACHINE_UID_NAME
#undef GSS_C_NT_STRING_UID_NAME
#undef GSS_C_NT_HOSTBASED_SERVICE_X
#undef GSS_C_NT_HOSTBASED_SERVICE
#undef GSS_C_NT_ANONYMOUS
#undef GSS_C_NT_EXPORT_NAME
// Global definitions pointing at the static descriptors above, matching
// the declarations the GSSAPI header expects.
gss_OID GSS_C_NT_USER_NAME = &GSS_C_NT_USER_NAME_VAL;
gss_OID GSS_C_NT_MACHINE_UID_NAME = &GSS_C_NT_MACHINE_UID_NAME_VAL;
gss_OID GSS_C_NT_STRING_UID_NAME = &GSS_C_NT_STRING_UID_NAME_VAL;
gss_OID GSS_C_NT_HOSTBASED_SERVICE_X = &GSS_C_NT_HOSTBASED_SERVICE_X_VAL;
gss_OID GSS_C_NT_HOSTBASED_SERVICE = &GSS_C_NT_HOSTBASED_SERVICE_VAL;
gss_OID GSS_C_NT_ANONYMOUS = &GSS_C_NT_ANONYMOUS_VAL;
gss_OID GSS_C_NT_EXPORT_NAME = &GSS_C_NT_EXPORT_NAME_VAL;
namespace net {
// Exported mechanism for GSSAPI. We always use SPNEGO:
// iso.org.dod.internet.security.mechanism.snego (1.3.6.1.5.5.2)
gss_OID_desc CHROME_GSS_SPNEGO_MECH_OID_DESC_VAL = {
6,
const_cast<char*>("\x2b\x06\x01\x05\x05\x02")
};
gss_OID CHROME_GSS_SPNEGO_MECH_OID_DESC =
&CHROME_GSS_SPNEGO_MECH_OID_DESC_VAL;
// Debugging helpers.
namespace {
// Renders a GSSAPI major/minor status pair for log messages: "OK" on
// success, otherwise both raw codes in hex.
std::string DisplayStatus(OM_uint32 major_status,
                          OM_uint32 minor_status) {
  if (major_status != GSS_S_COMPLETE) {
    return base::StringPrintf("0x%08X 0x%08X", major_status, minor_status);
  }
  return "OK";
}
// Expands a single GSSAPI status code (major or minor, selected by
// |status_code_type|) into readable text by repeatedly calling the
// library's display_status routine, which pages long messages through
// |msg_ctx|. The raw code in hex is always included as a prefix.
std::string DisplayCode(GSSAPILibrary* gssapi_lib,
OM_uint32 status,
OM_uint32 status_code_type) {
const int kMaxDisplayIterations = 8;
const size_t kMaxMsgLength = 4096;
// msg_ctx needs to be outside the loop because it is invoked multiple times.
OM_uint32 msg_ctx = 0;
std::string rv = base::StringPrintf("(0x%08X)", status);
// This loop should continue iterating until msg_ctx is 0 after the first
// iteration. To be cautious and prevent an infinite loop, it stops after
// a finite number of iterations as well. As an added sanity check, no
// individual message may exceed |kMaxMsgLength|, and the final result
// will not exceed |kMaxMsgLength|*2-1.
for (int i = 0; i < kMaxDisplayIterations && rv.size() < kMaxMsgLength;
++i) {
OM_uint32 min_stat;
gss_buffer_desc_struct msg = GSS_C_EMPTY_BUFFER;
OM_uint32 maj_stat =
gssapi_lib->display_status(&min_stat, status, status_code_type,
GSS_C_NULL_OID, &msg_ctx, &msg);
if (maj_stat == GSS_S_COMPLETE) {
// Clamp the chunk length before feeding it to a %.*s conversion.
int msg_len = (msg.length > kMaxMsgLength) ?
static_cast<int>(kMaxMsgLength) :
static_cast<int>(msg.length);
if (msg_len > 0 && msg.value != NULL) {
rv += base::StringPrintf(" %.*s", msg_len,
static_cast<char*>(msg.value));
}
}
// The buffer is released even when display_status failed; releasing an
// empty buffer is harmless.
gssapi_lib->release_buffer(&min_stat, &msg);
if (!msg_ctx)
break;
}
return rv;
}
// Produces a combined human-readable rendering of a GSSAPI status pair,
// expanding both the major (GSS) and minor (mechanism) codes; "OK" when
// the call completed successfully.
std::string DisplayExtendedStatus(GSSAPILibrary* gssapi_lib,
                                  OM_uint32 major_status,
                                  OM_uint32 minor_status) {
  if (major_status == GSS_S_COMPLETE)
    return "OK";
  const std::string major_text =
      DisplayCode(gssapi_lib, major_status, GSS_C_GSS_CODE);
  const std::string minor_text =
      DisplayCode(gssapi_lib, minor_status, GSS_C_MECH_CODE);
  std::string result = "Major: ";
  result += major_text;
  result += " | Minor: ";
  result += minor_text;
  return result;
}
// ScopedName releases a gss_name_t when it goes out of scope.
// ScopedName releases a gss_name_t when it goes out of scope.
class ScopedName {
public:
// Adopts |name|; it will be released via |gssapi_lib| on destruction.
ScopedName(gss_name_t name,
GSSAPILibrary* gssapi_lib)
: name_(name),
gssapi_lib_(gssapi_lib) {
DCHECK(gssapi_lib_);
}
~ScopedName() {
// GSS_C_NO_NAME means nothing to release.
if (name_ != GSS_C_NO_NAME) {
OM_uint32 minor_status = 0;
OM_uint32 major_status =
gssapi_lib_->release_name(&minor_status, &name_);
// A failed release is only logged; there is no way to recover here.
if (major_status != GSS_S_COMPLETE) {
LOG(WARNING) << "Problem releasing name. "
<< DisplayStatus(major_status, minor_status);
}
name_ = GSS_C_NO_NAME;
}
}
private:
gss_name_t name_;
GSSAPILibrary* gssapi_lib_;
DISALLOW_COPY_AND_ASSIGN(ScopedName);
};
// ScopedBuffer releases a gss_buffer_t when it goes out of scope.
// ScopedBuffer releases a gss_buffer_t when it goes out of scope.
class ScopedBuffer {
public:
// Wraps |buffer| (not owned memory-wise; released via |gssapi_lib|).
ScopedBuffer(gss_buffer_t buffer,
GSSAPILibrary* gssapi_lib)
: buffer_(buffer),
gssapi_lib_(gssapi_lib) {
DCHECK(gssapi_lib_);
}
~ScopedBuffer() {
if (buffer_ != GSS_C_NO_BUFFER) {
OM_uint32 minor_status = 0;
OM_uint32 major_status =
gssapi_lib_->release_buffer(&minor_status, buffer_);
// A failed release is only logged; there is no way to recover here.
if (major_status != GSS_S_COMPLETE) {
LOG(WARNING) << "Problem releasing buffer. "
<< DisplayStatus(major_status, minor_status);
}
buffer_ = GSS_C_NO_BUFFER;
}
}
private:
gss_buffer_t buffer_;
GSSAPILibrary* gssapi_lib_;
DISALLOW_COPY_AND_ASSIGN(ScopedBuffer);
};
namespace {
// Returns " (<predefined_oid_name>)" when |oid| is byte-for-byte equal to
// |predefined_oid|, and the empty string otherwise. Used by DescribeOid to
// label well-known name-type OIDs.
std::string AppendIfPredefinedValue(gss_OID oid,
                                    gss_OID predefined_oid,
                                    const char* predefined_oid_name) {
  DCHECK(oid);
  DCHECK(predefined_oid);
  DCHECK(predefined_oid_name);
  if (oid->length != predefined_oid->length)
    return std::string();
  if (memcmp(oid->elements, predefined_oid->elements,
             predefined_oid->length) != 0) {
    return std::string();
  }
  std::string label = " (";
  label += predefined_oid_name;
  label += ")";
  return label;
}
} // namespace
// Renders a gss_OID for debug logging: either as a quoted ASCII string
// (when the storage looks like printable text), or as a length-prefixed
// hex dump of its elements, suffixed with the name of any matching
// well-known name-type OID.
std::string DescribeOid(GSSAPILibrary* gssapi_lib, const gss_OID oid) {
if (!oid)
return "<NULL>";
std::string output;
const size_t kMaxCharsToPrint = 1024;
OM_uint32 byte_length = oid->length;
size_t char_length = byte_length / sizeof(char);
if (char_length > kMaxCharsToPrint) {
// This might be a plain ASCII string.
// Check if the first |kMaxCharsToPrint| characters
// contain only printable characters and are NULL terminated.
// NOTE(review): this scans the bytes of the gss_OID struct itself (not
// oid->elements) — presumably a defensive check for callers that pass a
// C string where an OID is expected; confirm that is the intent.
const char* str = reinterpret_cast<const char*>(oid);
size_t str_length = 0;
for ( ; str_length < kMaxCharsToPrint; ++str_length) {
if (!str[str_length] || !isprint(str[str_length]))
break;
}
if (!str[str_length]) {
output += base::StringPrintf("\"%s\"", str);
return output;
}
}
output = base::StringPrintf("(%u) \"", byte_length);
if (!oid->elements) {
output += "<NULL>";
return output;
}
const unsigned char* elements =
reinterpret_cast<const unsigned char*>(oid->elements);
// Don't print more than |kMaxCharsToPrint| characters.
size_t i = 0;
for ( ; (i < byte_length) && (i < kMaxCharsToPrint); ++i) {
output += base::StringPrintf("\\x%02X", elements[i]);
}
if (i >= kMaxCharsToPrint)
output += "...";
output += "\"";
// Check if the OID is one of the predefined values.
output += AppendIfPredefinedValue(oid,
GSS_C_NT_USER_NAME,
"GSS_C_NT_USER_NAME");
output += AppendIfPredefinedValue(oid,
GSS_C_NT_MACHINE_UID_NAME,
"GSS_C_NT_MACHINE_UID_NAME");
output += AppendIfPredefinedValue(oid,
GSS_C_NT_STRING_UID_NAME,
"GSS_C_NT_STRING_UID_NAME");
output += AppendIfPredefinedValue(oid,
GSS_C_NT_HOSTBASED_SERVICE_X,
"GSS_C_NT_HOSTBASED_SERVICE_X");
output += AppendIfPredefinedValue(oid,
GSS_C_NT_HOSTBASED_SERVICE,
"GSS_C_NT_HOSTBASED_SERVICE");
output += AppendIfPredefinedValue(oid,
GSS_C_NT_ANONYMOUS,
"GSS_C_NT_ANONYMOUS");
output += AppendIfPredefinedValue(oid,
GSS_C_NT_EXPORT_NAME,
"GSS_C_NT_EXPORT_NAME");
return output;
}
// Renders a gss_name_t for debug logging via the library's display_name,
// including the name's type OID. On failure, returns an error description
// instead of the name.
std::string DescribeName(GSSAPILibrary* gssapi_lib, const gss_name_t name) {
OM_uint32 major_status = 0;
OM_uint32 minor_status = 0;
gss_buffer_desc_struct output_name_buffer = GSS_C_EMPTY_BUFFER;
gss_OID_desc output_name_type_desc = GSS_C_EMPTY_BUFFER;
gss_OID output_name_type = &output_name_type_desc;
major_status = gssapi_lib->display_name(&minor_status,
name,
&output_name_buffer,
&output_name_type);
// Ensures the buffer allocated by display_name is released on all paths.
ScopedBuffer scoped_output_name(&output_name_buffer, gssapi_lib);
if (major_status != GSS_S_COMPLETE) {
std::string error =
base::StringPrintf("Unable to describe name 0x%p, %s",
name,
DisplayExtendedStatus(gssapi_lib,
major_status,
minor_status).c_str());
return error;
}
// NOTE(review): narrowing size_t -> int; fine for realistic name lengths,
// would misbehave only for absurdly large buffers.
int len = output_name_buffer.length;
std::string description = base::StringPrintf(
"%*s (Type %s)",
len,
reinterpret_cast<const char*>(output_name_buffer.value),
DescribeOid(gssapi_lib, output_name_type).c_str());
return description;
}
// Produces a human-readable description of a GSSAPI security context for
// debug logging: source/target names, lifetime, mechanism, flags, and
// whether the context was locally initiated and is fully established.
std::string DescribeContext(GSSAPILibrary* gssapi_lib,
                            const gss_ctx_id_t context_handle) {
  OM_uint32 major_status = 0;
  OM_uint32 minor_status = 0;
  gss_name_t src_name = GSS_C_NO_NAME;
  gss_name_t targ_name = GSS_C_NO_NAME;
  OM_uint32 lifetime_rec = 0;
  gss_OID mech_type = GSS_C_NO_OID;
  OM_uint32 ctx_flags = 0;
  int locally_initiated = 0;
  int open = 0;
  if (context_handle == GSS_C_NO_CONTEXT)
    return std::string("Context: GSS_C_NO_CONTEXT");
  major_status = gssapi_lib->inquire_context(&minor_status,
                                             context_handle,
                                             &src_name,
                                             &targ_name,
                                             &lifetime_rec,
                                             &mech_type,
                                             &ctx_flags,
                                             &locally_initiated,
                                             &open);
  // Bug fix: the original wrote "ScopedName(src_name, gssapi_lib);", which
  // constructs a *temporary* that is destroyed at the end of that statement,
  // releasing each name before DescribeName() below reads it (use after
  // release). Named locals keep the names alive until the function returns,
  // and still release them on every exit path (GSS_C_NO_NAME is a no-op).
  ScopedName scoped_src_name(src_name, gssapi_lib);
  ScopedName scoped_targ_name(targ_name, gssapi_lib);
  if (major_status != GSS_S_COMPLETE) {
    std::string error =
        base::StringPrintf("Unable to describe context 0x%p, %s",
                           context_handle,
                           DisplayExtendedStatus(gssapi_lib,
                                                 major_status,
                                                 minor_status).c_str());
    return error;
  }
  std::string source(DescribeName(gssapi_lib, src_name));
  std::string target(DescribeName(gssapi_lib, targ_name));
  std::string description = base::StringPrintf("Context 0x%p: "
                                               "Source \"%s\", "
                                               "Target \"%s\", "
                                               "lifetime %d, "
                                               "mechanism %s, "
                                               "flags 0x%08X, "
                                               "local %d, "
                                               "open %d",
                                               context_handle,
                                               source.c_str(),
                                               target.c_str(),
                                               lifetime_rec,
                                               DescribeOid(gssapi_lib,
                                                           mech_type).c_str(),
                                               ctx_flags,
                                               locally_initiated,
                                               open);
  return description;
}
} // namespace
// Records the requested library name but loads nothing yet; the actual
// dlopen/symbol binding happens lazily in Init().
GSSAPISharedLibrary::GSSAPISharedLibrary(const std::string& gssapi_library_name)
: initialized_(false),
gssapi_library_name_(gssapi_library_name),
gssapi_library_(NULL),
import_name_(NULL),
release_name_(NULL),
release_buffer_(NULL),
display_name_(NULL),
display_status_(NULL),
init_sec_context_(NULL),
wrap_size_limit_(NULL),
delete_sec_context_(NULL),
inquire_context_(NULL) {
}
GSSAPISharedLibrary::~GSSAPISharedLibrary() {
// Unload the dynamically loaded GSSAPI library, if one was ever loaded.
if (gssapi_library_) {
base::UnloadNativeLibrary(gssapi_library_);
gssapi_library_ = NULL;
}
}
// Idempotent: performs one-time initialization on first call and reports
// the cached result on subsequent calls.
bool GSSAPISharedLibrary::Init() {
if (!initialized_)
InitImpl();
return initialized_;
}
bool GSSAPISharedLibrary::InitImpl() {
DCHECK(!initialized_);
#if defined(DLOPEN_KERBEROS)
// With dlopen support compiled in, failure to find a usable library
// leaves the object uninitialized, so Init() keeps returning false.
gssapi_library_ = LoadSharedLibrary();
if (gssapi_library_ == NULL)
return false;
#endif // defined(DLOPEN_KERBEROS)
initialized_ = true;
return true;
}
// Tries to load a GSSAPI library: either the single user-specified name,
// or a platform-specific list of known MIT Kerberos / Heimdal sonames.
// Returns the first library whose required symbols all bind, else NULL.
base::NativeLibrary GSSAPISharedLibrary::LoadSharedLibrary() {
const char* const* library_names;
size_t num_lib_names;
const char* user_specified_library[1];
if (!gssapi_library_name_.empty()) {
user_specified_library[0] = gssapi_library_name_.c_str();
library_names = user_specified_library;
num_lib_names = 1;
} else {
static const char* const kDefaultLibraryNames[] = {
#if defined(OS_MACOSX)
"libgssapi_krb5.dylib" // MIT Kerberos
#elif defined(OS_OPENBSD)
"libgssapi.so" // Heimdal - OpenBSD
#else
"libgssapi_krb5.so.2", // MIT Kerberos - FC, Suse10, Debian
"libgssapi.so.4", // Heimdal - Suse10, MDK
"libgssapi.so.2", // Heimdal - Gentoo
"libgssapi.so.1" // Heimdal - Suse9, CITI - FC, MDK, Suse10
#endif
};
library_names = kDefaultLibraryNames;
num_lib_names = arraysize(kDefaultLibraryNames);
}
for (size_t i = 0; i < num_lib_names; ++i) {
const char* library_name = library_names[i];
base::FilePath file_path(library_name);
// TODO(asanka): Move library loading to a separate thread.
// http://crbug.com/66702
base::ThreadRestrictions::ScopedAllowIO allow_io_temporarily;
base::NativeLibrary lib = base::LoadNativeLibrary(file_path, NULL);
if (lib) {
// Only return this library if we can bind the functions we need.
if (BindMethods(lib))
return lib;
base::UnloadNativeLibrary(lib);
}
}
LOG(WARNING) << "Unable to find a compatible GSSAPI library";
return NULL;
}
// BIND resolves one gss_* symbol into a local of the matching function
// pointer type. With DLOPEN_KERBEROS the symbol is looked up in the
// dynamically loaded library (a missing symbol aborts the whole bind);
// otherwise it simply aliases the statically linked function.
#if defined(DLOPEN_KERBEROS)
#define BIND(lib, x) \
DCHECK(lib); \
gss_##x##_type x = reinterpret_cast<gss_##x##_type>( \
base::GetFunctionPointerFromNativeLibrary(lib, "gss_" #x)); \
if (x == NULL) { \
LOG(WARNING) << "Unable to bind function \"" << "gss_" #x << "\""; \
return false; \
}
#else
#define BIND(lib, x) gss_##x##_type x = gss_##x
#endif
// Resolves every GSSAPI entry point this class forwards to and stashes the
// pointers in members. All-or-nothing: any missing symbol returns false
// (via the BIND macro) before the member assignments below run.
bool GSSAPISharedLibrary::BindMethods(base::NativeLibrary lib) {
BIND(lib, import_name);
BIND(lib, release_name);
BIND(lib, release_buffer);
BIND(lib, display_name);
BIND(lib, display_status);
BIND(lib, init_sec_context);
BIND(lib, wrap_size_limit);
BIND(lib, delete_sec_context);
BIND(lib, inquire_context);
import_name_ = import_name;
release_name_ = release_name;
release_buffer_ = release_buffer;
display_name_ = display_name;
display_status_ = display_status;
init_sec_context_ = init_sec_context;
wrap_size_limit_ = wrap_size_limit;
delete_sec_context_ = delete_sec_context;
inquire_context_ = inquire_context;
return true;
}
#undef BIND
// The methods below are thin forwarding wrappers around the bound gss_*
// entry points. Each requires Init() to have succeeded (DCHECK), except
// delete_sec_context which tolerates an uninitialized library because it
// is reachable from destructors.
// Forwards to gss_import_name.
OM_uint32 GSSAPISharedLibrary::import_name(
OM_uint32* minor_status,
const gss_buffer_t input_name_buffer,
const gss_OID input_name_type,
gss_name_t* output_name) {
DCHECK(initialized_);
return import_name_(minor_status, input_name_buffer, input_name_type,
output_name);
}
// Forwards to gss_release_name.
OM_uint32 GSSAPISharedLibrary::release_name(
OM_uint32* minor_status,
gss_name_t* input_name) {
DCHECK(initialized_);
return release_name_(minor_status, input_name);
}
// Forwards to gss_release_buffer.
OM_uint32 GSSAPISharedLibrary::release_buffer(
OM_uint32* minor_status,
gss_buffer_t buffer) {
DCHECK(initialized_);
return release_buffer_(minor_status, buffer);
}
// Forwards to gss_display_name.
OM_uint32 GSSAPISharedLibrary::display_name(
OM_uint32* minor_status,
const gss_name_t input_name,
gss_buffer_t output_name_buffer,
gss_OID* output_name_type) {
DCHECK(initialized_);
return display_name_(minor_status,
input_name,
output_name_buffer,
output_name_type);
}
// Forwards to gss_display_status.
OM_uint32 GSSAPISharedLibrary::display_status(
OM_uint32* minor_status,
OM_uint32 status_value,
int status_type,
const gss_OID mech_type,
OM_uint32* message_context,
gss_buffer_t status_string) {
DCHECK(initialized_);
return display_status_(minor_status, status_value, status_type, mech_type,
message_context, status_string);
}
// Forwards to gss_init_sec_context.
OM_uint32 GSSAPISharedLibrary::init_sec_context(
OM_uint32* minor_status,
const gss_cred_id_t initiator_cred_handle,
gss_ctx_id_t* context_handle,
const gss_name_t target_name,
const gss_OID mech_type,
OM_uint32 req_flags,
OM_uint32 time_req,
const gss_channel_bindings_t input_chan_bindings,
const gss_buffer_t input_token,
gss_OID* actual_mech_type,
gss_buffer_t output_token,
OM_uint32* ret_flags,
OM_uint32* time_rec) {
DCHECK(initialized_);
return init_sec_context_(minor_status,
initiator_cred_handle,
context_handle,
target_name,
mech_type,
req_flags,
time_req,
input_chan_bindings,
input_token,
actual_mech_type,
output_token,
ret_flags,
time_rec);
}
// Forwards to gss_wrap_size_limit.
OM_uint32 GSSAPISharedLibrary::wrap_size_limit(
OM_uint32* minor_status,
const gss_ctx_id_t context_handle,
int conf_req_flag,
gss_qop_t qop_req,
OM_uint32 req_output_size,
OM_uint32* max_input_size) {
DCHECK(initialized_);
return wrap_size_limit_(minor_status,
context_handle,
conf_req_flag,
qop_req,
req_output_size,
max_input_size);
}
// Forwards to gss_delete_sec_context, if the library was initialized.
OM_uint32 GSSAPISharedLibrary::delete_sec_context(
OM_uint32* minor_status,
gss_ctx_id_t* context_handle,
gss_buffer_t output_token) {
// This is called from the owner class' destructor, even if
// Init() is not called, so we can't assume |initialized_|
// is set.
if (!initialized_)
return 0;
return delete_sec_context_(minor_status,
context_handle,
output_token);
}
// Forwards to gss_inquire_context.
OM_uint32 GSSAPISharedLibrary::inquire_context(
OM_uint32* minor_status,
const gss_ctx_id_t context_handle,
gss_name_t* src_name,
gss_name_t* targ_name,
OM_uint32* lifetime_rec,
gss_OID* mech_type,
OM_uint32* ctx_flags,
int* locally_initiated,
int* open) {
DCHECK(initialized_);
return inquire_context_(minor_status,
context_handle,
src_name,
targ_name,
lifetime_rec,
mech_type,
ctx_flags,
locally_initiated,
open);
}
// Starts with no context; one is typically established later via
// init_sec_context into the handle returned by receive().
ScopedSecurityContext::ScopedSecurityContext(GSSAPILibrary* gssapi_lib)
: security_context_(GSS_C_NO_CONTEXT),
gssapi_lib_(gssapi_lib) {
DCHECK(gssapi_lib_);
}
ScopedSecurityContext::~ScopedSecurityContext() {
// Tear down any established context; failures are only logged since
// nothing can be done about them during destruction.
if (security_context_ != GSS_C_NO_CONTEXT) {
gss_buffer_desc output_token = GSS_C_EMPTY_BUFFER;
OM_uint32 minor_status = 0;
OM_uint32 major_status = gssapi_lib_->delete_sec_context(
&minor_status, &security_context_, &output_token);
if (major_status != GSS_S_COMPLETE) {
LOG(WARNING) << "Problem releasing security_context. "
<< DisplayStatus(major_status, minor_status);
}
security_context_ = GSS_C_NO_CONTEXT;
}
}
// |scheme| is the HTTP auth scheme name (e.g. "Negotiate") and |gss_oid|
// the GSSAPI mechanism to request. |library| must outlive this object.
HttpAuthGSSAPI::HttpAuthGSSAPI(GSSAPILibrary* library,
const std::string& scheme,
gss_OID gss_oid)
: scheme_(scheme),
gss_oid_(gss_oid),
library_(library),
scoped_sec_context_(library),
can_delegate_(false) {
DCHECK(library_);
}
HttpAuthGSSAPI::~HttpAuthGSSAPI() {
}
// Loads/initializes the underlying GSSAPI library.
bool HttpAuthGSSAPI::Init() {
if (!library_)
return false;
return library_->Init();
}
// An identity is needed only before the first server token has arrived.
bool HttpAuthGSSAPI::NeedsIdentity() const {
return decoded_server_auth_token_.empty();
}
// GSSAPI always uses the ambient (e.g. Kerberos ticket cache) credentials.
bool HttpAuthGSSAPI::AllowsExplicitCredentials() const {
return false;
}
// Requests GSS_C_DELEG_FLAG on subsequently created security contexts.
void HttpAuthGSSAPI::Delegate() {
can_delegate_ = true;
}
// Interprets a server challenge for this scheme. An empty challenge starts
// a fresh handshake (or rejects an in-progress one); a non-empty challenge
// must carry a base64 continuation token for an existing context, which is
// stashed in |decoded_server_auth_token_| for the next GenerateAuthToken.
HttpAuth::AuthorizationResult HttpAuthGSSAPI::ParseChallenge(
HttpAuthChallengeTokenizer* tok) {
// Verify the challenge's auth-scheme.
if (!LowerCaseEqualsASCII(tok->scheme(), StringToLowerASCII(scheme_).c_str()))
return HttpAuth::AUTHORIZATION_RESULT_INVALID;
std::string encoded_auth_token = tok->base64_param();
if (encoded_auth_token.empty()) {
// If a context has already been established, an empty Negotiate challenge
// should be treated as a rejection of the current attempt.
if (scoped_sec_context_.get() != GSS_C_NO_CONTEXT)
return HttpAuth::AUTHORIZATION_RESULT_REJECT;
DCHECK(decoded_server_auth_token_.empty());
return HttpAuth::AUTHORIZATION_RESULT_ACCEPT;
} else {
// If a context has not already been established, additional tokens should
// not be present in the auth challenge.
if (scoped_sec_context_.get() == GSS_C_NO_CONTEXT)
return HttpAuth::AUTHORIZATION_RESULT_INVALID;
}
// Make sure the additional token is base64 encoded.
std::string decoded_auth_token;
bool base64_rv = base::Base64Decode(encoded_auth_token, &decoded_auth_token);
if (!base64_rv)
return HttpAuth::AUTHORIZATION_RESULT_INVALID;
decoded_server_auth_token_ = decoded_auth_token;
return HttpAuth::AUTHORIZATION_RESULT_ACCEPT;
}
// Produces the next "<scheme> <base64 token>" Authorization value for the
// given service principal name. The previously parsed server token (if
// any) feeds the GSSAPI handshake. |credentials| is unused here — explicit
// credentials are unsupported (AllowsExplicitCredentials returns false).
int HttpAuthGSSAPI::GenerateAuthToken(const AuthCredentials* credentials,
const std::string& spn,
std::string* auth_token) {
DCHECK(auth_token);
gss_buffer_desc input_token = GSS_C_EMPTY_BUFFER;
input_token.length = decoded_server_auth_token_.length();
input_token.value = (input_token.length > 0) ?
const_cast<char*>(decoded_server_auth_token_.data()) :
NULL;
gss_buffer_desc output_token = GSS_C_EMPTY_BUFFER;
// Ensures the library-allocated output buffer is released on all paths.
ScopedBuffer scoped_output_token(&output_token, library_);
int rv = GetNextSecurityToken(spn, &input_token, &output_token);
if (rv != OK)
return rv;
// Base64 encode data in output buffer and prepend the scheme.
std::string encode_input(static_cast<char*>(output_token.value),
output_token.length);
std::string encode_output;
base::Base64Encode(encode_input, &encode_output);
*auth_token = scheme_ + " " + encode_output;
return OK;
}
namespace {
// GSSAPI status codes consist of a calling error (essentially, a programmer
// bug), a routine error (defined by the RFC), and supplementary information,
// all bitwise-or'ed together in different regions of the 32 bit return value.
// This means a simple switch on the return codes is not sufficient.
// Maps the major status of gss_import_name() to a net error code. GSSAPI
// status words pack calling errors, routine errors and supplementary bits
// into distinct regions of the 32-bit value, so the word is decomposed
// with the GSS_* macros rather than compared wholesale.
int MapImportNameStatusToError(OM_uint32 major_status) {
  VLOG(1) << "import_name returned 0x" << std::hex << major_status;
  if (major_status == GSS_S_COMPLETE)
    return OK;
  // A calling error is a bug on our side of the API.
  if (GSS_CALLING_ERROR(major_status) != 0)
    return ERR_UNEXPECTED;
  const OM_uint32 routine_error = GSS_ROUTINE_ERROR(major_status);
  if (routine_error == GSS_S_BAD_NAME ||
      routine_error == GSS_S_BAD_NAMETYPE) {
    return ERR_MALFORMED_IDENTITY;
  }
  // GSS_S_FAILURE typically means an allocation failure in MIT Kerberos,
  // but the API makes no such guarantee. GSS_S_DEFECTIVE_TOKEN is not in
  // the documented set for import_name but appears in implementations.
  if (routine_error == GSS_S_FAILURE ||
      routine_error == GSS_S_DEFECTIVE_TOKEN) {
    return ERR_UNEXPECTED_SECURITY_LIBRARY_STATUS;
  }
  if (routine_error == GSS_S_BAD_MECH)
    return ERR_UNSUPPORTED_AUTH_SCHEME;
  return ERR_UNDOCUMENTED_SECURITY_LIBRARY_STATUS;
}
// Maps the major status of gss_init_sec_context() to a net error code,
// checking the calling-error, routine-error and supplementary regions of
// the status word in turn.
int MapInitSecContextStatusToError(OM_uint32 major_status) {
VLOG(1) << "init_sec_context returned 0x" << std::hex << major_status;
// Although GSS_S_CONTINUE_NEEDED is an additional bit, it seems like
// other code just checks if major_status is equivalent to it to indicate
// that there are no other errors included.
if (major_status == GSS_S_COMPLETE || major_status == GSS_S_CONTINUE_NEEDED)
return OK;
if (GSS_CALLING_ERROR(major_status) != 0)
return ERR_UNEXPECTED;
OM_uint32 routine_status = GSS_ROUTINE_ERROR(major_status);
switch (routine_status) {
case GSS_S_DEFECTIVE_TOKEN:
return ERR_INVALID_RESPONSE;
case GSS_S_DEFECTIVE_CREDENTIAL:
// Not expected since this implementation uses the default credential.
return ERR_UNEXPECTED_SECURITY_LIBRARY_STATUS;
case GSS_S_BAD_SIG:
// Probably won't happen, but it's a bad response.
return ERR_INVALID_RESPONSE;
case GSS_S_NO_CRED:
return ERR_INVALID_AUTH_CREDENTIALS;
case GSS_S_CREDENTIALS_EXPIRED:
return ERR_INVALID_AUTH_CREDENTIALS;
case GSS_S_BAD_BINDINGS:
// This only happens with mutual authentication.
return ERR_UNEXPECTED_SECURITY_LIBRARY_STATUS;
case GSS_S_NO_CONTEXT:
return ERR_UNEXPECTED_SECURITY_LIBRARY_STATUS;
case GSS_S_BAD_NAMETYPE:
return ERR_UNSUPPORTED_AUTH_SCHEME;
case GSS_S_BAD_NAME:
return ERR_UNSUPPORTED_AUTH_SCHEME;
case GSS_S_BAD_MECH:
return ERR_UNEXPECTED_SECURITY_LIBRARY_STATUS;
case GSS_S_FAILURE:
// This should be an "Unexpected Security Status" according to the
// GSSAPI documentation, but it's typically used to indicate that
// credentials are not correctly set up on a user machine, such
// as a missing credential cache or hitting this after calling
// kdestroy.
// TODO(cbentzel): Use minor code for even better mapping?
return ERR_MISSING_AUTH_CREDENTIALS;
default:
if (routine_status != 0)
return ERR_UNDOCUMENTED_SECURITY_LIBRARY_STATUS;
break;
}
// No routine error: examine the supplementary-information bits.
OM_uint32 supplemental_status = GSS_SUPPLEMENTARY_INFO(major_status);
// Replays could indicate an attack.
if (supplemental_status & (GSS_S_DUPLICATE_TOKEN | GSS_S_OLD_TOKEN |
GSS_S_UNSEQ_TOKEN | GSS_S_GAP_TOKEN))
return ERR_INVALID_RESPONSE;
// At this point, every documented status has been checked.
return ERR_UNDOCUMENTED_SECURITY_LIBRARY_STATUS;
}
}
// Performs one round of the GSSAPI handshake: imports |spn| as a
// host-based service name, then calls init_sec_context with the server's
// previous token (if any) to produce the next client token.
int HttpAuthGSSAPI::GetNextSecurityToken(const std::string& spn,
gss_buffer_t in_token,
gss_buffer_t out_token) {
// Create a name for the principal
// TODO(cbentzel): Just do this on the first pass?
std::string spn_principal = spn;
gss_buffer_desc spn_buffer = GSS_C_EMPTY_BUFFER;
spn_buffer.value = const_cast<char*>(spn_principal.c_str());
// NOTE(review): the length deliberately(?) includes the trailing NUL;
// RFC 2744 examples pass strlen() without it — confirm both forms are
// accepted by the supported libraries before changing.
spn_buffer.length = spn_principal.size() + 1;
OM_uint32 minor_status = 0;
gss_name_t principal_name = GSS_C_NO_NAME;
OM_uint32 major_status = library_->import_name(
&minor_status,
&spn_buffer,
GSS_C_NT_HOSTBASED_SERVICE,
&principal_name);
int rv = MapImportNameStatusToError(major_status);
if (rv != OK) {
LOG(ERROR) << "Problem importing name from "
<< "spn \"" << spn_principal << "\"\n"
<< DisplayExtendedStatus(library_, major_status, minor_status);
return rv;
}
// Releases the imported name when this function returns.
ScopedName scoped_name(principal_name, library_);
// Continue creating a security context.
OM_uint32 req_flags = 0;
if (can_delegate_)
req_flags |= GSS_C_DELEG_FLAG;
major_status = library_->init_sec_context(
&minor_status,
GSS_C_NO_CREDENTIAL,
scoped_sec_context_.receive(),
principal_name,
gss_oid_,
req_flags,
GSS_C_INDEFINITE,
GSS_C_NO_CHANNEL_BINDINGS,
in_token,
NULL, // actual_mech_type
out_token,
NULL, // ret flags
NULL);
rv = MapInitSecContextStatusToError(major_status);
if (rv != OK) {
LOG(ERROR) << "Problem initializing context. \n"
<< DisplayExtendedStatus(library_, major_status, minor_status)
<< '\n'
<< DescribeContext(library_, scoped_sec_context_.get());
}
return rv;
}
} // namespace net
| TeamEOS/external_chromium_org | net/http/http_auth_gssapi_posix.cc | C++ | bsd-3-clause | 31,051 |
# given a possibly empty string, format it into a comment or -nc
# input is in:
# $_[0] = possible comment string
#
# output is:
# string for use by CVS functions
sub CTCvsFormatComment {
    local( $ret ) = "" ;
    local( $comment ) = $_[0] ;
    # treat a missing argument like an empty comment (several callers omit
    # the comment entirely) instead of warning on undef
    if ( defined( $comment ) && $comment ne "" ) {
        # escape characters that are special inside shell double quotes so
        # the comment survives being embedded in a system() command line
        $comment =~ s/([\\"\$`])/\\$1/g ;
        $ret = "-m \"" . $comment . "\"" ;
    }
    $ret ;
}
# given a project and spec line, compute the server line
# input is in:
# $_[0] = project
# $_[1] = spec line
#
# output:
# return a server line, or "" if not a croot
sub CTCvsServerLine {
    &CTUDebug( "in CTCvsServerLine\n" ) ;
    local( $ret ) = "" ;
    # only "croot" (CVS root) spec lines carry a server option
    local( $type ) = &CTSpecType( $_[1] ) ;
    if ( $type eq "croot" ) {
        local( $options ) = &CTSpecOptions( $_[1] ) ;
        local( $sline ) = &CTSpecFindOption( $options, "server" ) ;
        if ( $sline ne "" ) {
            # the option stores the CVSROOT with commas; cvs wants colons,
            # e.g. ":pserver,user@host,/repo" -> ":pserver:user@host:/repo"
            $ret = join( ":", split( /,/, $sline ));
        }
    }
    &CTUDebug( "out of CTCvsServerLine\n" ) ;
    $ret ;
}
# if needed log into a cvs server
# input is in:
# $_[0] = server line
#
# output:
# return success or failure
sub CTCvsLogin {
    &CTUDebug( "in CTCvsLogin\n" ) ;
    local( $ret ) = 0 ;
    &CTUDebug( "server line is '" . $_[0] . "'\n" ) ;
    if ( $_[0] ne "" ) {
        # look for an existing entry for this server in ~/.cvspass ; if one
        # is present we are already logged in and can skip 'cvs login'.
        # HOME is set the same way on WIN32 and unix, so no platform split
        # is needed here.
        local( $path ) = $ENV{"HOME"} . "/.cvspass" ;
        local( *PASSFILE ) ;
        local( $passdone ) = 0 ;
        &CTUDebug( "looking for '" . $path . "'\n" ) ;
        if ( -e $path ) {
            open( PASSFILE, "< $path" ) ;
            while ( <PASSFILE> ) {
                s/\n$// ;
                local( @line ) = split ;
                # the server line is in [0] and the password in [1]
                &CTUDebug( "server line from .cvspass is '" . $line[0] .
                           "'\n" ) ;
                if ( $line[0] eq $_[0] ) {
                    # we're fine, we're already logged in to that
                    $ret = 1 ;
                    $passdone = 1 ;
                }
            }
            # the original leaked this handle; close it explicitly
            close( PASSFILE ) ;
        } else {
            &CTUDebug( $path . " file does not exist\n" ) ;
        }
        if ( ! $passdone ) {
            # no cached credentials for this server; actually log in
            local( $line ) = "cvs -d " . $_[0] . " login >/dev/null" ;
            &CTUDebug( "about to run '" . $line . "'\n" ) ;
            $ret = &CTURetCode( system( $line )) ;
        }
    }
    &CTUDebug( "out of CTCvsLogin\n" ) ;
    $ret ;
}
require "$tool/built/include/ctproj.pl" ;
# add a versioned element to the repository
# input is in:
# $_[0] = element
# $_[1] = project
# $_[2] = spec line
# $_[3] = possible comment
#
# output:
# return success or failure
sub CTCvsAdd {
    &CTUDebug( "in CTCvsAdd\n" ) ;
    # first we need to 'login' to the repository
    local( $comment ) = &CTCvsFormatComment( $_[3] ) ;
    local( $serve ) = &CTCvsServerLine( $_[1], $_[2] ) ;
    local( $ret ) = &CTCvsLogin( $serve ) ;
    if ( $ret ) {
        # now issue the add command
        local( $root ) = &CTProjRoot( $_[1] ) ;
        local( $line ) = "" ;
        local( $elem ) = $_[0] ;
        if ( $elem =~ /^\// ) {
            # absolute path: run cvs from inside the project directory
            # (named by the upper-cased project env variable) and make the
            # element path relative to the project root
            local( $proj ) = $_[1] ;
            $proj =~ tr/a-z/A-Z/ ;
            $line = "cd \$" . $proj . "; " ;
            $elem =~ s/^$root\/// ;
        }
        $line = $line . "cvs -d " . $serve . " add " . $comment . " $elem" ;
        &CTUDebug( "about to execute '" . $line . "'\n" ) ;
        $ret = &CTURetCode( system( $line )) ;
    }
    &CTUDebug( "out of CTCvsAdd\n" ) ;
    $ret ;
}
# ci a versioned element to the repository
# input is in:
# $_[0] = element
# $_[1] = project
# $_[2] = spec line
# $_[3] = possible comment
#
# output:
# return success or failure
sub CTCvsCi {
    &CTUDebug( "in CTCvsCi\n" ) ;
    # log into the repository before attempting the commit
    local( $comment ) = &CTCvsFormatComment( $_[3] ) ;
    local( $serve ) = &CTCvsServerLine( $_[1], $_[2] ) ;
    local( $ret ) = &CTCvsLogin( $serve ) ;
    if ( $ret ) {
        local( $root ) = &CTProjRoot( $_[1] ) ;
        local( $elem ) = $_[0] ;
        local( $line ) = "" ;
        # absolute paths are committed from inside the project directory
        # (named by the upper-cased project env variable), with the element
        # path rewritten relative to the project root
        if ( $elem =~ /^\// ) {
            local( $proj ) = $_[1] ;
            $proj =~ tr/a-z/A-Z/ ;
            $elem =~ s/^$root\/// ;
            $line = "cd \$" . $proj . "; " ;
        }
        $line .= "cvs -d " . $serve . " ci " . $comment . " $elem" ;
        &CTUDebug( "about to execute '" . $line . "'\n" ) ;
        $ret = &CTURetCode( system( $line )) ;
    }
    &CTUDebug( "out of CTCvsCi\n" ) ;
    $ret ;
}
# rm a versioned element from the repository
# input is in:
# $_[0] = element
# $_[1] = project
# $_[2] = spec line
#
# output:
# return success or failure
sub CTCvsRm {
    &CTUDebug( "in CTCvsRm\n" ) ;
    # a login must succeed before we can talk to the repository
    local( $serve ) = &CTCvsServerLine( $_[1], $_[2] ) ;
    local( $ret ) = &CTCvsLogin( $serve ) ;
    if ( $ret ) {
        # schedule the element for removal from the repository
        local( $line ) = "cvs -d " . $serve . " rm $_[0]\n" ;
        $ret = &CTURetCode( system( $line )) ;
    }
    &CTUDebug( "out of CTCvsRm\n" ) ;
    $ret ;
}
# make a versioned directory
# input is in:
# $_[0] = directory to create
# $_[1] = project
# $_[2] = spec line
# $_[3] = possible comment
#
# output:
# return success or failure
sub CTCvsMkdir {
    &CTUDebug( "in CTCvsMkdir\n" ) ;
    local( $ret ) = 0 ;
    # first make the dir
    $ret = &CTURetCode( system( "mkdir $_[0]\n" )) ;
    if ( $ret ) {
        # now version it; directories in CVS only need 'cvs add', there is
        # no separate commit step for them
        $ret = &CTCvsAdd( $_[0], $_[1], $_[2], $_[3] ) ;
    } else {
        &CTUDebug( "could not create directory '" . $_[0] . "'\n" ) ;
        $ret = 0 ;
    }
    &CTUDebug( "out of CTCvsMkdir\n" ) ;
    $ret ;
}
# make a versioned element
# input is in:
# $_[0] = element to version
# $_[1] = project
# $_[2] = spec line
# $_[3] = possible comment
#
# output:
# return success or failure
sub CTCvsMkelem {
    &CTUDebug( "in CTCvsMkelem\n" ) ;
    # first cvs add the file
    local( $ret ) = &CTCvsAdd( $_[0], $_[1], $_[2], $_[3] ) ;
    if ( $ret ) {
        # now commit it, so the element actually exists in the repository
        $ret = &CTCvsCi( $_[0], $_[1], $_[2], $_[3] ) ;
    } else {
        &CTUDebug( "could not CVS add '" . $_[0] . "'\n" ) ;
        $ret = 0 ;
    }
    &CTUDebug( "out of CTCvsMkelem\n" ) ;
    $ret ;
}
# delta an element
# input is in:
# $_[0] = element to delta
# $_[1] = project
# $_[2] = spec line
#
# output:
# return success or failure
sub CTCvsDelta {
    &CTUDebug( "in CTCvsDelta\n" ) ;
    local( $ret ) = 0 ;
    # for lack of better idea, this is going to be just checkin for now
    if ( -d $_[0] ) {
        # we don't version directories in CVS
        $ret = 1 ;
    } else {
        # pass an explicit empty comment: CTCvsCi takes four arguments and
        # would otherwise receive undef for the comment
        $ret = &CTCvsCi( $_[0], $_[1], $_[2], "" ) ;
    }
    &CTUDebug( "out of CTCvsDelta\n" ) ;
    $ret ;
}
# checkout an element
# input is in:
# $_[0] = element to checkout
# $_[1] = project
# $_[2] = spec line
# $_[3] = possible comment
#
# output:
# return success or failure
sub CTCvsCheckout {
    &CTUDebug( "in CTCvsCheckout\n" ) ;
    # CVS has no explicit per-element 'checkout' operation the way a
    # lock-based VCS does, so this is always a successful no-op
    local( $ret ) = 1 ;
    &CTUDebug( "out of CTCvsCheckout\n" ) ;
    $ret ;
}
# checkin an element
# input is in:
# $_[0] = element to checkin
# $_[1] = project
# $_[2] = spec line
# $_[3] = possible comment
#
# output:
# return success or failure
sub CTCvsCheckin {
    &CTUDebug( "in CTCvsCheckin\n" ) ;
    local( $ret ) = 0 ;
    if ( -d $_[0] ) {
        # we don't version directories in CVS
        $ret = 1 ;
    } else {
        # a checkin is simply a commit of the element
        $ret = &CTCvsCi( $_[0], $_[1], $_[2], $_[3] ) ;
    }
    &CTUDebug( "out of CTCvsCheckin\n" ) ;
    $ret ;
}
# uncheckout an element
# input is in:
# $_[0] = element to uncheckout
# $_[1] = project
# $_[2] = spec line
#
# output:
# return success or failure
sub CTCvsUncheckout {
    &CTUDebug( "in CTCvsUncheckout\n" ) ;
    local( $ret ) = 0 ;
    if ( -d $_[0] ) {
        # we don't version directories in CVS
        $ret = 1 ;
    } else {
        # discard local modifications by deleting the working copy and
        # letting 'cvs update' fetch a pristine copy from the repository
        $ret = &CTURetCode( system( "rm $_[0]" ) ) ;
        if ( $ret ) {
            local( $serve ) = &CTCvsServerLine( $_[1], $_[2] ) ;
            $ret = &CTCvsLogin( $serve ) ;
            if ( $ret ) {
                $ret = &CTURetCode( system( "cvs -d " . $serve . " update " .
                                            $_[0] )) ;
            }
        }
    }
    &CTUDebug( "out of CTCvsUncheckout\n" ) ;
    $ret ;
}
# figure out what all I have checked out
# input is in:
# $_[0] = project
# $_[1] = flavor
# $_[2] = spec line
#
# output:
# return a \n separated list of elements checked out
sub CTCvsIHave {
    &CTUDebug( "in CTCvsIHave\n" ) ;
    local( $ret ) = "" ;
    local( $proj ) = $_[0] ;
    $proj =~ tr/a-z/A-Z/ ;
    # run from inside the project directory (named by the upper-cased
    # project env variable) so the update report uses relative paths
    local( $line ) = "cd \$" . $proj . "; " ;
    local( $serve ) = &CTCvsServerLine( $_[0], $_[2] ) ;
    local( $ok ) = &CTCvsLogin( $serve ) ;
    if ( $ok ) {
        # 'cvs -n update' is a dry run: it reports per-file status codes
        # without touching the working copy
        $line = $line . "cvs -n -d " . $serve . " update 2>/dev/null" ;
        local( $hold ) = "";
        local( *OUTPUT ) ;
        open( OUTPUT, $line . " |" ) ;
        while ( <OUTPUT> ) {
            $hold = $hold . $_ ;
        }
        close( OUTPUT ) ;
        local( @lines ) = split( /\n/, $hold ) ;
        local( $item ) ;
        # each line is "<code> <file>"; only 'M' (locally modified) counts
        # as "checked out" for our purposes
        foreach $item ( @lines ) {
            if ( $item =~ /^\?/ ) {
                # things that start with a ? (unversioned files) are ignored
            } elsif ( $item =~ /^cvs/ ) {
                # messages from the server are also ignored
            } elsif ( $item =~ /^P/ ) {
                # new files are ignored
            } elsif ( $item =~ /^U/ ) {
                # updates are ignored
            } elsif ( $item =~ /^M/ ) {
                # here's one we modified
                local( @foo ) = split( / /, $item ) ;
                $ret = $ret . $foo[1] . "\n" ;
            } else {
                # don't know what this means, better complain
                local( @foo ) = split( / /, $item ) ;
                print STDERR "got unknown update code '" . $foo[0] .
                    "' for file '" . $foo[1] . "'\n" ;
            }
        }
    }
    &CTUDebug( "out of CTCvsIHave\n" ) ;
    $ret ;
}
# remove an element from the repository
# input is in:
# $_[0] = element to remove
# $_[1] = project
# $_[2] = spec line
#
# output:
# return success or failure
sub CTCvsRmElem {
    &CTUDebug( "in CTCvsRmElem\n" ) ;
    local( $ret ) = 0 ;
    if ( -d $_[0] ) {
        # CVS doesn't really do this. If there are no files in the directory,
        # the next time an update -P is run, it will be deleted.
        $ret = 1 ;
    } else {
        # delete the working copy, schedule the removal, then commit the
        # removal so it takes effect in the repository.
        # NOTE(review): CTCvsCi is called without a comment argument here;
        # it is treated as empty — confirm that is intended.
        $ret = &CTURetCode( system( "rm $_[0]" ) ) ;
        if ( $ret ) {
            $ret = &CTCvsRm( $_[0], $_[1], $_[2] ) ;
            if ( $ret ) {
                $ret = &CTCvsCi( $_[0], $_[1], $_[2] ) ;
            }
        }
    }
    &CTUDebug( "out of CTCvsRmElem\n" ) ;
    $ret ;
}
# move a versioned element from one name to another
# input is in:
# $_[0] = from element
# $_[1] = to element
# $_[2] = project
# $_[3] = spec line
#
# output:
# return success or failure
sub CTCvsMv {
    &CTUDebug( "in CTCvsMv\n" ) ;
    local( $ret ) = 0 ;
    if ( -d $_[0] ) {
        # don't have code to do directories yet. See pp 54 of the CVS book
        $ret = 0 ;
    } else {
        # CVS has no native rename: move the file locally, record the old
        # name as removed and the new name as added, then commit both.
        # NOTE(review): CTCvsRm/CTCvsAdd/CTCvsCi receive no comment argument
        # here; the comment is treated as empty — confirm intended.
        $ret = &CTURetCode( system( "mv $_[0] $_[1]" ) ) ;
        if ( $ret ) {
            $ret = &CTCvsRm( $_[0], $_[2], $_[3] ) ;
            if ( $ret ) {
                $ret = &CTCvsAdd( $_[1], $_[2], $_[3] );
                if ( $ret ) {
                    # commit the removal of the old name ...
                    $ret = &CTCvsCi( $_[0], $_[2], $_[3] ) ;
                    if ( $ret ) {
                        # ... and then the addition of the new one
                        $ret = &CTCvsCi( $_[1], $_[2], $_[3] ) ;
                    }
                }
            }
        }
    }
    &CTUDebug( "out of CTCvsMv\n" ) ;
    $ret ;
}
# build a list of targets
# input is in:
# $_[0] = targets
#
# output:
# return success or failure
sub CTCvsMake {
    &CTUDebug( "in CTCvsMake\n" ) ;
    # simply hand the requested targets off to make
    local( $line ) = "make " . $_[0] . "\n" ;
    local( $ret ) = &CTURetCode( system( $line )) ;
    &CTUDebug( "out of CTCvsMake\n" ) ;
    $ret ;
}
1;
| hj3938/panda3d | dtool/src/attach/ctcvs.pl | Perl | bsd-3-clause | 11,153 |
<html>
<body bgcolor="white">
<script language="JavaScript">
// Send a query to the browser process.
function execURLRequest() {
  // Give immediate feedback in the textarea while the request is in flight.
  document.getElementById('ta').value = 'Request pending...';
  // Results in a call to the OnQuery method in urlrequest_test.cpp
  window.cefQuery({
    request: 'URLRequestTest:' + document.getElementById("url").value,
    // Called by the native side with the response body on success.
    onSuccess: function(response) {
      document.getElementById('ta').value = response;
    },
    // Called by the native side with an error code and message on failure.
    onFailure: function(error_code, error_message) {
      document.getElementById('ta').value = 'Failed with error ' + error_message + ' (' + error_code + ')';
    }
  });
}
</script>
<form>
URL: <input type="text" id="url" value="http://www.google.com">
<br/><input type="button" onclick="execURLRequest();" value="Execute CefURLRequest">
<br/><textarea rows="10" cols="40" id="ta"></textarea>
</form>
</body>
</html>
| svn2github/ttserver | tests/cefclient/resources/urlrequest.html | HTML | bsd-3-clause | 869 |
import os
from unittest import TestCase
from django.template import Engine
from .utils import TEMPLATE_DIR
class OriginTestCase(TestCase):
    """Tests for template ``Origin`` equality semantics and its repr."""

    def setUp(self):
        # Fresh engine per test, loading templates from the fixture directory.
        self.engine = Engine(dirs=[TEMPLATE_DIR])

    def test_origin_compares_equal(self):
        # Two loads of the same template must yield equal origins.
        a = self.engine.get_template('index.html')
        b = self.engine.get_template('index.html')
        self.assertEqual(a.origin, b.origin)
        # Use assertIs() to test __eq__/__ne__.
        self.assertIs(a.origin == b.origin, True)
        self.assertIs(a.origin != b.origin, False)

    def test_origin_compares_not_equal(self):
        # Same file name under different directories -> distinct origins.
        a = self.engine.get_template('first/test.html')
        b = self.engine.get_template('second/test.html')
        self.assertNotEqual(a.origin, b.origin)
        # Use assertIs() to test __eq__/__ne__.
        self.assertIs(a.origin == b.origin, False)
        self.assertIs(a.origin != b.origin, True)

    def test_repr(self):
        # The repr embeds the absolute template path.
        a = self.engine.get_template('index.html')
        name = os.path.join(TEMPLATE_DIR, 'index.html')
        self.assertEqual(repr(a.origin), '<Origin name=%r>' % name)
| atul-bhouraskar/django | tests/template_tests/test_origin.py | Python | bsd-3-clause | 1,111 |
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ax_action_data.h"
#include "ax_enums.h"
namespace ui {
// Default-constructs an action request with every enum field set to its
// kNone sentinel; callers fill in the fields relevant to the action they
// want performed.
AXActionData::AXActionData()
    : action(ax::mojom::Action::kNone),
      hit_test_event_to_fire(ax::mojom::Event::kNone),
      horizontal_scroll_alignment(ax::mojom::ScrollAlignment::kNone),
      vertical_scroll_alignment(ax::mojom::ScrollAlignment::kNone),
      scroll_behavior(ax::mojom::ScrollBehavior::kNone) {}

// Copying and destruction are trivial member-wise operations.
AXActionData::AXActionData(const AXActionData& other) = default;

AXActionData::~AXActionData() = default;
} // namespace ui
| flutter/engine | third_party/accessibility/ax/ax_action_data.cc | C++ | bsd-3-clause | 682 |
/*
* Copyright (C) 2014 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SVGAnimatedRect_h
#define SVGAnimatedRect_h
#include "core/svg/SVGRectTearOff.h"
#include "core/svg/properties/SVGAnimatedProperty.h"
namespace blink {
// Animated wrapper for an SVGRect attribute (e.g. viewBox).  The initial
// value is constructed in the "invalid" state so a rect only becomes valid
// once the attribute is actually parsed.
class SVGAnimatedRect : public SVGAnimatedProperty<SVGRect> {
public:
    static PassRefPtr<SVGAnimatedRect> create(SVGElement* contextElement, const QualifiedName& attributeName)
    {
        return adoptRef(new SVGAnimatedRect(contextElement, attributeName));
    }

protected:
    // Protected: instances must be created through create() so they are
    // always reference-counted from birth.
    SVGAnimatedRect(SVGElement* contextElement, const QualifiedName& attributeName)
        : SVGAnimatedProperty<SVGRect>(contextElement, attributeName, SVGRect::create(SVGRect::InvalidSVGRectTag()))
    {
    }
};
} // namespace blink
#endif // SVGAnimatedRect_h
| xin3liang/platform_external_chromium_org_third_party_WebKit | Source/core/svg/SVGAnimatedRect.h | C | bsd-3-clause | 2,275 |
// Copyright 2014 The Crashpad Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "snapshot/exception_snapshot.h"
#include "snapshot/system_snapshot.h"
#include "snapshot/test/test_process_snapshot.h"
namespace crashpad {
namespace test {
// Test double: every field starts zeroed/empty; tests populate the canned
// values through the setters declared in the header.
TestProcessSnapshot::TestProcessSnapshot()
    : process_id_(0),
      parent_process_id_(0),
      snapshot_time_(),
      process_start_time_(),
      process_cpu_user_time_(),
      process_cpu_system_time_(),
      report_id_(),
      client_id_(),
      annotations_simple_map_(),
      system_(),
      threads_(),
      modules_(),
      exception_(),
      memory_map_(),
      handles_(),
      extra_memory_() {
}

TestProcessSnapshot::~TestProcessSnapshot() {
}
// The following overrides simply return (or copy into out-parameters) the
// canned values configured by the test.

pid_t TestProcessSnapshot::ProcessID() const {
  return process_id_;
}

pid_t TestProcessSnapshot::ParentProcessID() const {
  return parent_process_id_;
}

void TestProcessSnapshot::SnapshotTime(timeval* snapshot_time) const {
  *snapshot_time = snapshot_time_;
}

void TestProcessSnapshot::ProcessStartTime(timeval* start_time) const {
  *start_time = process_start_time_;
}

void TestProcessSnapshot::ProcessCPUTimes(timeval* user_time,
                                          timeval* system_time) const {
  *user_time = process_cpu_user_time_;
  *system_time = process_cpu_system_time_;
}

void TestProcessSnapshot::ReportID(UUID* report_id) const {
  *report_id = report_id_;
}

void TestProcessSnapshot::ClientID(UUID* client_id) const {
  *client_id = client_id_;
}

const std::map<std::string, std::string>&
TestProcessSnapshot::AnnotationsSimpleMap() const {
  return annotations_simple_map_;
}
const SystemSnapshot* TestProcessSnapshot::System() const {
  return system_.get();
}

// Threads()/Modules() hand out non-owning raw pointers copied from the
// internal containers.  NOTE(review): the containers appear to own the
// snapshots (see the member declarations in the header) — callers must not
// hold these pointers past the TestProcessSnapshot's lifetime.
std::vector<const ThreadSnapshot*> TestProcessSnapshot::Threads() const {
  std::vector<const ThreadSnapshot*> threads;
  for (const ThreadSnapshot* thread : threads_) {
    threads.push_back(thread);
  }
  return threads;
}

std::vector<const ModuleSnapshot*> TestProcessSnapshot::Modules() const {
  std::vector<const ModuleSnapshot*> modules;
  for (const ModuleSnapshot* module : modules_) {
    modules.push_back(module);
  }
  return modules;
}

std::vector<UnloadedModuleSnapshot> TestProcessSnapshot::UnloadedModules()
    const {
  return unloaded_modules_;
}
const ExceptionSnapshot* TestProcessSnapshot::Exception() const {
  return exception_.get();
}

// As with Threads()/Modules(), these return non-owning views of the canned
// state installed by the test.
std::vector<const MemoryMapRegionSnapshot*> TestProcessSnapshot::MemoryMap()
    const {
  std::vector<const MemoryMapRegionSnapshot*> memory_map;
  for (const auto& item : memory_map_)
    memory_map.push_back(item);
  return memory_map;
}

std::vector<HandleSnapshot> TestProcessSnapshot::Handles() const {
  return handles_;
}

std::vector<const MemorySnapshot*> TestProcessSnapshot::ExtraMemory() const {
  std::vector<const MemorySnapshot*> extra_memory;
  for (const auto& em : extra_memory_)
    extra_memory.push_back(em);
  return extra_memory;
}
} // namespace test
} // namespace crashpad
| danakj/chromium | third_party/crashpad/crashpad/snapshot/test/test_process_snapshot.cc | C++ | bsd-3-clause | 3,526 |
import django
from django.forms import * # NOQA
from material.base import LayoutMixin as ViewformLayoutMixin
class SourceCodeMixin(object):
def source(self):
import inspect
import itertools
lines = inspect.getsourcelines(self.__class__)[0]
lines = [x for x in itertools.takewhile(lambda x: not x.strip().startswith('template'), lines)]
return ''.join(lines)
# Demo form base class: a plain Django form that can also render its own
# source code via SourceCodeMixin.
class Form(SourceCodeMixin, django.forms.Form):
    pass


# Layout mixin for the demos: django-material's layout support plus source
# display.
class LayoutMixin(SourceCodeMixin, ViewformLayoutMixin):
    pass
| pombredanne/django-material | tests/demo.py | Python | bsd-3-clause | 533 |
require 'spec_helper'
describe 'API Models' do
before :all do
module Entities
class Something < Grape::Entity
expose :text, documentation: { type: 'string', desc: 'Content of something.' }
expose :links, documentation: { type: 'link', is_array: true }
end
end
module Entities
class EnumValues < Grape::Entity
expose :gender, documentation: { type: 'string', desc: 'Content of something.', values: %w(Male Female) }
expose :number, documentation: { type: 'integer', desc: 'Content of something.', values: proc { [1, 2] } }
end
end
module Entities
module Some
class Thing < Grape::Entity
expose :text, documentation: { type: 'string', desc: 'Content of something.' }
end
end
end
module Entities
class ComposedOf < Grape::Entity
expose :part_text, documentation: { type: 'string', desc: 'Content of composedof.' }
end
class ComposedOfElse < Grape::Entity
def self.entity_name
'composed'
end
expose :part_text, documentation: { type: 'string', desc: 'Content of composedof else.' }
end
class SomeThingElse < Grape::Entity
expose :else_text, documentation: { type: 'string', desc: 'Content of something else.' }
expose :parts, using: Entities::ComposedOf, documentation: { type: 'ComposedOf',
is_array: true,
required: true }
expose :part, using: Entities::ComposedOfElse, documentation: { type: 'composes' }
end
end
module Entities
class AliasedThing < Grape::Entity
expose :something, as: :post, using: Entities::Something, documentation: { type: 'Something', desc: 'Reference to something.' }
end
end
module Entities
class FourthLevel < Grape::Entity
expose :text, documentation: { type: 'string' }
end
class ThirdLevel < Grape::Entity
expose :parts, using: Entities::FourthLevel, documentation: { type: 'FourthLevel' }
end
class SecondLevel < Grape::Entity
expose :parts, using: Entities::ThirdLevel, documentation: { type: 'ThirdLevel' }
end
class FirstLevel < Grape::Entity
expose :parts, using: Entities::SecondLevel, documentation: { type: 'SecondLevel' }
end
end
end
module Entities
class QueryInputElement < Grape::Entity
expose :key, documentation: {
type: String, desc: 'Name of parameter', required: true }
expose :value, documentation: {
type: String, desc: 'Value of parameter', required: true }
end
class QueryInput < Grape::Entity
expose :elements, using: Entities::QueryInputElement, documentation: {
type: 'QueryInputElement',
desc: 'Set of configuration',
param_type: 'body',
is_array: true,
required: true
}
end
class QueryResult < Grape::Entity
expose :elements_size, documentation: { type: Integer, desc: 'Return input elements size' }
end
end
module Entities
class ThingWithRoot < Grape::Entity
root 'things', 'thing'
expose :text, documentation: { type: 'string', desc: 'Content of something.' }
end
end
def app
Class.new(Grape::API) do
format :json
desc 'This gets something.', entity: Entities::Something
get '/something' do
something = OpenStruct.new text: 'something'
present something, with: Entities::Something
end
desc 'This gets thing.', entity: Entities::Some::Thing
get '/thing' do
thing = OpenStruct.new text: 'thing'
present thing, with: Entities::Some::Thing
end
desc 'This gets somthing else.', entity: Entities::SomeThingElse
get '/somethingelse' do
part = OpenStruct.new part_text: 'part thing'
thing = OpenStruct.new else_text: 'else thing', parts: [part], part: part
present thing, with: Entities::SomeThingElse
end
desc 'This tests the enum values in params and documentation.', entity: Entities::EnumValues, params: Entities::EnumValues.documentation
get '/enum_description_in_entity' do
enum_value = OpenStruct.new gender: 'Male', number: 1
present enum_value, with: Entities::EnumValues
end
desc 'This gets an aliased thing.', entity: Entities::AliasedThing
get '/aliasedthing' do
something = OpenStruct.new(something: OpenStruct.new(text: 'something'))
present something, with: Entities::AliasedThing
end
desc 'This gets all nested entities.', entity: Entities::FirstLevel
get '/nesting' do
fourth_level = OpenStruct.new text: 'something'
third_level = OpenStruct.new parts: [fourth_level]
second_level = OpenStruct.new parts: [third_level]
first_level = OpenStruct.new parts: [second_level]
present first_level, with: Entities::FirstLevel
end
desc 'This tests diffrent entity for input and diffrent for output',
entity: [Entities::QueryResult, Entities::QueryInput],
params: Entities::QueryInput.documentation
get '/multiple_entities' do
result = OpenStruct.new(elements_size: params[:elements].size)
present result, with: Entities::QueryResult
end
desc 'This gets thing_with_root.', entity: Entities::ThingWithRoot
get '/thing_with_root' do
thing = OpenStruct.new text: 'thing'
present thing, with: Entities::ThingWithRoot
end
add_swagger_documentation
end
end
context 'swagger_doc' do
subject do
get '/swagger_doc'
JSON.parse(last_response.body)
end
it 'returns a swagger-compatible doc' do
expect(subject).to include(
'apiVersion' => '0.1',
'swaggerVersion' => '1.2',
'info' => {},
'produces' => ['application/json']
)
end
it 'documents apis' do
expect(subject['apis']).to eq [
{ 'path' => '/something.{format}', 'description' => 'Operations about somethings' },
{ 'path' => '/thing.{format}', 'description' => 'Operations about things' },
{ 'path' => '/somethingelse.{format}', 'description' => 'Operations about somethingelses' },
{ 'path' => '/enum_description_in_entity.{format}', 'description' => 'Operations about enum_description_in_entities' },
{ 'path' => '/aliasedthing.{format}', 'description' => 'Operations about aliasedthings' },
{ 'path' => '/nesting.{format}', 'description' => 'Operations about nestings' },
{ 'path' => '/multiple_entities.{format}', 'description' => 'Operations about multiple_entities' },
{ 'path' => '/thing_with_root.{format}', 'description' => 'Operations about thing_with_roots' },
{ 'path' => '/swagger_doc.{format}', 'description' => 'Operations about swagger_docs' }
]
end
end
it 'returns type' do
get '/swagger_doc/something'
result = JSON.parse(last_response.body)
expect(result['apis'].first['operations'].first['type']).to eq 'Something'
end
it 'includes nested type' do
get '/swagger_doc/thing'
result = JSON.parse(last_response.body)
expect(result['apis'].first['operations'].first['type']).to eq 'Some::Thing'
end
it 'includes entities which are only used as composition' do
get '/swagger_doc/somethingelse'
result = JSON.parse(last_response.body)
expect(result['apis'][0]['path']).to start_with '/somethingelse'
expect(result['models']['SomeThingElse']).to include('id' => 'SomeThingElse',
'properties' => {
'else_text' => {
'type' => 'string',
'description' => 'Content of something else.'
},
'parts' => {
'type' => 'array',
'items' => { '$ref' => 'ComposedOf' }
},
'part' => { '$ref' => 'composes' }
},
'required' => ['parts']
)
expect(result['models']['ComposedOf']).to include(
'id' => 'ComposedOf',
'properties' => {
'part_text' => {
'type' => 'string',
'description' => 'Content of composedof.'
}
}
)
expect(result['models']['composed']).to include(
'id' => 'composed',
'properties' => {
'part_text' => {
'type' => 'string',
'description' => 'Content of composedof else.'
}
}
)
end
it 'includes enum values in params and documentation.' do
get '/swagger_doc/enum_description_in_entity'
result = JSON.parse(last_response.body)
expect(result['models']['EnumValues']).to eq(
'id' => 'EnumValues',
'properties' => {
'gender' => { 'type' => 'string', 'description' => 'Content of something.', 'enum' => %w(Male Female) },
'number' => { 'type' => 'integer', 'format' => 'int32', 'description' => 'Content of something.', 'enum' => [1, 2] }
}
)
expect(result['apis'][0]['operations'][0]).to include(
'parameters' =>
[
{ 'paramType' => 'query', 'name' => 'gender', 'description' => 'Content of something.', 'type' => 'string', 'required' => false, 'allowMultiple' => false, 'enum' => %w(Male Female) },
{ 'paramType' => 'query', 'name' => 'number', 'description' => 'Content of something.', 'type' => 'integer', 'required' => false, 'allowMultiple' => false, 'format' => 'int32', 'enum' => [1, 2] }
],
'type' => 'EnumValues'
)
end
it 'includes referenced models in those with aliased references.' do
get '/swagger_doc/aliasedthing'
result = JSON.parse(last_response.body)
expect(result['models']['AliasedThing']).to eq(
'id' => 'AliasedThing',
'properties' => {
'post' => { '$ref' => 'Something', 'description' => 'Reference to something.' }
}
)
expect(result['models']['Something']).to eq(
'id' => 'Something',
'properties' => {
'text' => { 'type' => 'string', 'description' => 'Content of something.' },
'links' => { 'type' => 'array', 'items' => { '$ref' => 'link' } }
}
)
end
it 'includes all entities with four levels of nesting' do
get '/swagger_doc/nesting'
result = JSON.parse(last_response.body)
expect(result['models']).to include('FirstLevel', 'SecondLevel', 'ThirdLevel', 'FourthLevel')
end
it 'includes all entities while using multiple entities' do
get '/swagger_doc/multiple_entities'
result = JSON.parse(last_response.body)
expect(result['models']).to include('QueryInput', 'QueryInputElement', 'QueryResult')
end
it 'includes an id equal to the model name' do
get '/swagger_doc/thing_with_root'
result = JSON.parse(last_response.body)
expect(result['models']['thing']['id']).to eq('thing')
end
end
| jhollinger/grape-swagger | spec/api_models_spec.rb | Ruby | mit | 11,596 |
// -- Sammy.js -- /plugins/sammy.handlebars.js
// http://sammyjs.org
// Version: 0.7.5
// Built: 2014-02-22 10:57:12 +0200
(function(factory){if(typeof define==="function"&&define.amd){define(["jquery","sammy","handlebars"],factory)}else{(window.Sammy=window.Sammy||{}).Handlebars=factory(window.jQuery,window.Sammy)}})(function($,Sammy,Handlebars){Handlebars=Handlebars||window.Handlebars;Sammy.Handlebars=function(app,method_alias){var handlebars_cache={};var handlebars=function(template,data,partials,name){if(typeof name=="undefined"){name=template}var fn=handlebars_cache[name];if(!fn){fn=handlebars_cache[name]=Handlebars.compile(template)}data=$.extend({},this,data);partials=$.extend({},data.partials,partials);return fn(data,{partials:partials})};if(!method_alias){method_alias="handlebars"}app.helper(method_alias,handlebars)};return Sammy.Handlebars}); | dragmove/SAMPLE_EJS_MONGODB | public/js/lib/sammy/plugins/sammy.handlebars-latest.min.js | JavaScript | mit | 864 |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<script src="../../list.js"></script>
<script src="../../page.js"></script>
<link type="text/css" rel="stylesheet" href="../../page.css" />
</head>
<body>
<h1>[name]</h1>
<div class="desc">Math utility functions</div>
<h2>Methods</h2>
<h3>.clamp( [page:Float x], [page:Float a], [page:Float b] ) [page:Float]</h3>
<div>
x — Value to be clamped.<br />
a — Minimum value<br />
b — Maximum value.
</div>
<div>
Clamps the *x* to be between *a* and *b*.
</div>
<h3>.clampBottom( [page:Float x], [page:Float a] ) [page:Float]</h3>
<div>
x — Value to be clamped.<br />
a — Minimum value
</div>
<div>
Clamps the *x* to be larger than *a*.
</div>
		<h3>.mapLinear( [page:Float x], [page:Float a1], [page:Float a2], [page:Float b1], [page:Float b2] ) [page:Float]</h3>
<div>
x — Value to be mapped.<br />
a1 — Minimum value for range A.<br />
a2 — Maximum value for range A.<br />
b1 — Minimum value for range B.<br />
b2 — Maximum value for range B.
</div>
<div>
Linear mapping of *x* from range [*a1*, *a2*] to range [*b1*, *b2*].
</div>
<h3>.random16() [page:Float]</h3>
<div>
Random float from 0 to 1 with 16 bits of randomness.<br />
Standard Math.random() creates repetitive patterns when applied over larger space.
</div>
<h3>.randInt( [page:Integer low], [page:Integer high] ) [page:Integer]</h3>
<div>
Random integer from *low* to *high* interval.
</div>
<h3>.randFloat( [page:Float low], [page:Float high] ) [page:Float]</h3>
<div>
Random float from *low* to *high* interval.
</div>
<h3>.randFloatSpread( [page:Float range] ) [page:Float]</h3>
<div>
Random float from *- range / 2* to *range / 2* interval.
</div>
<h3>.sign( [page:Float x] ) [page:Float]</h3>
<div>
Returns -1 if *x* is less than 0, 1 if *x* is greater than 0, and 0 if *x* is zero.
</div>
<h2>Source</h2>
[link:https://github.com/mrdoob/three.js/blob/master/src/[path].js src/[path].js]
</body>
</html>
| pixelsandcandy/sandbox-js | threeJS/mrdoob-three.js-3b4457e/docs/api/math/Math.html | HTML | mit | 2,033 |
/*
Copyright (c) 2012 Luis E. S. Dias - www.smartbyte.com.br
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
body {
    font-family: Arial;
}

/* NOTE: the original rules used the `font:` shorthand without a font-family
   (e.g. "font: normal normal bold 19pt"), which is invalid CSS — browsers
   drop the whole declaration, so those sizes/weights never applied.  The
   longhand properties below express the intended style; the font family is
   inherited from `body` (Arial). */

#reportManagerDisplay .reportName {
    font-weight: bold;
    font-size: 19pt;
    color: #333333;
}

#reportManagerDisplay .timestamp {
    margin-top: 15px;
    font-weight: normal;
    font-size: 8pt;
    color: #333333;
    float: left;
    clear: both;
}

#reportManagerDisplay .report
{
    margin: 0px;
    padding: 0px;
    /* draw only bottom/left/right edges on the table itself; the header row
       supplies the top edge */
    border-top-width: 0px;
    border-bottom-width: 1px;
    border-left-width: 1px;
    border-right-width: 1px;
    border-style: solid;
    border-color: #cccccc;
    border-collapse: collapse;
    font-weight: bold;
    font-size: 11pt;
    color: #ffffff;
}

#reportManagerDisplay .report tr.header th
{
    background: #333333;
    border-top-width: 0px;
    border-bottom-width: 1px;
    border-left-width: 0px;
    border-right-width: 0px;
    border-style: solid;
    border-color: #eeeeee;
    font-weight: normal;
    font-size: 10pt;
    color: #ffffff;
    text-align: left;
}

#reportManagerDisplay .report tr.body td
{
    background: #FFFFFF;
    border-top-width: 0px;
    border-bottom-width: 1px;
    border-left-width: 1px;
    border-right-width: 1px;
    border-style: solid;
    border-color: #CCCCCC;
    text-align: left;
    font-weight: normal;
    font-size: 9pt;
    color: #555555;
}

#reportManagerDisplay .report tr.footer td
{
    background: #FFFFFF;
    border-top-width: 0px;
    border-bottom-width: 1px;
    border-left-width: 1px;
    border-right-width: 1px;
    border-style: solid;
    border-color: #CCCCCC;
    font-weight: normal;
    font-size: 9pt;
    color: #555555;
    text-align: left;
}
## Python
These settings apply only when `--python` is specified on the command line.
Please also specify `--python-sdks-folder=<path to the root directory of your azure-sdk-for-python clone>`.
Use `--python-mode=update` if you already have a setup.py and just want to update the code itself.
``` yaml $(python)
python-mode: create
python:
azure-arm: true
license-header: MICROSOFT_MIT_NO_VERSION
payload-flattening-threshold: 2
namespace: azure.mgmt.cdn
package-name: azure-mgmt-cdn
package-version: 2.0.0
clear-output-folder: true
```
``` yaml $(python) && $(python-mode) == 'update'
python:
no-namespace-folders: true
output-folder: $(python-sdks-folder)/azure-mgmt-cdn/azure/mgmt/cdn
```
``` yaml $(python) && $(python-mode) == 'create'
python:
basic-setup-py: true
output-folder: $(python-sdks-folder)/azure-mgmt-cdn
```
| olydis/azure-rest-api-specs | specification/cdn/resource-manager/readme.python.md | Markdown | mit | 850 |
/*
* Title: CloudSim Toolkit
* Description: CloudSim (Cloud Simulation) Toolkit for Modeling and Simulation of Clouds
* Licence: GPL - http://www.gnu.org/copyleft/gpl.html
*
* Copyright (c) 2009-2012, The University of Melbourne, Australia
*/
package org.cloudbus.cloudsim.provisioners;
import java.util.List;
import org.cloudbus.cloudsim.Vm;
/**
* The Class PeProvisioner.
*
* @author Anton Beloglazov
* @since CloudSim Toolkit 2.0
*/
public abstract class PeProvisioner {

	/** The overall MIPS capacity of the Pe. */
	private double mips;

	/** The MIPS currently available (i.e. not allocated to any VM). */
	private double availableMips;

	/**
	 * Creates the new PeProvisioner. Initially all of the Pe's capacity is
	 * available.
	 * 
	 * @param mips overall amount of MIPS available in the Pe
	 * 
	 * @pre mips>=0
	 * @post $none
	 */
	public PeProvisioner(double mips) {
		setMips(mips);
		setAvailableMips(mips);
	}

	/**
	 * Allocates MIPS for a given VM.
	 * 
	 * @param vm virtual machine for which the MIPS are being allocated
	 * @param mips the mips
	 * 
	 * @return $true if the MIPS could be allocated; $false otherwise
	 * 
	 * @pre $none
	 * @post $none
	 */
	public abstract boolean allocateMipsForVm(Vm vm, double mips);

	/**
	 * Allocates MIPS for a VM identified by its uid.
	 * 
	 * @param vmUid the vm uid
	 * @param mips the mips
	 * 
	 * @return $true if the MIPS could be allocated; $false otherwise
	 * 
	 * @pre $none
	 * @post $none
	 */
	public abstract boolean allocateMipsForVm(String vmUid, double mips);

	/**
	 * Allocates MIPS for a given VM, one entry per virtual Pe.
	 * 
	 * @param vm virtual machine for which the MIPS are being allocated
	 * @param mips the mips for each virtual Pe
	 * 
	 * @return $true if the MIPS could be allocated; $false otherwise
	 * 
	 * @pre $none
	 * @post $none
	 */
	public abstract boolean allocateMipsForVm(Vm vm, List<Double> mips);

	/**
	 * Gets allocated MIPS for a given VM.
	 * 
	 * @param vm virtual machine for which the MIPS are being allocated
	 * 
	 * @return array of allocated MIPS
	 * 
	 * @pre $none
	 * @post $none
	 */
	public abstract List<Double> getAllocatedMipsForVm(Vm vm);

	/**
	 * Gets total allocated MIPS for a given VM over all of its virtual PEs.
	 * 
	 * @param vm virtual machine for which the MIPS are being allocated
	 * 
	 * @return total allocated MIPS
	 * 
	 * @pre $none
	 * @post $none
	 */
	public abstract double getTotalAllocatedMipsForVm(Vm vm);

	/**
	 * Gets allocated MIPS for a given VM for a given virtual Pe.
	 * 
	 * @param vm virtual machine for which the MIPS are being allocated
	 * @param peId the pe id
	 * 
	 * @return allocated MIPS
	 * 
	 * @pre $none
	 * @post $none
	 */
	public abstract double getAllocatedMipsForVmByVirtualPeId(Vm vm, int peId);

	/**
	 * Releases MIPS used by a VM.
	 * 
	 * @param vm the vm
	 * 
	 * @pre $none
	 * @post none
	 */
	public abstract void deallocateMipsForVm(Vm vm);

	/**
	 * Releases MIPS used by all VMs, restoring the full capacity of the Pe.
	 * 
	 * @pre $none
	 * @post none
	 */
	public void deallocateMipsForAllVms() {
		setAvailableMips(getMips());
	}

	/**
	 * Gets the MIPS.
	 * 
	 * @return the MIPS
	 */
	public double getMips() {
		return mips;
	}

	/**
	 * Sets the MIPS.
	 * 
	 * @param mips the MIPS to set
	 */
	public void setMips(double mips) {
		this.mips = mips;
	}

	/**
	 * Gets the available MIPS in the PE.
	 * 
	 * @return available MIPS
	 * 
	 * @pre $none
	 * @post $none
	 */
	public double getAvailableMips() {
		return availableMips;
	}

	/**
	 * Sets the available MIPS.
	 * 
	 * @param availableMips the availableMips to set
	 */
	protected void setAvailableMips(double availableMips) {
		this.availableMips = availableMips;
	}

	/**
	 * Gets the total allocated MIPS, clamped at zero so rounding noise never
	 * produces a negative allocation.
	 * 
	 * @return the total allocated MIPS (never negative)
	 */
	public double getTotalAllocatedMips() {
		return Math.max(0, getMips() - getAvailableMips());
	}

	/**
	 * Gets the utilization of the Pe as a fraction in [0, 1].
	 * Returns 0 for a Pe with no capacity instead of NaN
	 * (the previous implementation divided by zero when mips == 0).
	 * 
	 * @return the utilization
	 */
	public double getUtilization() {
		double capacity = getMips();
		if (capacity <= 0) {
			return 0;
		}
		return getTotalAllocatedMips() / capacity;
	}

}
| daitr-gu/cloudsim_simulator_v2.0 | src/org/cloudbus/cloudsim/provisioners/PeProvisioner.java | Java | mit | 3,996 |
<!DOCTYPE html>
<html lang="en"><head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="description" content="">
<meta name="author" content="">
<link rel="icon" HREF="favicon.ico">
<title>DataSet - vis.js - A dynamic, browser based visualization library.</title>
<!-- Bootstrap core CSS -->
<link href="../css/bootstrap.css" rel="stylesheet">
<!-- Tipue vendor css -->
<link href="../css/tipuesearch.css" rel="stylesheet">
<link href="../css/style.css" rel="stylesheet">
<!-- HTML5 shim and Respond.js for IE8 support of HTML5 elements and media queries -->
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/html5shiv/3.7.2/html5shiv.min.js"></script>
<script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script>
<![endif]-->
<link href="../css/prettify.css" type="text/css" rel="stylesheet"/>
<script type="text/javascript" src="../js/googleAnalytics.js"></script>
<script type="text/javascript" src="../js/prettify/prettify.js"></script>
<script src="../js/smooth-scroll.min.js"></script>
<script language="JavaScript">
smoothScroll.init();
</script>
<script type="text/javascript" src="../js/toggleTable.js"></script>
</head>
<body onload="prettyPrint();">
<div class="navbar-wrapper">
<div class="container">
<nav class="navbar navbar-inverse navbar-static-top" role="navigation">
<div class="container">
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar"
aria-expanded="false" aria-controls="navbar">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand hidden-sm" href="./index.html">vis.js</a>
</div>
<div id="navbar" class="navbar-collapse collapse">
<ul class="nav navbar-nav">
<li><a href="http://www.visjs.org/index.html#modules">Modules</a></li>
<li><a href="http://www.visjs.org/blog.html">Blog</a></li>
<li><a href="http://www.visjs.org/index.html#download_install">Download</a></li>
<li><a href="http://www.visjs.org/showcase/index.html">Showcase</a></li>
<li><a href="http://www.visjs.org/index.html#contribute">Contribute</a></li>
<li><a href="http://www.visjs.org/featureRequests.html">Feature requests</a></li>
<li><a href="http://www.visjs.org/index.html#licenses">License</a></li>
</ul>
<form class="navbar-form navbar-right" role="search">
<input name="q" id="tipue_search_input" autocomplete="off" type="text" class="form-control" placeholder="Enter keywords">
<button type="submit" class="btn btn-default">Go!</button>
</form>
<div id="search-results-wrapper" class="panel panel-default">
<div class="panel-body">
<div id="tipue_search_content"></div>
</div>
</div>
<div id="keyword-info" class="panel panel-success">
<div class="panel-body">
              Found <span id="keyword-count"></span> results. Click <a id="keyword-jumper-button" href="">here</a> to jump to the first keyword occurrence!
</div>
</div>
</div>
</div>
</nav>
</div>
</div>
<a href="https://github.com/almende/vis" class="hidden-xs hidden-sm hidden-md"><img style="position: absolute; top: 0; right: 0; border: 0;" src="https://camo.githubusercontent.com/38ef81f8aca64bb9a64448d0d70f1308ef5341ab/68747470733a2f2f73332e616d617a6f6e6177732e636f6d2f6769746875622f726962626f6e732f666f726b6d655f72696768745f6461726b626c75655f3132313632312e706e67" alt="Fork me on GitHub" data-canonical-src="https://s3.amazonaws.com/github/ribbons/forkme_right_darkblue_121621.png"></a>
<div class="container full">
<h1>DataSet</h1>
<h2 id="Contents">Contents</h2>
<ul>
<li><a href="#Overview">Overview</a></li>
<li><a href="#Example">Example</a></li>
<li><a href="#Construction">Construction</a></li>
<li><a href="#Methods">Methods</a></li>
<li><a href="#Properties">Properties</a></li>
<li><a href="#Subscriptions">Subscriptions</a></li>
<li><a href="#Data_Manipulation">Data Manipulation</a></li>
<li><a href="#Data_Selection">Data Selection</a></li>
</ul>
<h2 id="Overview">Overview</h2>
<p>
Vis.js comes with a flexible DataSet, which can be used to hold and
manipulate unstructured data and listen for changes in the data.
The DataSet is key/value based. Data items can be added, updated and
removed from the DataSet, and one can subscribe to changes in the DataSet.
The data in the DataSet can be filtered and ordered, and fields (like
dates) can be converted to a specific type. Data can be normalized when
appending it to the DataSet as well.
</p>
<h2 id="Example">Example</h2>
<p>
The following example shows how to use a DataSet.
</p>
<pre class="prettyprint lang-js">
// create a DataSet
var options = {};
var data = new vis.DataSet(options);
// add items
// note that the data items can contain different properties and data formats
data.add([
{id: 1, text: 'item 1', date: new Date(2013, 6, 20), group: 1, first: true},
{id: 2, text: 'item 2', date: '2013-06-23', group: 2},
{id: 3, text: 'item 3', date: '2013-06-25', group: 2},
{id: 4, text: 'item 4'}
]);
// subscribe to any change in the DataSet
data.on('*', function (event, properties, senderId) {
console.log('event', event, properties);
});
// update an existing item
data.update({id: 2, group: 1});
// remove an item
data.remove(4);
// get all ids
var ids = data.getIds();
console.log('ids', ids);
// get a specific item
var item1 = data.get(1);
console.log('item1', item1);
// retrieve a filtered subset of the data
var items = data.get({
filter: function (item) {
return item.group == 1;
}
});
console.log('filtered items', items);
// retrieve formatted items
var items = data.get({
fields: ['id', 'date'],
type: {
date: 'ISODate'
}
});
console.log('formatted items', items);
</pre>
<h2 id="Construction">Construction</h2>
<p>
A DataSet can be constructed as:
</p>
<pre class="prettyprint lang-js">
var data = new vis.DataSet([data] [, options])
</pre>
<p>
After construction, data can be added to the DataSet using the methods
<code>add</code> and <code>update</code>, as described in section
<a href="#Data_Manipulation">Data Manipulation</a>.
</p>
<p>
The parameter <code>data</code> is optional and is an Array with items.
</p>
<p>
The parameter <code>options</code> is optional and is an object which can
contain the following properties:
</p>
<table class="options">
<tr>
<th>Name</th>
<th>Type</th>
<th>Default value</th>
<th>Description</th>
</tr>
<tr>
<td>fieldId</td>
<td>String</td>
<td>"id"</td>
<td>
The name of the field containing the id of the items.
When data is fetched from a server which uses some specific
field to identify items, this field name can be specified
in the DataSet using the option <code>fieldId</code>.
For example <a href="http://couchdb.apache.org/"
target="_blank">CouchDB</a> uses the field
<code>"_id"</code> to identify documents.
</td>
</tr>
<tr>
<td>type</td>
<td>Object.<String, String></td>
<td>none</td>
<td>
An object containing field names as key, and data types as
value. By default, the type of the properties of items are left
unchanged. Item properties can be normalized by specifying a
field type. This is useful for example to automatically convert
stringified dates coming from a server into JavaScript Date
objects. The available data types are listed in section
<a href="#Data_Types">Data Types</a>.
</td>
</tr>
<tr>
<td>queue</td>
<td>Object | boolean</td>
<td>none</td>
<td>
Queue data changes ('add', 'update', 'remove') and flush them at once.
The queue can be flushed manually by calling
<code>DataSet.flush()</code>, or can be flushed after a configured delay
or maximum number of entries.
<br>
<br>
When <code>queue</code> is true, a queue is created
with default options. Options can be specified by providing an object:
<ul>
<li><code>delay: number</code><br>
The queue will be flushed automatically after an inactivity of this
delay in milliseconds. Default value is <code>null</code>.
<li><code>max: number</code><br>
When the queue exceeds the given maximum number
of entries, the queue is flushed automatically.
Default value is <code>Infinity</code>.
</li>
</ul>
</td>
</tr>
</table>
<h2 id="Methods">Methods</h2>
<p>DataSet contains the following methods.</p>
<table class="methods">
<tr>
<th>Method</th>
<th>Return Type</th>
<th>Description</th>
</tr>
<tr>
<td>add(data [, senderId])</td>
<td>Number[]</td>
<td>Add one or multiple items to the DataSet. <code>data</code> can be a single item or an array with items. Adding an item will fail when there already is an item with the same id. The function returns an array with the ids of the added items. See section <a href="#Data_Manipulation">Data Manipulation</a>.</td>
</tr>
<tr>
<td>clear([senderId])</td>
<td>Number[]</td>
<td>Clear all data from the DataSet. The function returns an array with the ids of the removed items.</td>
</tr>
<tr>
<td>distinct(field)</td>
<td>Array</td>
      <td>Find all distinct values of a specified field. Returns an unordered array containing all distinct values. Data items that do not contain the specified field are ignored.</td>
</tr>
<tr>
<td>flush()</td>
<td>none</td>
<td>Flush queued changes. Only available when the DataSet is configured with the option <code>queue</code>, see section <a href="#Construction">Construction</a>.</td>
</tr>
<tr>
<td>forEach(callback [, options])</td>
<td>none</td>
<td>
Execute a callback function for every item in the dataset.
The available options are described in section <a href="#Data_Selection">Data Selection</a>.
</td>
</tr>
<tr>
<td>
get([options] [, data])<br>
get(id [,options] [, data])<br>
get(ids [, options] [, data])
</td>
<td>Object | Array</td>
<td>
Get a single item, multiple items, or all items from the DataSet.
        Usage examples can be found in section <a href="#Getting_Data">Getting Data</a>, and the available <code>options</code> are described in section <a href="#Data_Selection">Data Selection</a>. When no item is found, <code>null</code> is returned when a single item was requested, and an empty Array is returned in case of multiple ids.
</td>
</tr>
<tr>
<td>
getDataSet()
</td>
<td>DataSet</td>
<td>
Get the DataSet itself. In case of a DataView, this function does not
return the DataSet to which the DataView is connected.
</td>
</tr>
<tr>
<td>
getIds([options])
</td>
<td>Number[]</td>
<td>
Get ids of all items or of a filtered set of items.
Available <code>options</code> are described in section <a href="#Data_Selection">Data Selection</a>, except that options <code>fields</code> and <code>type</code> are not applicable in case of <code>getIds</code>.
</td>
</tr>
<tr>
<td>map(callback [, options])</td>
<td>Array</td>
<td>
Map every item in the DataSet.
The available options are described in section <a href="#Data_Selection">Data Selection</a>.
</td>
</tr>
<tr>
<td>max(field)</td>
<td>Object | null</td>
<td>
Find the item with maximum value of specified field. Returns <code>null</code> if no item is found.
</td>
</tr>
<tr>
<td>min(field)</td>
<td>Object | null</td>
<td>
Find the item with minimum value of specified field. Returns <code>null</code> if no item is found.
</td>
</tr>
<tr>
<td>off(event, callback)</td>
<td>none</td>
<td>
Unsubscribe from an event, remove an event listener. See section <a href="#Subscriptions">Subscriptions</a>.
</td>
</tr>
<tr>
<td>on(event, callback)</td>
<td>none</td>
<td>
Subscribe to an event, add an event listener. See section <a href="#Subscriptions">Subscriptions</a>.
</td>
</tr>
<tr>
<td>
remove(id [, senderId])<br>
remove(ids [, senderId])
</td>
<td>Number[]</td>
<td>
Remove one or multiple items by id or by the items themselves. Returns an array with the ids of the removed items. See section <a href="#Data_Manipulation">Data Manipulation</a>.
</td>
</tr>
<tr>
<td>
setOptions(options)
</td>
<td>none</td>
<td>
Set options for the DataSet. Available options:
<ul>
<li>
<code>queue</code><br>
Queue data changes ('add', 'update', 'remove') and flush them at once.
The queue can be flushed manually by calling
<code>DataSet.flush()</code>, or can be flushed after a configured delay
or maximum number of entries.
<br>
<br>
When <code>queue</code> is true, a queue is created with default options.
When <code>queue</code> is false, an existing queue will be flushed and removed.
Options can be specified by providing an object:
<ul>
<li><code>delay: number</code><br>
The queue will be flushed automatically after an inactivity of this
delay in milliseconds. Default value is <code>null</code>.
<li><code>max: number</code><br>
When the queue exceeds the given maximum number
of entries, the queue is flushed automatically.
Default value is <code>Infinity</code>.
</li>
</ul>
</li>
</ul>
</td>
</tr>
<tr>
<td>
update(data [, senderId])
</td>
<td>Number[]</td>
<td>
        Update one or multiple existing items. <code>data</code> can be a single item or an array with items. When an item doesn't exist, it will be created. Returns an array with the ids of the updated items. See section <a href="#Data_Manipulation">Data Manipulation</a>.
</td>
</tr>
</table>
<h2 id="Properties">Properties</h2>
<p>DataSet contains the following properties.</p>
<table>
<tr>
<th>Property</th>
<th>Type</th>
<th>Description</th>
</tr>
<tr>
<td>length</td>
<td>Number</td>
<td>The number of items in the DataSet.</td>
</tr>
</table>
<h2 id="Subscriptions">Subscriptions</h2>
<p>
One can subscribe on changes in a DataSet.
A subscription can be created using the method <code>on</code>,
and removed with <code>off</code>.
</p>
<pre class="prettyprint lang-js">
// create a DataSet
var data = new vis.DataSet();
// subscribe to any change in the DataSet
data.on('*', function (event, properties, senderId) {
console.log('event:', event, 'properties:', properties, 'senderId:', senderId);
});
// add an item
data.add({id: 1, text: 'item 1'}); // triggers an 'add' event
data.update({id: 1, text: 'item 1 (updated)'}); // triggers an 'update' event
data.remove(1); // triggers an 'remove' event
</pre>
<h3 id="On">On</h3>
<p>
Subscribe to an event.
</p>
Syntax:
<pre class="prettyprint lang-js">DataSet.on(event, callback)</pre>
Where:
<ul>
<li>
<code>event</code> is a String containing any of the events listed
in section <a href="#Events">Events</a>.
</li>
<li>
<code>callback</code> is a callback function which will be called
each time the event occurs. The callback function is described in
section <a href="#Callback">Callback</a>.
</li>
</ul>
<h3 id="Off">Off</h3>
<p>
Unsubscribe from an event.
</p>
Syntax:
<pre class="prettyprint lang-js">DataSet.off(event, callback)</pre>
Where <code>event</code> and <code>callback</code> correspond with the
parameters used to <a href="#On">subscribe</a> to the event.
<h3 id="Events">Events</h3>
<p>
The following events are available for subscription:
</p>
<table>
<tr>
<th>Event</th>
<th>Description</th>
</tr>
<tr>
<td>add</td>
<td>
The <code>add</code> event is triggered when an item
or a set of items is added, or when an item is updated while
not yet existing.
</td>
</tr>
<tr>
<td>update</td>
<td>
The <code>update</code> event is triggered when an existing item
or a set of existing items is updated.
</td>
</tr>
<tr>
<td>remove</td>
<td>
The <code>remove</code> event is triggered when an item
or a set of items is removed.
</td>
</tr>
<tr>
<td>*</td>
<td>
The <code>*</code> event is triggered when any of the events
<code>add</code>, <code>update</code>, and <code>remove</code>
occurs.
</td>
</tr>
</table>
<h3 id="Callback">Callback</h3>
<p>
The callback functions of subscribers are called with the following
parameters:
</p>
<pre class="prettyprint lang-js">
function (event, properties, senderId) {
// handle the event
});
</pre>
<p>
where the parameters are defined as
</p>
<table>
<tr>
<th>Parameter</th>
<th>Type</th>
<th>Description</th>
</tr>
<tr>
<td>event</td>
<td>String</td>
<td>
Any of the available events: <code>add</code>,
<code>update</code>, or <code>remove</code>.
</td>
</tr>
<tr>
<td>properties</td>
<td>Object | null</td>
<td>
Optional properties providing more information on the event.
In case of the events <code>add</code>,
<code>update</code>, and <code>remove</code>,
<code>properties</code> is always an object containing a property
<code>items</code>, which contains an array with the ids of the affected
items. The <code>update</code> and <code>remove</code> events have an extra
field <code>oldData</code> containing the original data of the items in the
dataset before the items were updated or removed. The <code>update</code>
event also contains a field <code>data</code> containing the changes:
the properties of the items that are being updated.
</td>
</tr>
<tr>
<td>senderId</td>
<td>String | Number</td>
<td>
        A senderId, optionally provided by the application code
        which triggered the event. If no senderId was provided, the
        argument will be <code>null</code>.
</tr>
</table>
<h2 id="Data_Manipulation">Data Manipulation</h2>
<p>
The data in a DataSet can be manipulated using the methods
<a href="#Add"><code>add</code></a>,
<a href="#Update"><code>update</code></a>,
and <a href="#Remove"><code>remove</code></a>.
The DataSet can be emptied using the method
<a href="#Clear"><code>clear</code></a>.
</p>
<pre class="prettyprint lang-js">
// create a DataSet
var data = new vis.DataSet();
// add items
data.add([
{id: 1, text: 'item 1'},
{id: 2, text: 'item 2'},
{id: 3, text: 'item 3'}
]);
// update an item
data.update({id: 2, text: 'item 2 (updated)'});
// remove an item
data.remove(3);
</pre>
<h3 id="Add">Add</h3>
<p>
Add a data item or an array with items.
</p>
Syntax:
<pre class="prettyprint lang-js">var addedIds = DataSet.add(data [, senderId])</pre>
The argument <code>data</code> can contain:
<ul>
<li>
An <code>Object</code> containing a single item to be
added. The item must contain an id.
</li>
<li>
An <code>Array</code> containing a list with items to be added. Each item must contain an id.
</li>
</ul>
<p>
After the items are added to the DataSet, the DataSet will
trigger an event <code>add</code>. When a <code>senderId</code>
is provided, this id will be passed with the triggered
event to all subscribers.
</p>
<p>
The method will throw an Error when an item with the same id
as any of the added items already exists.
</p>
<h3 id="Update">Update</h3>
<p>
Update a data item or an array with items.
</p>
Syntax:
<pre class="prettyprint lang-js">var updatedIds = DataSet.update(data [, senderId])</pre>
The argument <code>data</code> can contain:
<ul>
<li>
An <code>Object</code> containing a single item to be
updated. The item must contain an id.
</li>
<li>
An <code>Array</code> containing a list with items to be updated. Each item must contain an id.
</li>
</ul>
<p>
The provided properties will be merged in the existing item.
When an item does not exist, it will be created.
</p>
<p>
After the items are updated, the DataSet will
trigger an event <code>add</code> for the added items, and
an event <code>update</code>. When a <code>senderId</code>
is provided, this id will be passed with the triggered
event to all subscribers.
</p>
<h3 id="Remove">Remove</h3>
<p>
Remove a data item or an array with items.
</p>
Syntax:
<pre class="prettyprint lang-js">var removedIds = DataSet.remove(id [, senderId])</pre>
<p>
The argument <code>id</code> can be:
</p>
<ul>
<li>
A <code>Number</code> or <code>String</code> containing the id
of a single item to be removed.
</li>
<li>
An <code>Object</code> containing the item to be deleted.
The item will be deleted by its id.
</li>
<li>
An Array containing ids or items to be removed.
</li>
</ul>
<p>
The method ignores removal of non-existing items, and returns an array
containing the ids of the items which are actually removed from the
DataSet.
</p>
<p>
After the items are removed, the DataSet will
trigger an event <code>remove</code> for the removed items.
When a <code>senderId</code> is provided, this id will be passed with
the triggered event to all subscribers.
</p>
<h3 id="Clear">Clear</h3>
<p>
Clear the complete DataSet.
</p>
Syntax:
<pre class="prettyprint lang-js">var removedIds = DataSet.clear([senderId])</pre>
<p>
After the items are removed, the DataSet will
trigger an event <code>remove</code> for all removed items.
When a <code>senderId</code> is provided, this id will be passed with
the triggered event to all subscribers.
</p>
<h2 id="Data_Selection">Data Selection</h2>
<p>
The DataSet contains functionality to format, filter, and sort data retrieved via the
methods <code>get</code>, <code>getIds</code>, <code>forEach</code>, and <code>map</code>. These methods have the following syntax:
</p>
<pre class="prettyprint lang-js">
DataSet.get([id] [, options]);
DataSet.getIds([options]);
DataSet.forEach(callback [, options]);
DataSet.map(callback [, options]);
</pre>
<p>
Where <code>options</code> is an Object which can have the following
properties:
</p>
<table class="properties">
<tr>
<th>Name</th>
<th>Type</th>
<th>Required</th>
<th>Description</th>
</tr>
<tr>
<td>fields</td>
<td>String[ ] | Object.<String, String></td>
<td>no</td>
<td>
An array with field names, or an object with current field name and
new field name that the field is returned as.
By default, all properties of the items are emitted.
When <code>fields</code> is defined, only the properties
whose name is specified in <code>fields</code> will be included
in the returned items.
</td>
</tr>
<tr>
<td>type</td>
<td>Object.<String, String></td>
<td>no</td>
<td>
An object containing field names as key, and data types as value.
By default, the type of the properties of an item are left
unchanged. When a field type is specified, this field in the
items will be converted to the specified type. This can be used
for example to convert ISO strings containing a date to a
JavaScript Date object, or convert strings to numbers or vice
versa. The available data types are listed in section
<a href="#Data_Types">Data Types</a>.
</td>
</tr>
<tr>
<td>filter</td>
<td>Function</td>
<td>no</td>
<td>Items can be filtered on specific properties by providing a filter
function. A filter function is executed for each of the items in the
DataSet, and is called with the item as parameter. The function must
return a boolean. All items for which the filter function returns
true will be emitted.
See section <a href="#Data_Filtering">Data Filtering</a>.</td>
</tr>
<tr>
<td>order</td>
<td>String | Function</td>
<td>no</td>
<td>Order the items by a field name or custom sort function.</td>
</tr>
<tr>
<td>returnType</td>
<td>String</td>
<td>no</td>
<td>Determine the type of output of the get function. Allowed values are <code>'Array' | 'Object'</code>.
The default returnType is an Array. The Object type will return a JSON object with the ID's as keys.</td>
</tr>
</table>
<p>
The following example demonstrates formatting properties and filtering
properties from items.
</p>
<pre class="prettyprint lang-js">
// create a DataSet
var data = new vis.DataSet();
data.add([
{id: 1, text: 'item 1', date: '2013-06-20', group: 1, first: true},
{id: 2, text: 'item 2', date: '2013-06-23', group: 2},
{id: 3, text: 'item 3', date: '2013-06-25', group: 2},
{id: 4, text: 'item 4'}
]);
// retrieve formatted items
var items = data.get({
fields: ['id', 'date', 'group'], // output the specified fields only
type: {
date: 'Date', // convert the date fields to Date objects
group: 'String' // convert the group fields to Strings
}
});
</pre>
<h3 id="Getting_Data">Getting Data</h3>
<p>
Data can be retrieved from the DataSet using the method <code>get</code>.
This method can return a single item or a list with items.
</p>
<p>A single item can be retrieved by its id:</p>
<pre class="prettyprint lang-js">
var item1 = dataset.get(1);
</pre>
<p>A selection of items can be retrieved by providing an array with ids:</p>
<pre class="prettyprint lang-js">
var items = dataset.get([1, 3, 4]); // retrieve items 1, 3, and 4
</pre>
<p>All items can be retrieved by simply calling <code>get</code> without
specifying an id:</p>
<pre class="prettyprint lang-js">
var items = dataset.get(); // retrieve all items
</pre>
<h3 id="Data_Filtering">Data Filtering</h3>
<p>
Items can be filtered on specific properties by providing a filter
function. A filter function is executed for each of the items in the
DataSet, and is called with the item as parameter. The function must
return a boolean. All items for which the filter function returns
true will be emitted.
</p>
<pre class="prettyprint lang-js">
// retrieve all items having a property group with value 2
var group2 = dataset.get({
filter: function (item) {
return (item.group == 2);
}
});
// retrieve all items having a property balance with a value above zero
var positiveBalance = dataset.get({
filter: function (item) {
return (item.balance > 0);
}
});
</pre>
<h3 id="Data_Types">Data Types</h3>
<p>
DataSet supports the following data types:
</p>
<table class="datatypes">
<tr>
<th>Name</th>
<th>Description</th>
<th>Examples</th>
</tr>
<tr>
<td>Boolean</td>
<td>A JavaScript Boolean</td>
<td>
<code>true</code><br>
<code>false</code>
</td>
</tr>
<tr>
<td>Number</td>
<td>A JavaScript Number</td>
<td>
<code>32</code><br>
<code>2.4</code>
</td>
</tr>
<tr>
<td>String</td>
<td>A JavaScript String</td>
<td>
<code>"hello world"</code><br>
<code>"2013-06-28"</code>
</td>
</tr>
<tr>
<td>Date</td>
<td>A JavaScript Date object</td>
<td>
<code>new Date()</code><br>
<code>new Date(2013, 5, 28)</code><br>
<code>new Date(1372370400000)</code>
</td>
</tr>
<tr>
<td>Moment</td>
<td>A Moment object, created with
<a href="http://momentjs.com/" target="_blank">moment.js</a></td>
<td>
<code>moment()</code><br>
<code>moment('2013-06-28')</code>
</td>
</tr>
<tr>
<td>ISODate</td>
<td>A string containing an ISO Date</td>
<td>
<code>new Date().toISOString()</code><br>
<code>"2013-06-27T22:00:00.000Z"</code>
</td>
</tr>
<tr>
<td>ASPDate</td>
<td>A string containing an ASP Date</td>
<td>
<code>"/Date(1372370400000)/"</code><br>
<code>"/Date(1198908717056-0700)/"</code>
</td>
</tr>
</table>
</div>
<!-- Bootstrap core JavaScript
================================================== -->
<!-- Placed at the end of the document so the pages load faster -->
<script src="../js/jquery.min.js"></script>
<script src="../js/bootstrap.min.js"></script>
<!-- IE10 viewport hack for Surface/desktop Windows 8 bug -->
<script src="../js/ie10-viewport-bug-workaround.js"></script>
<!-- jquery extensions -->
<script src="../js/jquery.highlight.js"></script>
<script src="../js/jquery.url.min.js"></script>
<!-- Tipue vendor js -->
<script src="../js/tipuesearch.config.js"></script>
<script src="../js/tipuesearch.js"></script>
<!-- controller -->
<script src="../js/main.js"></script>
| AnttiKurittu/kirjuri | views/js/vis-4.18.0/docs/data/dataset.html | HTML | mit | 31,355 |
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System.Collections.Generic;
using Xunit;
namespace System.Text.Tests
{
    /// <summary>
    /// Tests for <see cref="UnicodeEncoding"/> char-to-byte encoding, run against all
    /// eight encoder configurations: little/big endian, with/without BOM preamble,
    /// and with/without throwing on invalid characters.
    /// </summary>
    public class UnicodeEncodingEncode
    {
        /// <summary>
        /// Yields (source, index, count, expected little-endian UTF-16 bytes) cases.
        /// Big-endian expectations are derived in the test via <see cref="GetBigEndianBytes"/>.
        /// </summary>
        public static IEnumerable<object[]> Encode_TestData()
        {
            // All chars 0x00-0xFF; each encodes as one 2-byte little-endian code unit
            for (int i = 0; i <= byte.MaxValue; i++)
            {
                char c = (char)i;
                yield return new object[] { "a" + c + "b", 0, 3, new byte[] { 97, 0, (byte)c, 0, 98, 0 } };
                yield return new object[] { "a" + c + "b", 1, 1, new byte[] { (byte)c, 0 } };
                yield return new object[] { "a" + c + "b", 2, 1, new byte[] { 98, 0 } };
            }
            // Unicode
            yield return new object[] { "a\u1234b", 0, 3, new byte[] { 97, 0, 52, 18, 98, 0 } };
            yield return new object[] { "a\u1234b", 1, 1, new byte[] { 52, 18 } };
            // Surrogate pairs (kept intact, each half emitted as its own code unit)
            yield return new object[] { "\uD800\uDC00", 0, 2, new byte[] { 0, 216, 0, 220 } };
            yield return new object[] { "a\uD800\uDC00b", 0, 4, new byte[] { 97, 0, 0, 216, 0, 220, 98, 0 } };
            yield return new object[] { "\uD800\uDC00\uFFFD\uFEB7", 0, 4, new byte[] { 0x00, 0xD8, 0x00, 0xDC, 0xFD, 0xFF, 0xB7, 0xFE } };
            // Mixture of ASCII and Unicode
            yield return new object[] { "FooBA\u0400R", 0, 7, new byte[] { 70, 0, 111, 0, 111, 0, 66, 0, 65, 0, 0, 4, 82, 0 } };
            yield return new object[] { "\u00C0nima\u0300l", 0, 7, new byte[] { 192, 0, 110, 0, 105, 0, 109, 0, 97, 0, 0, 3, 108, 0 } };
            yield return new object[] { "Test\uD803\uDD75Test", 0, 10, new byte[] { 84, 0, 101, 0, 115, 0, 116, 0, 3, 216, 117, 221, 84, 0, 101, 0, 115, 0, 116, 0 } };
            yield return new object[] { "\uD803\uDD75\uD803\uDD75\uD803\uDD75", 0, 6, new byte[] { 3, 216, 117, 221, 3, 216, 117, 221, 3, 216, 117, 221 } };
            yield return new object[] { "\u0130", 0, 1, new byte[] { 48, 1 } };
            yield return new object[] { "za\u0306\u01fd\u03b2", 0, 5, new byte[] { 122, 0, 97, 0, 6, 3, 253, 1, 178, 3 } };
            yield return new object[] { "za\u0306\u01FD\u03B2\uD8FF\uDCFF", 0, 7, new byte[] { 122, 0, 97, 0, 6, 3, 253, 1, 178, 3, 255, 216, 255, 220 } };
            yield return new object[] { "za\u0306\u01FD\u03B2\uD8FF\uDCFF", 4, 3, new byte[] { 178, 3, 255, 216, 255, 220 } };
            // Empty strings / empty substrings encode to zero bytes
            yield return new object[] { string.Empty, 0, 0, new byte[0] };
            yield return new object[] { "a\u1234b", 3, 0, new byte[0] };
            yield return new object[] { "a\u1234b", 0, 0, new byte[0] };
        }
        /// <summary>
        /// Valid input must encode identically regardless of endianness flag
        /// (after byte-swapping), BOM setting or throwOnInvalidBytes setting.
        /// </summary>
        [Theory]
        [MemberData(nameof(Encode_TestData))]
        public void Encode(string source, int index, int count, byte[] expectedLittleEndian)
        {
            byte[] expectedBigEndian = GetBigEndianBytes(expectedLittleEndian);
            EncodingHelpers.Encode(new UnicodeEncoding(false, true, false), source, index, count, expectedLittleEndian);
            EncodingHelpers.Encode(new UnicodeEncoding(false, false, false), source, index, count, expectedLittleEndian);
            EncodingHelpers.Encode(new UnicodeEncoding(true, true, false), source, index, count, expectedBigEndian);
            EncodingHelpers.Encode(new UnicodeEncoding(true, false, false), source, index, count, expectedBigEndian);
            EncodingHelpers.Encode(new UnicodeEncoding(false, true, true), source, index, count, expectedLittleEndian);
            EncodingHelpers.Encode(new UnicodeEncoding(false, false, true), source, index, count, expectedLittleEndian);
            EncodingHelpers.Encode(new UnicodeEncoding(true, true, true), source, index, count, expectedBigEndian);
            EncodingHelpers.Encode(new UnicodeEncoding(true, false, true), source, index, count, expectedBigEndian);
        }
        /// <summary>
        /// Helper (deliberately not a [Theory]; see the TODO in the [Fact] below).
        /// Invalid input: non-throwing encoders substitute the replacement bytes,
        /// throwing encoders must raise.
        /// </summary>
        public void Encode_InvalidChars(string source, int index, int count, byte[] expectedLittleEndian)
        {
            byte[] expectedBigEndian = GetBigEndianBytes(expectedLittleEndian);
            EncodingHelpers.Encode(new UnicodeEncoding(false, true, false), source, index, count, expectedLittleEndian);
            EncodingHelpers.Encode(new UnicodeEncoding(false, false, false), source, index, count, expectedLittleEndian);
            EncodingHelpers.Encode(new UnicodeEncoding(true, true, false), source, index, count, expectedBigEndian);
            EncodingHelpers.Encode(new UnicodeEncoding(true, false, false), source, index, count, expectedBigEndian);
            NegativeEncodingTests.Encode_Invalid(new UnicodeEncoding(false, true, true), source, index, count);
            NegativeEncodingTests.Encode_Invalid(new UnicodeEncoding(false, false, true), source, index, count);
            NegativeEncodingTests.Encode_Invalid(new UnicodeEncoding(true, true, true), source, index, count);
            NegativeEncodingTests.Encode_Invalid(new UnicodeEncoding(true, false, true), source, index, count);
        }
        [Fact]
        public void Encode_InvalidChars()
        {
            // TODO: add into Encode_TestData or Encode_InvalidChars_TestData once #7166 is fixed
            byte[] unicodeReplacementBytes1 = new byte[] { 253, 255 }; // U+FFFD, little-endian
            Encode_InvalidChars("\uD800", 0, 1, unicodeReplacementBytes1); // Lone high surrogate
            Encode_InvalidChars("\uDC00", 0, 1, unicodeReplacementBytes1); // Lone low surrogate
            // Surrogate pair out of range
            Encode_InvalidChars("\uD800\uDC00", 0, 1, unicodeReplacementBytes1);
            Encode_InvalidChars("\uD800\uDC00", 1, 1, unicodeReplacementBytes1);
            Encode_InvalidChars("\uDBFF\uDFFF", 0, 1, unicodeReplacementBytes1);
            Encode_InvalidChars("\uDBFF\uDFFF", 1, 1, unicodeReplacementBytes1);
            byte[] unicodeReplacementBytes2 = new byte[] { 253, 255, 253, 255 };
            Encode_InvalidChars("\uD800\uD800", 0, 2, unicodeReplacementBytes2); // High, high
            Encode_InvalidChars("\uDC00\uD800", 0, 2, unicodeReplacementBytes2); // Low, high
            Encode_InvalidChars("\uDC00\uDC00", 0, 2, unicodeReplacementBytes2); // Low, low
            // Mixture of ASCII, valid Unicode and invalid Unicode
            Encode_InvalidChars("Test\uD803Test", 0, 9, new byte[] { 84, 0, 101, 0, 115, 0, 116, 0, 253, 255, 84, 0, 101, 0, 115, 0, 116, 0 });
            Encode_InvalidChars("Test\uDD75Test", 0, 9, new byte[] { 84, 0, 101, 0, 115, 0, 116, 0, 253, 255, 84, 0, 101, 0, 115, 0, 116, 0 });
            Encode_InvalidChars("TestTest\uDD75", 0, 9, new byte[] { 84, 0, 101, 0, 115, 0, 116, 0, 84, 0, 101, 0, 115, 0, 116, 0, 253, 255 });
            Encode_InvalidChars("TestTest\uD803", 0, 9, new byte[] { 84, 0, 101, 0, 115, 0, 116, 0, 84, 0, 101, 0, 115, 0, 116, 0, 253, 255 });
            Encode_InvalidChars("\uDD75", 0, 1, new byte[] { 253, 255 });
            Encode_InvalidChars("\uDD75\uDD75\uD803\uDD75\uDD75\uDD75\uDD75\uD803\uD803\uD803\uDD75\uDD75\uDD75\uDD75", 0, 14, new byte[] { 253, 255, 253, 255, 3, 216, 117, 221, 253, 255, 253, 255, 253, 255, 253, 255, 253, 255, 3, 216, 117, 221, 253, 255, 253, 255, 253, 255 });
            // High BMP non-chars: valid for the encoder, passed through as-is
            Encode("\uFFFD", 0, 1, unicodeReplacementBytes1);
            Encode("\uFFFE", 0, 1, new byte[] { 254, 255 });
            Encode("\uFFFF", 0, 1, new byte[] { 255, 255 });
            Encode("\uFFFF\uFFFE", 0, 2, new byte[] { 0xFF, 0xFF, 0xFE, 0xFF });
        }
        /// <summary>
        /// A char count whose doubled byte count would overflow Int32 must be rejected.
        /// </summary>
        [Fact]
        public unsafe void GetByteCount_OverlyLargeCount_ThrowsArgumentOutOfRangeException()
        {
            UnicodeEncoding encoding = new UnicodeEncoding();
            fixed (char* pChars = "abc")
            {
                char* pCharsLocal = pChars; // can't use a fixed variable inside the lambda
                Assert.Throws<ArgumentOutOfRangeException>("count", () => encoding.GetByteCount(pCharsLocal, int.MaxValue / 2 + 1));
            }
        }
        /// <summary>
        /// Swaps each 2-byte code unit of a little-endian byte sequence in place
        /// (on a copy) to produce the big-endian equivalent.
        /// </summary>
        public static byte[] GetBigEndianBytes(byte[] littleEndianBytes)
        {
            byte[] bigEndianBytes = (byte[])littleEndianBytes.Clone();
            for (int i = 0; i < bigEndianBytes.Length; i += 2)
            {
                byte b1 = bigEndianBytes[i];
                byte b2 = bigEndianBytes[i + 1];
                bigEndianBytes[i] = b2;
                bigEndianBytes[i + 1] = b1;
            }
            return bigEndianBytes;
        }
    }
}
| ellismg/corefx | src/System.Text.Encoding/tests/UnicodeEncoding/UnicodeEncodingEncode.cs | C# | mit | 8,555 |
<?php namespace CodeIgniter\Database\Live;
/**
* @group DatabaseLive
*/
class CIDbTestCaseTest extends \CIDatabaseTestCase
{
    // Re-run migrations and re-seed before each test
    protected $refresh = true;
    protected $seed = 'CITestSeeder';
    /**
     * hasInDatabase() inserts the row, which seeInDatabase() must then find.
     */
    public function testHasInDatabase()
    {
        $this->hasInDatabase('user', ['name' => 'Ricky', 'email' => '[email protected]', 'country' => 'US']);
        $this->seeInDatabase('user', ['name' => 'Ricky', 'email' => '[email protected]', 'country' => 'US']);
    }
    //--------------------------------------------------------------------
    /**
     * Asserts that a row absent from the seed data is reported as absent.
     */
    public function testDontSeeInDatabase()
    {
        $this->dontSeeInDatabase('user', ['name' => 'Ricardo']);
    }
    //--------------------------------------------------------------------
    /**
     * Asserts the seeded row count for a criteria-based query.
     */
    public function testSeeNumRecords()
    {
        $this->seeNumRecords(2, 'user', ['country' => 'US']);
    }
    //--------------------------------------------------------------------
    /**
     * grabFromDatabase() returns a single column value for the matching row.
     */
    public function testGrabFromDatabase()
    {
        $email = $this->grabFromDatabase('user', 'email', ['name' => 'Derek Jones']);
        $this->assertEquals('[email protected]', $email);
    }
    //--------------------------------------------------------------------
}
// AFOAuthCredential.h
//
// Copyright (c) 2012-2014 AFNetworking (http://afnetworking.com)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
/**
`AFOAuthCredential` models the credentials returned from an OAuth server, storing the token type, access & refresh tokens, and whether the token is expired.
OAuth credentials can be stored in the user's keychain, and retrieved on subsequent launches.
*/
@interface AFOAuthCredential : NSObject <NSCoding>
///--------------------------------------
/// @name Accessing Credential Properties
///--------------------------------------
/**
 The OAuth access token.
 */
@property (readonly, nonatomic, copy) NSString *accessToken;
/**
 The OAuth token type (e.g. "bearer").
 */
@property (readonly, nonatomic, copy) NSString *tokenType;
/**
 The OAuth refresh token.
 */
@property (readonly, nonatomic, copy) NSString *refreshToken;
/**
 Whether the OAuth credentials are expired.
 */
@property (readonly, nonatomic, assign, getter = isExpired) BOOL expired;
///--------------------------------------------
/// @name Creating and Initializing Credentials
///--------------------------------------------
/**
 Create an OAuth credential from a token string, with a specified type.

 @param token The OAuth token string.
 @param type The OAuth token type.

 @return The newly-created credential.
 */
+ (instancetype)credentialWithOAuthToken:(NSString *)token
                               tokenType:(NSString *)type;
/**
 Initialize an OAuth credential from a token string, with a specified type.

 @param token The OAuth token string.
 @param type The OAuth token type.

 @return The initialized credential.
 */
// NOTE(review): declared as `id` rather than `instancetype`; confirm the
// implementation before changing the declared return type.
- (id)initWithOAuthToken:(NSString *)token
               tokenType:(NSString *)type;
///----------------------------
/// @name Setting Refresh Token
///----------------------------
/**
 Set the credential refresh token, without a specific expiration

 @param refreshToken The OAuth refresh token.
 */
- (void)setRefreshToken:(NSString *)refreshToken;
/**
 Set the expiration on the access token. If no expiration is given by the OAuth2 provider,
 you may pass in [NSDate distantFuture]

 @param expiration The expiration of the access token. This must not be `nil`.
 */
- (void)setExpiration:(NSDate *)expiration;
/**
 Set the credential refresh token, with a specified expiration.

 @param refreshToken The OAuth refresh token.
 @param expiration The expiration of the access token. This must not be `nil`.
 */
- (void)setRefreshToken:(NSString *)refreshToken
             expiration:(NSDate *)expiration;
///-----------------------------------------
/// @name Storing and Retrieving Credentials
///-----------------------------------------
/**
 Stores the specified OAuth credential for a given web service identifier in the Keychain.
 with the default Keychain Accessibilty of kSecAttrAccessibleWhenUnlocked.

 @param credential The OAuth credential to be stored.
 @param identifier The service identifier associated with the specified credential.

 @return Whether or not the credential was stored in the keychain.
 */
+ (BOOL)storeCredential:(AFOAuthCredential *)credential
         withIdentifier:(NSString *)identifier;
/**
 Stores the specified OAuth token for a given web service identifier in the Keychain.

 @param credential The OAuth credential to be stored.
 @param identifier The service identifier associated with the specified token.
 @param securityAccessibility The Keychain security accessibility to store the credential with.

 @return Whether or not the credential was stored in the keychain.
 */
+ (BOOL)storeCredential:(AFOAuthCredential *)credential
         withIdentifier:(NSString *)identifier
      withAccessibility:(id)securityAccessibility;
/**
 Retrieves the OAuth credential stored with the specified service identifier from the Keychain.

 @param identifier The service identifier associated with the specified credential.

 @return The retrieved OAuth credential.
 */
+ (nullable AFOAuthCredential *)retrieveCredentialWithIdentifier:(NSString *)identifier;
/**
 Deletes the OAuth credential stored with the specified service identifier from the Keychain.

 @param identifier The service identifier associated with the specified credential.

 @return Whether or not the credential was deleted from the keychain.
 */
+ (BOOL)deleteCredentialWithIdentifier:(NSString *)identifier;
@end
NS_ASSUME_NONNULL_END
| Jasonette/JASONETTE-iOS | app/Pods/AFOAuth2Manager/AFOAuth2Manager/AFOAuthCredential.h | C | mit | 5,425 |
#
# Specifying rufus-scheduler
#
# Fri Nov 1 05:56:03 JST 2013
#
# Ishinomaki
#
require 'spec_helper'
describe Rufus::Scheduler do
  # Scheduler subclass whose #confirm_lock always refuses the lock,
  # while counting how many times the lock was consulted.
  class LosingLockScheduler < Rufus::Scheduler
    attr_reader :counter
    def initialize
      super
      @counter = 0
    end
    # Record the attempt and deny the lock.
    def confirm_lock
      @counter = @counter + 1
      false
    end
  end
  context 'custom locks' do
    it 'does not trigger when #confirm_lock returns false' do
      s = LosingLockScheduler.new
      count = 0
      s.in('0s') { count = count + 1 }
      sleep 0.7
      # the job must not have run, and the lock must have been consulted exactly once
      expect(count).to eq(0)
      expect(s.counter).to eq(1)
    end
  end
end
| jmettraux/rufus-scheduler | spec/lock_custom_spec.rb | Ruby | mit | 630 |
<?php
namespace GuzzleHttp\Promise\Tests;
use GuzzleHttp\Promise\Coroutine;
use GuzzleHttp\Promise\Promise;
use GuzzleHttp\Promise\PromiseInterface;
use PHPUnit_Framework_TestCase;
use ReflectionClass;
/**
 * Tests for GuzzleHttp\Promise\Coroutine: proxying of PromiseInterface
 * methods, cancellation propagation, and chained/error resolution via wait().
 */
class CoroutineTest extends PHPUnit_Framework_TestCase
{
    /**
     * @dataProvider promiseInterfaceMethodProvider
     *
     * @param string $method
     * @param array $args
     */
    public function testShouldProxyPromiseMethodsToResultPromise($method, $args = [])
    {
        $coroutine = new Coroutine(function () { yield 0; });
        $mockPromise = $this->getMockForAbstractClass(PromiseInterface::class);
        call_user_func_array([$mockPromise->expects($this->once())->method($method), 'with'], $args);
        // Swap the coroutine's private result promise for the mock so the
        // proxied call can be observed.
        $resultPromiseProp = (new ReflectionClass(Coroutine::class))->getProperty('result');
        $resultPromiseProp->setAccessible(true);
        $resultPromiseProp->setValue($coroutine, $mockPromise);
        call_user_func_array([$coroutine, $method], $args);
    }
    public function promiseInterfaceMethodProvider()
    {
        return [
            ['then', [null, null]],
            ['otherwise', [function () {}]],
            ['wait', [true]],
            ['getState', []],
            ['resolve', [null]],
            ['reject', [null]],
        ];
    }
    /**
     * cancel() must be forwarded to both the result promise and the
     * currently-awaited inner promise.
     */
    public function testShouldCancelResultPromiseAndOutsideCurrentPromise()
    {
        $coroutine = new Coroutine(function () { yield 0; });
        $mockPromises = [
            'result' => $this->getMockForAbstractClass(PromiseInterface::class),
            'currentPromise' => $this->getMockForAbstractClass(PromiseInterface::class),
        ];
        foreach ($mockPromises as $propName => $mockPromise) {
            /**
             * @var $mockPromise \PHPUnit_Framework_MockObject_MockObject
             */
            $mockPromise->expects($this->once())
                ->method('cancel')
                ->with();
            $promiseProp = (new ReflectionClass(Coroutine::class))->getProperty($propName);
            $promiseProp->setAccessible(true);
            $promiseProp->setValue($coroutine, $mockPromise);
        }
        $coroutine->cancel();
    }
    /**
     * wait() must resolve through a chain of then()-attached coroutines.
     */
    public function testWaitShouldResolveChainedCoroutines()
    {
        $promisor = function () {
            return \GuzzleHttp\Promise\coroutine(function () {
                yield $promise = new Promise(function () use (&$promise) {
                    $promise->resolve(1);
                });
            });
        };
        $promise = $promisor()->then($promisor)->then($promisor);
        $this->assertSame(1, $promise->wait());
    }
    /**
     * A rejection raised inside an intermediate coroutine must propagate to
     * the otherwise() handler rather than being swallowed.
     */
    public function testWaitShouldHandleIntermediateErrors()
    {
        $promise = \GuzzleHttp\Promise\coroutine(function () {
            yield $promise = new Promise(function () use (&$promise) {
                $promise->resolve(1);
            });
        })
            ->then(function () {
                return \GuzzleHttp\Promise\coroutine(function () {
                    yield $promise = new Promise(function () use (&$promise) {
                        $promise->reject(new \Exception);
                    });
                });
            })
            ->otherwise(function (\Exception $error = null) {
                if (!$error) {
                    self::fail('Error did not propagate.');
                }
                return 3;
            });
        $this->assertSame(3, $promise->wait());
    }
}
| hiadone/mobipopcon | vendor/guzzlehttp/promises/tests/CoroutineTest.php | PHP | mit | 3,414 |
/*
* images.js: Implementation of Joyent Images Client.
*
* (C) 2012 Charlie Robbins, Ken Perkins, Ross Kukulinski & the Contributors.
*
*/
var pkgcloud = require('../../../../../lib/pkgcloud'),
base = require('../../../core/compute'),
errs = require('errs'),
compute = pkgcloud.providers.joyent.compute;
//
// ### function getImages (callback)
// #### @callback {function} f(err, images). `images` is an array that
// represents the images that are available to your account
//
// Lists all images available to your account.
//
exports.getImages = function getImages(callback) {
var self = this;
return this._request({
path: this.account + '/datasets'
}, function (err, body, res) {
return err
? callback(err)
: callback(null, body.map(function (result) {
return new compute.Image(self, result);
}), res);
});
};
// ### function getImage (image, callback)
// #### @image {Image|String} Image id or an Image
// #### @callback {function} f(err, image). `image` is an object that
// represents the image that was retrieved.
//
// Gets a specified image of Joyent DataSets using the provided details
// object.
//
exports.getImage = function getImage(image, callback) {
var self = this,
imageId = image instanceof base.Image ? image.id : image;
// joyent decided to add spaces to their identifiers
imageId = encodeURIComponent(imageId);
return this._request({
path: this.account + '/datasets/' + imageId
}, function (err, body, res) {
return err
? callback(err)
: callback(null, new compute.Image(self, body), res);
});
};
//
// ### function createImage(options, callback)
// #### @id {Object} an object literal with options
// #### @name {String} String name of the image
// #### @server {Boolean} the server to use
// #### @callback {function} f(err, image). `image` is an object that
// represents the image that was created.
//
// Creates an image in Joyent based on a server
//
exports.createImage = function createImage(options, callback) {
return errs.handle(
errs.create({ message: 'Not supported by joyent' }),
callback
);
};
//
// ### function destroyImage(image, callback)
// #### @image {Image|String} Image id or an Image
// #### @callback {function} f(err, image). `image` is an object that
// represents the image that was deleted.
//
// Destroys an image in Joyent
//
exports.destroyImage = function destroyImage(image, callback) {
return errs.handle(
errs.create({ message: 'Not supported by joyent' }),
callback
);
}; | allankv/bdq-framework-slides | node_modules/pkgcloud/lib/pkgcloud/joyent/compute/client/images.js | JavaScript | mit | 2,594 |
/**
* @license Highcharts JS v6.1.0 (2018-04-13)
* Exporting module
*
* (c) 2010-2017 Torstein Honsi
*
* License: www.highcharts.com/license
*/
'use strict';
(function (factory) {
if (typeof module === 'object' && module.exports) {
module.exports = factory;
} else {
factory(Highcharts);
}
}(function (Highcharts) {
(function (H) {
/**
* (c) 2010-2017 Christer Vasseng, Torstein Honsi
*
* License: www.highcharts.com/license
*/
/**
* @typedef {Object} AjaxSettings
* @property {String} url - The URL to call
* @property {('get'|'post'|'update'|'delete')} type - The verb to use
* @property {('json'|'xml'|'text'|'octet')} dataType - The data type expected
* @property {Function} success - Function to call on success
* @property {Function} error - Function to call on error
* @property {Object} data - The payload to send
* @property {Object} headers - The headers; keyed on header name
*/
/**
* Perform an Ajax call.
*
* @memberof Highcharts
* @param {AjaxSettings} - The Ajax settings to use
*
*/
H.ajax = function (attr) {
var options = H.merge(true, {
url: false,
type: 'GET',
dataType: 'json',
success: false,
error: false,
data: false,
headers: {}
}, attr),
headers = {
json: 'application/json',
xml: 'application/xml',
text: 'text/plain',
octet: 'application/octet-stream'
},
r = new XMLHttpRequest();
function handleError(xhr, err) {
if (options.error) {
options.error(xhr, err);
} else {
// Maybe emit a highcharts error event here
}
}
if (!options.url) {
return false;
}
r.open(options.type.toUpperCase(), options.url, true);
r.setRequestHeader(
'Content-Type',
headers[options.dataType] || headers.text
);
H.objectEach(options.headers, function (val, key) {
r.setRequestHeader(key, val);
});
r.onreadystatechange = function () {
var res;
if (r.readyState === 4) {
if (r.status === 200) {
res = r.responseText;
if (options.dataType === 'json') {
try {
res = JSON.parse(res);
} catch (e) {
return handleError(r, e);
}
}
return options.success && options.success(res);
}
handleError(r, r.responseText);
}
};
try {
options.data = JSON.stringify(options.data);
} catch (e) {}
r.send(options.data || true);
};
}(Highcharts));
(function (Highcharts) {
/**
* Experimental data export module for Highcharts
*
* (c) 2010-2017 Torstein Honsi
*
* License: www.highcharts.com/license
*/
// @todo
// - Set up systematic tests for all series types, paired with tests of the data
// module importing the same data.
var defined = Highcharts.defined,
each = Highcharts.each,
pick = Highcharts.pick,
win = Highcharts.win,
doc = win.document,
seriesTypes = Highcharts.seriesTypes,
downloadAttrSupported = doc.createElement('a').download !== undefined;
// Can we add this to utils? Also used in screen-reader.js
/**
* HTML encode some characters vulnerable for XSS.
* @param {string} html The input string
* @return {string} The excaped string
*/
function htmlencode(html) {
return html
.replace(/&/g, '&')
.replace(/</g, '<')
.replace(/>/g, '>')
.replace(/"/g, '"')
.replace(/'/g, ''')
.replace(/\//g, '/');
}
Highcharts.setOptions({
/**
* @optionparent exporting
*/
exporting: {
/**
* Export-data module required. Caption for the data table. Same as
* chart title by default. Set to `false` to disable.
*
* @type {Boolean|String}
* @since 6.0.4
* @sample highcharts/export-data/multilevel-table
* Multiple table headers
* @default undefined
* @apioption exporting.tableCaption
*/
/**
* Options for exporting data to CSV or ExCel, or displaying the data
* in a HTML table or a JavaScript structure. Requires the
* `export-data.js` module. This module adds data export options to the
* export menu and provides functions like `Chart.getCSV`,
* `Chart.getTable`, `Chart.getDataRows` and `Chart.viewData`.
*
* @sample highcharts/export-data/categorized/ Categorized data
* @sample highcharts/export-data/stock-timeaxis/ Highstock time axis
*
* @since 6.0.0
*/
csv: {
/**
* Formatter callback for the column headers. Parameters are:
* - `item` - The series or axis object)
* - `key` - The point key, for example y or z
* - `keyLength` - The amount of value keys for this item, for
* example a range series has the keys `low` and `high` so the
* key length is 2.
*
* If [useMultiLevelHeaders](#exporting.useMultiLevelHeaders) is
* true, columnHeaderFormatter by default returns an object with
* columnTitle and topLevelColumnTitle for each key. Columns with
* the same topLevelColumnTitle have their titles merged into a
* single cell with colspan for table/Excel export.
*
* If `useMultiLevelHeaders` is false, or for CSV export, it returns
* the series name, followed by the key if there is more than one
* key.
*
* For the axis it returns the axis title or "Category" or
* "DateTime" by default.
*
* Return `false` to use Highcharts' proposed header.
*
* @sample highcharts/export-data/multilevel-table
* Multiple table headers
* @type {Function|null}
*/
columnHeaderFormatter: null,
/**
* Which date format to use for exported dates on a datetime X axis.
* See `Highcharts.dateFormat`.
*/
dateFormat: '%Y-%m-%d %H:%M:%S',
/**
* Which decimal point to use for exported CSV. Defaults to the same
* as the browser locale, typically `.` (English) or `,` (German,
* French etc).
* @type {String}
* @since 6.0.4
*/
decimalPoint: null,
/**
* The item delimiter in the exported data. Use `;` for direct
* exporting to Excel. Defaults to a best guess based on the browser
* locale. If the locale _decimal point_ is `,`, the `itemDelimiter`
* defaults to `;`, otherwise the `itemDelimiter` defaults to `,`.
*
* @type {String}
*/
itemDelimiter: null,
/**
* The line delimiter in the exported data, defaults to a newline.
*/
lineDelimiter: '\n'
},
/**
* Export-data module required. Show a HTML table below the chart with
* the chart's current data.
*
* @sample highcharts/export-data/showtable/ Show the table
* @since 6.0.0
*/
showTable: false,
/**
* Export-data module required. Use multi level headers in data table.
* If [csv.columnHeaderFormatter](#exporting.csv.columnHeaderFormatter)
* is defined, it has to return objects in order for multi level headers
* to work.
*
* @sample highcharts/export-data/multilevel-table
* Multiple table headers
* @since 6.0.4
*/
useMultiLevelHeaders: true,
/**
* Export-data module required. If using multi level table headers, use
* rowspans for headers that have only one level.
*
* @sample highcharts/export-data/multilevel-table
* Multiple table headers
* @since 6.0.4
*/
useRowspanHeaders: true
},
/**
* @optionparent lang
*/
lang: {
/**
* Export-data module only. The text for the menu item.
* @since 6.0.0
*/
downloadCSV: 'Download CSV',
/**
* Export-data module only. The text for the menu item.
* @since 6.0.0
*/
downloadXLS: 'Download XLS',
/**
* Export-data module only. The text for the menu item.
* @since 6.1.0
*/
openInCloud: 'Open in Highcharts Cloud',
/**
* Export-data module only. The text for the menu item.
* @since 6.0.0
*/
viewData: 'View data table'
}
});
// Add an event listener to handle the showTable option
Highcharts.addEvent(Highcharts.Chart, 'render', function () {
if (
this.options &&
this.options.exporting &&
this.options.exporting.showTable
) {
this.viewData();
}
});
// Set up key-to-axis bindings. This is used when the Y axis is datetime or
// categorized. For example in an arearange series, the low and high values
    // should be formatted according to the Y axis type, and in order to link them
// we need this map.
Highcharts.Chart.prototype.setUpKeyToAxis = function () {
if (seriesTypes.arearange) {
seriesTypes.arearange.prototype.keyToAxis = {
low: 'y',
high: 'y'
};
}
};
/**
* Export-data module required. Returns a two-dimensional array containing the
* current chart data.
*
* @param {Boolean} multiLevelHeaders
* Use multilevel headers for the rows by default. Adds an extra row
* with top level headers. If a custom columnHeaderFormatter is
* defined, this can override the behavior.
*
* @returns {Array.<Array>}
* The current chart data
*/
Highcharts.Chart.prototype.getDataRows = function (multiLevelHeaders) {
var time = this.time,
csvOptions = (this.options.exporting && this.options.exporting.csv) ||
{},
xAxis,
xAxes = this.xAxis,
rows = {},
rowArr = [],
dataRows,
topLevelColumnTitles = [],
columnTitles = [],
columnTitleObj,
i,
x,
xTitle,
// Options
columnHeaderFormatter = function (item, key, keyLength) {
if (csvOptions.columnHeaderFormatter) {
var s = csvOptions.columnHeaderFormatter(item, key, keyLength);
if (s !== false) {
return s;
}
}
if (!item) {
return 'Category';
}
if (item instanceof Highcharts.Axis) {
return (item.options.title && item.options.title.text) ||
(item.isDatetimeAxis ? 'DateTime' : 'Category');
}
if (multiLevelHeaders) {
return {
columnTitle: keyLength > 1 ? key : item.name,
topLevelColumnTitle: item.name
};
}
return item.name + (keyLength > 1 ? ' (' + key + ')' : '');
},
xAxisIndices = [];
// Loop the series and index values
i = 0;
this.setUpKeyToAxis();
each(this.series, function (series) {
var keys = series.options.keys,
pointArrayMap = keys || series.pointArrayMap || ['y'],
valueCount = pointArrayMap.length,
xTaken = !series.requireSorting && {},
categoryMap = {},
datetimeValueAxisMap = {},
xAxisIndex = Highcharts.inArray(series.xAxis, xAxes),
mockSeries,
j;
// Map the categories for value axes
each(pointArrayMap, function (prop) {
var axisName = (
(series.keyToAxis && series.keyToAxis[prop]) ||
prop
) + 'Axis';
categoryMap[prop] = (
series[axisName] &&
series[axisName].categories
) || [];
datetimeValueAxisMap[prop] = (
series[axisName] &&
series[axisName].isDatetimeAxis
);
});
if (
series.options.includeInCSVExport !== false &&
!series.options.isInternal &&
series.visible !== false // #55
) {
// Build a lookup for X axis index and the position of the first
// series that belongs to that X axis. Includes -1 for non-axis
// series types like pies.
if (!Highcharts.find(xAxisIndices, function (index) {
return index[0] === xAxisIndex;
})) {
xAxisIndices.push([xAxisIndex, i]);
}
// Compute the column headers and top level headers, usually the
// same as series names
j = 0;
while (j < valueCount) {
columnTitleObj = columnHeaderFormatter(
series,
pointArrayMap[j],
pointArrayMap.length
);
columnTitles.push(
columnTitleObj.columnTitle || columnTitleObj
);
if (multiLevelHeaders) {
topLevelColumnTitles.push(
columnTitleObj.topLevelColumnTitle || columnTitleObj
);
}
j++;
}
mockSeries = {
chart: series.chart,
autoIncrement: series.autoIncrement,
options: series.options,
pointArrayMap: series.pointArrayMap
};
// Export directly from options.data because we need the uncropped
// data (#7913), and we need to support Boost (#7026).
each(series.options.data, function eachData(options, pIdx) {
var key,
prop,
val,
point;
point = { series: mockSeries };
series.pointClass.prototype.applyOptions.apply(
point,
[options]
);
key = point.x;
if (xTaken) {
if (xTaken[key]) {
key += '|' + pIdx;
}
xTaken[key] = true;
}
j = 0;
if (!rows[key]) {
// Generate the row
rows[key] = [];
// Contain the X values from one or more X axes
rows[key].xValues = [];
}
rows[key].x = point.x;
rows[key].xValues[xAxisIndex] = point.x;
// Pies, funnels, geo maps etc. use point name in X row
if (!series.xAxis || series.exportKey === 'name') {
rows[key].name = (
series.data[pIdx] &&
series.data[pIdx].name
);
}
while (j < valueCount) {
prop = pointArrayMap[j]; // y, z etc
val = point[prop];
rows[key][i + j] = pick(
categoryMap[prop][val], // Y axis category if present
datetimeValueAxisMap[prop] ?
time.dateFormat(csvOptions.dateFormat, val) :
null,
val
);
j++;
}
});
i = i + j;
}
});
// Make a sortable array
for (x in rows) {
if (rows.hasOwnProperty(x)) {
rowArr.push(rows[x]);
}
}
var xAxisIndex, column;
// Add computed column headers and top level headers to final row set
dataRows = multiLevelHeaders ? [topLevelColumnTitles, columnTitles] :
[columnTitles];
i = xAxisIndices.length;
while (i--) { // Start from end to splice in
xAxisIndex = xAxisIndices[i][0];
column = xAxisIndices[i][1];
xAxis = xAxes[xAxisIndex];
// Sort it by X values
rowArr.sort(function (a, b) { // eslint-disable-line no-loop-func
return a.xValues[xAxisIndex] - b.xValues[xAxisIndex];
});
// Add header row
xTitle = columnHeaderFormatter(xAxis);
dataRows[0].splice(column, 0, xTitle);
if (multiLevelHeaders && dataRows[1]) {
// If using multi level headers, we just added top level header.
// Also add for sub level
dataRows[1].splice(column, 0, xTitle);
}
// Add the category column
each(rowArr, function (row) { // eslint-disable-line no-loop-func
var category = row.name;
if (xAxis && !defined(category)) {
if (xAxis.isDatetimeAxis) {
if (row.x instanceof Date) {
row.x = row.x.getTime();
}
category = time.dateFormat(
csvOptions.dateFormat,
row.x
);
} else if (xAxis.categories) {
category = pick(
xAxis.names[row.x],
xAxis.categories[row.x],
row.x
);
} else {
category = row.x;
}
}
// Add the X/date/category
row.splice(column, 0, category);
});
}
dataRows = dataRows.concat(rowArr);
return dataRows;
};
/**
* Export-data module required. Returns the current chart data as a CSV string.
*
* @param {Boolean} useLocalDecimalPoint
* Whether to use the local decimal point as detected from the browser.
* This makes it easier to export data to Excel in the same locale as
* the user is.
*
* @returns {String}
* CSV representation of the data
*/
Highcharts.Chart.prototype.getCSV = function (useLocalDecimalPoint) {
    var rows = this.getDataRows(),
        csvOptions = this.options.exporting.csv,
        // Decimal point: an explicit option wins; otherwise use the
        // browser locale's separator when requested (but only if the item
        // delimiter is not already a comma), falling back to '.'.
        decimalPoint = pick(
            csvOptions.decimalPoint,
            csvOptions.itemDelimiter !== ',' && useLocalDecimalPoint ?
                (1.1).toLocaleString()[1] :
                '.'
        ),
        // use ';' for direct to Excel
        itemDelimiter = pick(
            csvOptions.itemDelimiter,
            decimalPoint === ',' ? ';' : ','
        ),
        // '\n' isn't working with the js csv data extraction
        lineDelimiter = csvOptions.lineDelimiter,
        csvLines = [];
    // Serialize each row: quote strings, localize number decimal points
    each(rows, function (row) {
        var j,
            cell;
        for (j = 0; j < row.length; j++) {
            cell = row[j];
            if (typeof cell === 'string') {
                cell = '"' + cell + '"';
            }
            if (typeof cell === 'number' && decimalPoint !== '.') {
                cell = cell.toString().replace('.', decimalPoint);
            }
            row[j] = cell;
        }
        csvLines.push(row.join(itemDelimiter));
    });
    // Joining implicitly omits the delimiter after the final line
    return csvLines.join(lineDelimiter);
};
/**
* Export-data module required. Build a HTML table with the chart's current
* data.
*
* @sample highcharts/export-data/viewdata/
* View the data from the export menu
* @returns {String}
* HTML representation of the data.
*/
Highcharts.Chart.prototype.getTable = function (useLocalDecimalPoint) {
    var html = '<table>',
        options = this.options,
        // Locale-specific decimal separator, e.g. ',' in many European
        // locales; derived from how the browser formats 1.1
        decimalPoint = useLocalDecimalPoint ? (1.1).toLocaleString()[1] : '.',
        useMultiLevelHeaders = pick(
            options.exporting.useMultiLevelHeaders, true
        ),
        rows = this.getDataRows(useMultiLevelHeaders),
        rowLength = 0,
        // With multilevel headers, getDataRows returns two header rows;
        // the first one holds the top-level titles
        topHeaders = useMultiLevelHeaders ? rows.shift() : null,
        subHeaders = rows.shift(),
        // Compare two rows for equality
        isRowEqual = function (row1, row2) {
            var i = row1.length;
            if (row2.length === i) {
                while (i--) {
                    if (row1[i] !== row2[i]) {
                        return false;
                    }
                }
            } else {
                return false;
            }
            return true;
        },
        // Get table cell HTML from value. Picks a CSS class of 'number',
        // 'empty' or 'text' (plus any extra classes for the text case).
        // NOTE(review): for number/empty values the passed-in `classes`
        // argument is discarded — appears intentional for header cells,
        // which are always strings, but confirm if reused elsewhere.
        getCellHTMLFromValue = function (tag, classes, attrs, value) {
            var val = pick(value, ''),
                className = 'text' + (classes ? ' ' + classes : '');
            // Convert to string if number
            if (typeof val === 'number') {
                val = val.toString();
                if (decimalPoint === ',') {
                    val = val.replace('.', decimalPoint);
                }
                className = 'number';
            } else if (!value) {
                className = 'empty';
            }
            return '<' + tag + (attrs ? ' ' + attrs : '') +
                ' class="' + className + '">' +
                val + '</' + tag + '>';
        },
        // Get table header markup from row data
        getTableHeaderHTML = function (topheaders, subheaders, rowLength) {
            var html = '<thead>',
                i = 0,
                len = rowLength || subheaders && subheaders.length,
                next,
                cur,
                // Number of consecutive identical top headers preceding
                // the current one; used to emit a single colspan cell
                curColspan = 0,
                rowspan;
            // Clean up multiple table headers. Chart.getDataRows() returns two
            // levels of headers when using multilevel, not merged. We need to
            // merge identical headers, remove redundant headers, and keep it
            // all marked up nicely.
            if (
                useMultiLevelHeaders &&
                topheaders &&
                subheaders &&
                !isRowEqual(topheaders, subheaders)
            ) {
                html += '<tr>';
                for (; i < len; ++i) {
                    cur = topheaders[i];
                    next = topheaders[i + 1];
                    if (cur === next) {
                        // Same as the next top header: extend the run
                        ++curColspan;
                    } else if (curColspan) {
                        // Ended colspan
                        // Add cur to HTML with colspan.
                        html += getCellHTMLFromValue(
                            'th',
                            'highcharts-table-topheading',
                            'scope="col" ' +
                            'colspan="' + (curColspan + 1) + '"',
                            cur
                        );
                        curColspan = 0;
                    } else {
                        // Cur is standalone. If it is same as sublevel,
                        // remove sublevel and add just toplevel.
                        if (cur === subheaders[i]) {
                            if (options.exporting.useRowspanHeaders) {
                                rowspan = 2;
                                // Deleted entries are skipped when the
                                // subheader row is rendered below
                                delete subheaders[i];
                            } else {
                                rowspan = 1;
                                subheaders[i] = '';
                            }
                        } else {
                            rowspan = 1;
                        }
                        html += getCellHTMLFromValue(
                            'th',
                            'highcharts-table-topheading',
                            'scope="col"' +
                            (rowspan > 1 ?
                                ' valign="top" rowspan="' + rowspan + '"' :
                                ''),
                            cur
                        );
                    }
                }
                html += '</tr>';
            }
            // Add the subheaders (the only headers if not using multilevels)
            if (subheaders) {
                html += '<tr>';
                for (i = 0, len = subheaders.length; i < len; ++i) {
                    // undefined entries were deleted above (rowspan case)
                    if (subheaders[i] !== undefined) {
                        html += getCellHTMLFromValue(
                            'th', null, 'scope="col"', subheaders[i]
                        );
                    }
                }
                html += '</tr>';
            }
            html += '</thead>';
            return html;
        };
    // Add table caption
    if (options.exporting.tableCaption !== false) {
        html += '<caption class="highcharts-table-caption">' + pick(
            options.exporting.tableCaption,
            (
                options.title.text ?
                    htmlencode(options.title.text) :
                    'Chart'
            )) +
            '</caption>';
    }
    // Find longest row
    for (var i = 0, len = rows.length; i < len; ++i) {
        if (rows[i].length > rowLength) {
            rowLength = rows[i].length;
        }
    }
    // Add header
    html += getTableHeaderHTML(
        topHeaders,
        subHeaders,
        Math.max(rowLength, subHeaders.length)
    );
    // Transform the rows to HTML
    html += '<tbody>';
    each(rows, function (row) {
        html += '<tr>';
        for (var j = 0; j < rowLength; j++) {
            // Make first column a header too. Especially important for
            // category axes, but also might make sense for datetime? Should
            // await user feedback on this.
            html += getCellHTMLFromValue(
                j ? 'td' : 'th',
                null,
                j ? '' : 'scope="row"',
                row[j]
            );
        }
        html += '</tr>';
    });
    html += '</tbody></table>';
    return html;
};
/**
* File download using download attribute if supported.
*
* @private
*/
Highcharts.Chart.prototype.fileDownload = function (href, extension, content) {
    var filename,
        anchor,
        blob;
    // Resolve the file name: explicit option first, then a slugified
    // chart title, then a generic fallback
    if (this.options.exporting.filename) {
        filename = this.options.exporting.filename;
    } else if (this.title && this.title.textStr) {
        filename = this.title.textStr.replace(/ /g, '-').toLowerCase();
    } else {
        filename = 'chart';
    }
    // MS specific. Check this first because of bug with Edge (#76)
    if (win.Blob && win.navigator.msSaveOrOpenBlob) {
        // Falls to msSaveOrOpenBlob if download attribute is not supported
        blob = new win.Blob(
            ['\uFEFF' + content], // #7084
            { type: 'text/csv' }
        );
        win.navigator.msSaveOrOpenBlob(blob, filename + '.' + extension);
    } else if (downloadAttrSupported) {
        // Download attribute supported
        anchor = doc.createElement('a');
        anchor.href = href;
        anchor.download = filename + '.' + extension;
        this.container.appendChild(anchor); // #111
        anchor.click();
        anchor.remove();
    } else {
        Highcharts.error('The browser doesn\'t support downloading files');
    }
};
/**
* Call this on click of 'Download CSV' button
*
* @private
*/
Highcharts.Chart.prototype.downloadCSV = function () {
    // Build a data URI for the CSV; the BOM (\uFEFF) helps Excel
    // detect the encoding
    var csv = this.getCSV(true),
        href = 'data:text/csv,\uFEFF' + encodeURIComponent(csv);
    this.fileDownload(href, 'csv', csv, 'text/csv');
};
/**
* Call this on click of 'Download XLS' button
*
* @private
*/
Highcharts.Chart.prototype.downloadXLS = function () {
    // Encode a string as base64; the unescape/encodeURIComponent pair
    // handles non-ASCII characters (#50)
    var base64 = function (s) {
            return win.btoa(unescape(encodeURIComponent(s))); // #50
        },
        scheme = 'data:application/vnd.ms-excel;base64,',
        // Minimal SpreadsheetML-flavored HTML document wrapping the
        // chart's data table
        template = '<html xmlns:o="urn:schemas-microsoft-com:office:office" ' +
            'xmlns:x="urn:schemas-microsoft-com:office:excel" ' +
            'xmlns="http://www.w3.org/TR/REC-html40">' +
            '<head><!--[if gte mso 9]><xml><x:ExcelWorkbook>' +
            '<x:ExcelWorksheets><x:ExcelWorksheet>' +
            '<x:Name>Ark1</x:Name>' +
            '<x:WorksheetOptions><x:DisplayGridlines/></x:WorksheetOptions>' +
            '</x:ExcelWorksheet></x:ExcelWorksheets></x:ExcelWorkbook>' +
            '</xml><![endif]-->' +
            '<style>td{border:none;font-family: Calibri, sans-serif;} ' +
            '.number{mso-number-format:"0.00";} ' +
            '.text{ mso-number-format:"\@";}</style>' +
            '<meta name=ProgId content=Excel.Sheet>' +
            '<meta charset=UTF-8>' +
            '</head><body>' +
            this.getTable(true) +
            '</body></html>';
    this.fileDownload(
        scheme + base64(template),
        'xls',
        template,
        'application/vnd.ms-excel'
    );
};
/**
* Export-data module required. View the data in a table below the chart.
*/
Highcharts.Chart.prototype.viewData = function () {
    // Lazily create the table container on first use
    var tableDiv = this.dataTableDiv;
    if (!tableDiv) {
        tableDiv = doc.createElement('div');
        tableDiv.className = 'highcharts-data-table';
        // Insert after the chart container
        this.renderTo.parentNode.insertBefore(
            tableDiv,
            this.renderTo.nextSibling
        );
        this.dataTableDiv = tableDiv;
    }
    // Refresh the table contents on every call
    this.dataTableDiv.innerHTML = this.getTable();
};
/**
* Experimental function to send a chart's config to the Cloud for editing.
*
* Limitations
* - All functions (formatters and callbacks) are removed since they're not
* JSON.
*
* @todo
* - Let the Cloud throw a friendly warning about unsupported structures like
* formatters.
* - Dynamically updated charts probably fail, we need a generic
* Chart.getOptions function that returns all non-default options. Should also
* be used by the export module.
*/
Highcharts.Chart.prototype.openInCloud = function () {
    var params,
        chartOptions;
    // Recursively strip function members, since they can't be
    // represented in JSON
    function removeFunctions(ob) {
        Object.keys(ob).forEach(function (key) {
            if (typeof ob[key] === 'function') {
                delete ob[key];
            }
            if (Highcharts.isObject(ob[key])) { // object and not an array
                removeFunctions(ob[key]);
            }
        });
    }
    // Post the serialized chart to the cloud editor in a new tab via a
    // temporary hidden form
    function submitToCloud() {
        var form = doc.createElement('form'),
            input = doc.createElement('input');
        doc.body.appendChild(form);
        form.method = 'post';
        form.action = 'https://cloud-api.highcharts.com/openincloud';
        form.target = '_blank';
        input.type = 'hidden';
        input.name = 'chart';
        input.value = params;
        form.appendChild(input);
        form.submit();
        doc.body.removeChild(form);
    }
    // Work on a deep copy so the live chart options are untouched
    chartOptions = Highcharts.merge(this.userOptions);
    removeFunctions(chartOptions);
    params = JSON.stringify({
        name: (chartOptions.title && chartOptions.title.text) || 'Chart title',
        options: chartOptions,
        settings: {
            constructor: 'Chart',
            dataProvider: {
                csv: this.getCSV()
            }
        }
    });
    submitToCloud();
};
// Add "Download CSV" to the exporting menu.
var exportingOptions = Highcharts.getOptions().exporting;
if (exportingOptions) {
    // Each menu entry's textKey matches the Chart method it invokes,
    // so the definitions can be generated from the method names
    each(
        ['downloadCSV', 'downloadXLS', 'viewData', 'openInCloud'],
        function (methodName) {
            exportingOptions.menuItemDefinitions[methodName] = {
                textKey: methodName,
                onclick: function () {
                    this[methodName]();
                }
            };
        }
    );
    exportingOptions.buttons.contextButton.menuItems.push(
        'separator',
        'downloadCSV',
        'downloadXLS',
        'viewData',
        'openInCloud'
    );
}
// Series specific
// Pies, funnels, geo maps etc. key exported rows by point name; flag
// the map-related series types accordingly when they are loaded
each(['map', 'mapbubble', 'treemap'], function (type) {
    if (seriesTypes[type]) {
        seriesTypes[type].prototype.exportKey = 'name';
    }
});
}(Highcharts));
}));
| cdnjs/cdnjs | ajax/libs/highcharts/6.1.0/modules/export-data.src.js | JavaScript | mit | 34,704 |
/**@deprecated use "const { siRedmine } = require('simple-icons/icons');" instead*/declare const i:import("../alias").I;export default i; | cdnjs/cdnjs | ajax/libs/simple-icons/6.9.0/redmine.d.ts | TypeScript | mit | 137 |
<?php
namespace Ddeboer\DataImport\Tests\ValueConverter;
use Ddeboer\DataImport\ValueConverter\ArrayValueConverterMap;
use Ddeboer\DataImport\ValueConverter\CallbackValueConverter;
/**
* @author Christoph Rosse <[email protected]>
*/
class ArrayValueConverterMapTest extends \PHPUnit_Framework_TestCase
{
/**
* @expectedException InvalidArgumentException
*/
public function testConvertWithNoArrayArgument()
{
$converter = new ArrayValueConverterMap(array('foo' => new CallbackValueConverter(function($input) {return $input;})));
$converter->convert('foo');
}
public function testConvertWithMultipleFields()
{
$data = array(
array(
'foo' => 'test',
'bar' => 'test'
),
array(
'foo' => 'test2',
'bar' => 'test2'
),
);
$addBarConverter = new CallbackValueConverter(function($input) { return 'bar'.$input; });
$addBazConverter = new CallbackValueConverter(function($input) { return 'baz'.$input; });
$converter = new ArrayValueConverterMap(
array(
'foo' => array($addBarConverter),
'bar' => array($addBazConverter, $addBarConverter),
)
);
$data = $converter->convert($data);
$this->assertEquals('bartest', $data[0]['foo']);
$this->assertEquals('barbaztest', $data[0]['bar']);
$this->assertEquals('bartest2', $data[1]['foo']);
$this->assertEquals('barbaztest2', $data[1]['bar']);
}
}
| abenamer/thereveal | wp-content/plugins/dfp-ads/vendor/ddeboer/data-import/tests/Ddeboer/DataImport/Tests/ValueConverter/ArrayValueConverterMapTest.php | PHP | gpl-2.0 | 1,600 |
<?php
/* vim: set expandtab tabstop=4 shiftwidth=4: */
//
// +----------------------------------------------------------------------+
// | PHP version 4 |
// +----------------------------------------------------------------------+
// | Copyright (c) 1997-2003 The PHP Group |
// +----------------------------------------------------------------------+
// | This source file is subject to version 3.0 of the PHP license, |
// | that is bundled with this package in the file LICENSE, and is |
// | available at through the world-wide-web at |
// | http://www.php.net/license/3_0.txt. |
// | If you did not receive a copy of the PHP license and are unable to |
// | obtain it through the world-wide-web, please send a note to |
// | [email protected] so we can mail you a copy immediately. |
// +----------------------------------------------------------------------+
// | Authors: Martin Marrese <[email protected]> |
// | Based On: lang_es.php - Xavier Noguer |
// +----------------------------------------------------------------------+
// $Id$
//
// Numbers_Words class extension to spell numbers in Argentinian Spanish
//
//
/**
* Class for translating numbers into Argentinian Spanish.
*
* @author Martin Marrese
* @package Numbers_Words
*/
/**
* Include needed files
*/
require_once("Numbers/Words.php");
/**
* Class for translating numbers into Argentinian Spanish.
* It supports up to decallones (10^6).
* It doesn't support spanish tonic accents (acentos).
*
* @author Martin Marrese
* @package Numbers_Words
*/
class Numbers_Words_es_AR extends Numbers_Words
{
// {{{ properties
/**
* Locale name
* @var string
* @access public
*/
var $locale = 'es_AR';
/**
* Language name in English
* @var string
* @access public
*/
var $lang = 'Spanish';
/**
* Native language name
* @var string
* @access public
*/
var $lang_native = 'Español';
/**
* The word for the minus sign
* @var string
* @access private
*/
var $_minus = 'menos';
/**
* The sufixes for exponents (singular and plural)
* @var array
* @access private
*/
var $_exponent = array(
0 => array('',''),
3 => array('mil','mil'),
6 => array('millón','millones'),
12 => array('billón','billones'),
18 => array('trilón','trillones'),
24 => array('cuatrillón','cuatrillones'),
30 => array('quintillón','quintillones'),
36 => array('sextillón','sextillones'),
42 => array('septillón','septillones'),
48 => array('octallón','octallones'),
54 => array('nonallón','nonallones'),
60 => array('decallón','decallones'),
);
/**
* The array containing the digits (indexed by the digits themselves).
* @var array
* @access private
*/
var $_digits = array(
0 => 'cero', 'uno', 'dos', 'tres', 'cuatro',
'cinco', 'seis', 'siete', 'ocho', 'nueve'
);
/**
* The word separator
* @var string
* @access private
*/
var $_sep = ' ';
/**
* The currency names (based on the below links,
* informations from central bank websites and on encyclopedias)
*
* @var array
* @link http://30-03-67.dreamstation.com/currency_alfa.htm World Currency Information
* @link http://www.jhall.demon.co.uk/currency/by_abbrev.html World currencies
* @link http://www.shoestring.co.kr/world/p.visa/change.htm Currency names in English
* @access private
*/
var $_currency_names = array(
'ALL' => array(array('lek'), array('qindarka')),
'AUD' => array(array('Australian dollar'), array('cent')),
'ARS' => array(array('Peso'), array ('centavo')),
'BAM' => array(array('convertible marka'), array('fenig')),
'BGN' => array(array('lev'), array('stotinka')),
'BRL' => array(array('real'), array('centavos')),
'BYR' => array(array('Belarussian rouble'), array('kopiejka')),
'CAD' => array(array('Canadian dollar'), array('cent')),
'CHF' => array(array('Swiss franc'), array('rapp')),
'CYP' => array(array('Cypriot pound'), array('cent')),
'CZK' => array(array('Czech koruna'), array('halerz')),
'DKK' => array(array('Danish krone'), array('ore')),
'EEK' => array(array('kroon'), array('senti')),
'EUR' => array(array('euro'), array('euro-cent')),
'GBP' => array(array('pound', 'pounds'), array('pence')),
'HKD' => array(array('Hong Kong dollar'), array('cent')),
'HRK' => array(array('Croatian kuna'), array('lipa')),
'HUF' => array(array('forint'), array('filler')),
'ILS' => array(array('new sheqel','new sheqels'), array('agora','agorot')),
'ISK' => array(array('Icelandic króna'), array('aurar')),
'JPY' => array(array('yen'), array('sen')),
'LTL' => array(array('litas'), array('cent')),
'LVL' => array(array('lat'), array('sentim')),
'MKD' => array(array('Macedonian dinar'), array('deni')),
'MTL' => array(array('Maltese lira'), array('centym')),
'NOK' => array(array('Norwegian krone'), array('oere')),
'PLN' => array(array('zloty', 'zlotys'), array('grosz')),
'ROL' => array(array('Romanian leu'), array('bani')),
'RUB' => array(array('Russian Federation rouble'), array('kopiejka')),
'SEK' => array(array('Swedish krona'), array('oere')),
'SIT' => array(array('Tolar'), array('stotinia')),
'SKK' => array(array('Slovak koruna'), array()),
'TRL' => array(array('lira'), array('kuruþ')),
'UAH' => array(array('hryvna'), array('cent')),
'USD' => array(array('dollar'), array('cent')),
'YUM' => array(array('dinars'), array('para')),
'ZAR' => array(array('rand'), array('cent'))
);
/**
* The default currency name
* @var string
* @access public
*/
var $def_currency = 'ARS'; // Argentinian Peso
// }}}
// {{{ toWords()
/**
* Converts a number to its word representation
* in Argentinian Spanish.
*
* @param float $num An float between -infinity and infinity inclusive :)
* that should be converted to a words representation
* @param integer $power The power of ten for the rest of the number to the right.
* For example toWords(12,3) should give "doce mil".
* Optional, defaults to 0.
* @return string The corresponding word representation
*
* @access private
* @author Martin Marrese
*/
function toWords($num, $power = 0)
{
// The return string;
$ret = '';
// add a the word for the minus sign if necessary
if (substr($num, 0, 1) == '-')
{
$ret = $this->_sep . $this->_minus;
$num = substr($num, 1);
}
// strip excessive zero signs
$num = preg_replace('/^0+/','',$num);
$num_tmp = preg_split ('/\./D', $num);
$num = $num_tmp[0];
$dec = (@$num_tmp[1]) ? $num_tmp[1] : '';
if (strlen($num) > 6)
{
$current_power = 6;
// check for highest power
if (isset($this->_exponent[$power]))
{
// convert the number above the first 6 digits
// with it's corresponding $power.
$snum = substr($num, 0, -6);
$snum = preg_replace('/^0+/','',$snum);
if ($snum !== '') {
$ret .= $this->toWords($snum, $power + 6);
}
}
$num = substr($num, -6);
if ($num == 0) {
return $ret;
}
}
elseif ($num == 0 || $num == '') {
return(' '.$this->_digits[0]);
$current_power = strlen($num);
}
else {
$current_power = strlen($num);
}
// See if we need "thousands"
$thousands = floor($num / 1000);
if ($thousands == 1) {
$ret .= $this->_sep . 'mil';
}
elseif ($thousands > 1) {
$ret .= $this->toWords($thousands, 3);
}
// values for digits, tens and hundreds
$h = floor(($num / 100) % 10);
$t = floor(($num / 10) % 10);
$d = floor($num % 10);
// cientos: doscientos, trescientos, etc...
switch ($h)
{
case 1:
if (($d == 0) and ($t == 0)) { // is it's '100' use 'cien'
$ret .= $this->_sep . 'cien';
}
else {
$ret .= $this->_sep . 'ciento';
}
break;
case 2:
case 3:
case 4:
case 6:
case 8:
$ret .= $this->_sep . $this->_digits[$h] . 'cientos';
break;
case 5:
$ret .= $this->_sep . 'quinientos';
break;
case 7:
$ret .= $this->_sep . 'setecientos';
break;
case 9:
$ret .= $this->_sep . 'novecientos';
break;
}
// decenas: veinte, treinta, etc...
switch ($t)
{
case 9:
$ret .= $this->_sep . 'noventa';
break;
case 8:
$ret .= $this->_sep . 'ochenta';
break;
case 7:
$ret .= $this->_sep . 'setenta';
break;
case 6:
$ret .= $this->_sep . 'sesenta';
break;
case 5:
$ret .= $this->_sep . 'cincuenta';
break;
case 4:
$ret .= $this->_sep . 'cuarenta';
break;
case 3:
$ret .= $this->_sep . 'treinta';
break;
case 2:
if ($d == 0) {
$ret .= $this->_sep . 'veinte';
}
else {
if (($power > 0) and ($d == 1)) {
$ret .= $this->_sep . 'veintiún';
}
else {
$ret .= $this->_sep . 'veinti' . $this->_digits[$d];
}
}
break;
case 1:
switch ($d)
{
case 0:
$ret .= $this->_sep . 'diez';
break;
case 1:
$ret .= $this->_sep . 'once';
break;
case 2:
$ret .= $this->_sep . 'doce';
break;
case 3:
$ret .= $this->_sep . 'trece';
break;
case 4:
$ret .= $this->_sep . 'catorce';
break;
case 5:
$ret .= $this->_sep . 'quince';
break;
case 6:
case 7:
case 9:
case 8:
$ret .= $this->_sep . 'dieci' . $this->_digits[$d];
break;
}
break;
}
// add digits only if it is a multiple of 10 and not 1x or 2x
if (($t != 1) and ($t != 2) and ($d > 0))
{
if($t != 0) // don't add 'y' for numbers below 10
{
// use 'un' instead of 'uno' when there is a suffix ('mil', 'millones', etc...)
if(($power > 0) and ($d == 1)) {
$ret .= $this->_sep.' y un';
}
else {
$ret .= $this->_sep.'y '.$this->_digits[$d];
}
}
else {
if(($power > 0) and ($d == 1)) {
$ret .= $this->_sep.'un';
}
else {
$ret .= $this->_sep.$this->_digits[$d];
}
}
}
if ($power > 0)
{
if (isset($this->_exponent[$power])) {
$lev = $this->_exponent[$power];
}
if (!isset($lev) || !is_array($lev)) {
return null;
}
// if it's only one use the singular suffix
if (($d == 1) and ($t == 0) and ($h == 0)) {
$suffix = $lev[0];
}
else {
$suffix = $lev[1];
}
if ($num != 0) {
$ret .= $this->_sep . $suffix;
}
}
if ($dec) {
$dec = $this->toWords(trim($dec));
$ret.= ' con ' . trim ($dec);
}
return $ret;
}
// }}}
// {{{ toCurrency()
/**
* Converts a currency value to its word representation
* (with monetary units) in Agentinian Spanish language
*
* @param integer $int_curr An international currency symbol
* as defined by the ISO 4217 standard (three characters)
* @param integer $decimal A money total amount without fraction part (e.g. amount of dollars)
* @param integer $fraction Fractional part of the money amount (e.g. amount of cents)
* Optional. Defaults to false.
*
* @return string The corresponding word representation for the currency
*
* @access public
* @author Martin Marrese
*/
function toCurrencyWords($int_curr, $decimal, $fraction = false) {
$int_curr = strtoupper($int_curr);
if (!isset($this->_currency_names[$int_curr])) {
$int_curr = $this->def_currency;
}
$curr_names = $this->_currency_names[$int_curr];
$lev = ($decimal == 1) ? 0 : 1;
if ($lev > 0) {
if (count($curr_names[0]) > 1) {
$ret = $curr_names[0][$lev];
} else {
$ret = $curr_names[0][0] . 's';
}
} else {
$ret = $curr_names[0][0];
}
$ret .= $this->_sep . trim($this->toWords($decimal));
if ($fraction !== false) {
$ret .= $this->_sep .'con'. $this->_sep . trim($this->toWords($fraction));
$lev = ($fraction == 1) ? 0 : 1;
if ($lev > 0) {
if (count($curr_names[1]) > 1) {
$ret .= $this->_sep . $curr_names[1][$lev];
} else {
$ret .= $this->_sep . $curr_names[1][0] . 's';
}
} else {
$ret .= $this->_sep . $curr_names[1][0];
}
}
return $ret;
}
// }}}
}
?>
| AdRiverSoftware/AdServerProjectWebnock | lib/pear/Numbers/Words/lang.es_AR.php | PHP | gpl-2.0 | 15,158 |
/*
* Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef _JAVA_H_
#define _JAVA_H_
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <jni.h>
#include <jvm.h>
/*
* Get system specific defines.
*/
#include "emessages.h"
#include "java_md.h"
#include "jli_util.h"
#include "manifest_info.h"
#include "version_comp.h"
#include "wildcard.h"
#include "splashscreen.h"
# define KB (1024UL)
# define MB (1024UL * KB)
# define GB (1024UL * MB)
#define CURRENT_DATA_MODEL (CHAR_BIT * sizeof(void*))
/*
* The following environment variable is used to influence the behavior
* of the jre exec'd through the SelectVersion routine. The command line
* options which specify the version are not passed to the exec'd version,
* because that jre may be an older version which wouldn't recognize them.
* This environment variable is known to this (and later) version and serves
* to suppress the version selection code. This is not only for efficiency,
* but also for correctness, since any command line options have been
* removed which would cause any value found in the manifest to be used.
* This would be incorrect because the command line options are defined
* to take precedence.
*
* The value associated with this environment variable is the MainClass
* name from within the executable jar file (if any). This is strictly a
* performance enhancement to avoid re-reading the jar file manifest.
*
*/
#define ENV_ENTRY "_JAVA_VERSION_SET"
#define SPLASH_FILE_ENV_ENTRY "_JAVA_SPLASH_FILE"
#define SPLASH_JAR_ENV_ENTRY "_JAVA_SPLASH_JAR"
/*
* Pointers to the needed JNI invocation API, initialized by LoadJavaVM.
*/
typedef jint (JNICALL *CreateJavaVM_t)(JavaVM **pvm, void **env, void *args);
typedef jint (JNICALL *GetDefaultJavaVMInitArgs_t)(void *args);
typedef struct {
CreateJavaVM_t CreateJavaVM;
GetDefaultJavaVMInitArgs_t GetDefaultJavaVMInitArgs;
} InvocationFunctions;
int
JLI_Launch(int argc, char ** argv, /* main argc, argc */
int jargc, const char** jargv, /* java args */
int appclassc, const char** appclassv, /* app classpath */
const char* fullversion, /* full version defined */
const char* dotversion, /* dot version defined */
const char* pname, /* program name */
const char* lname, /* launcher name */
jboolean javaargs, /* JAVA_ARGS */
jboolean cpwildcard, /* classpath wildcard */
jboolean javaw, /* windows-only javaw */
jint ergo_class /* ergnomics policy */
);
/*
* Prototypes for launcher functions in the system specific java_md.c.
*/
jboolean
LoadJavaVM(const char *jvmpath, InvocationFunctions *ifn);
void
GetXUsagePath(char *buf, jint bufsize);
jboolean
GetApplicationHome(char *buf, jint bufsize);
#define GetArch() GetArchPath(CURRENT_DATA_MODEL)
/*
* Different platforms will implement this, here
* pargc is a pointer to the original argc,
* pargv is a pointer to the original argv,
* jrepath is an accessible path to the jre as determined by the call
* so_jrepath is the length of the buffer jrepath
* jvmpath is an accessible path to the jvm as determined by the call
* so_jvmpath is the length of the buffer jvmpath
*/
void CreateExecutionEnvironment(int *argc, char ***argv,
char *jrepath, jint so_jrepath,
char *jvmpath, jint so_jvmpath);
/* Reports an error message to stderr or a window as appropriate. */
void JLI_ReportErrorMessage(const char * message, ...);
/* Reports a system error message to stderr or a window */
void JLI_ReportErrorMessageSys(const char * message, ...);
/* Reports an error message only to stderr. */
void JLI_ReportMessage(const char * message, ...);
/*
* Reports an exception which terminates the vm to stderr or a window
* as appropriate.
*/
void JLI_ReportExceptionDescription(JNIEnv * env);
void PrintMachineDependentOptions();
const char *jlong_format_specifier();
/*
* Block current thread and continue execution in new thread
*/
int ContinueInNewThread0(int (JNICALL *continuation)(void *),
jlong stack_size, void * args);
/* sun.java.launcher.* platform properties. */
void SetJavaLauncherPlatformProps(void);
void SetJavaCommandLineProp(char* what, int argc, char** argv);
void SetJavaLauncherProp(void);
/*
* Functions defined in java.c and used in java_md.c.
*/
jint ReadKnownVMs(const char *jrepath, const char * arch, jboolean speculative);
char *CheckJvmType(int *argc, char ***argv, jboolean speculative);
void AddOption(char *str, void *info);
enum ergo_policy {
DEFAULT_POLICY = 0,
NEVER_SERVER_CLASS,
ALWAYS_SERVER_CLASS
};
const char* GetProgramName();
const char* GetDotVersion();
const char* GetFullVersion();
jboolean IsJavaArgs();
jboolean IsJavaw();
jint GetErgoPolicy();
jboolean ServerClassMachine();
static int ContinueInNewThread(InvocationFunctions* ifn,
int argc, char** argv,
int mode, char *what, int ret);
/*
* Initialize platform specific settings
*/
void InitLauncher(jboolean javaw);
/*
* This allows for finding classes from the VM's bootstrap class loader directly,
* FindClass uses the application class loader internally, this will cause
* unnecessary searching of the classpath for the required classes.
*
*/
typedef jclass (JNICALL FindClassFromBootLoader_t(JNIEnv *env,
const char *name));
jclass FindBootStrapClass(JNIEnv *env, const char *classname);
#endif /* _JAVA_H_ */
| ikeji/openjdk7-jdk | src/share/bin/java.h | C | gpl-2.0 | 6,935 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (version 1.7.0_71) on Tue Feb 16 15:23:08 EST 2016 -->
<meta http-equiv="Content-Type" content="text/html" charset="utf-8">
<title>Uses of Class org.apache.solr.search.stats.LocalStatsSource (Solr 5.5.0 API)</title>
<meta name="date" content="2016-02-16">
<link rel="stylesheet" type="text/css" href="../../../../../../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class org.apache.solr.search.stats.LocalStatsSource (Solr 5.5.0 API)";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../org/apache/solr/search/stats/LocalStatsSource.html" title="class in org.apache.solr.search.stats">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?org/apache/solr/search/stats/class-use/LocalStatsSource.html" target="_top">Frames</a></li>
<li><a href="LocalStatsSource.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="Uses of Class org.apache.solr.search.stats.LocalStatsSource" class="title">Uses of Class<br>org.apache.solr.search.stats.LocalStatsSource</h2>
</div>
<div class="classUseContainer">No usage of org.apache.solr.search.stats.LocalStatsSource</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../org/apache/solr/search/stats/LocalStatsSource.html" title="class in org.apache.solr.search.stats">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?org/apache/solr/search/stats/class-use/LocalStatsSource.html" target="_top">Frames</a></li>
<li><a href="LocalStatsSource.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>
<i>Copyright © 2000-2016 Apache Software Foundation. All Rights Reserved.</i>
<script src='../../../../../../prettify.js' type='text/javascript'></script>
<script type='text/javascript'>
(function(){
var oldonload = window.onload;
if (typeof oldonload != 'function') {
window.onload = prettyPrint;
} else {
window.onload = function() {
oldonload();
prettyPrint();
}
}
})();
</script>
</small></p>
</body>
</html>
| ubtue/KrimDok | solr/vendor/docs/solr-core/org/apache/solr/search/stats/class-use/LocalStatsSource.html | HTML | gpl-2.0 | 4,888 |
/* packet-rmcp.c
* Routines for RMCP packet dissection
*
* Duncan Laurie <[email protected]>
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <[email protected]>
* Copyright 1998 Gerald Combs
*
* Copied from packet-tftp.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "config.h"
#include <epan/packet.h>
void proto_register_rmcp(void);
void proto_register_rsp(void);
void proto_reg_handoff_rmcp(void);
void proto_reg_handoff_rsp(void);
/*
* See
* http://www.dmtf.org/standards/standard_alert.php
* http://www.dmtf.org/standards/documents/ASF/DSP0136.pdf
* (the ASF specification includes RMCP)
*/
static int proto_rmcp = -1;
static int hf_rmcp_version = -1;
static int hf_rmcp_reserved = -1;
static int hf_rmcp_sequence = -1;
static int hf_rmcp_class = -1;
static int hf_rmcp_type = -1;
static int hf_rmcp_trailer = -1;
static int proto_rsp = -1;
static int hf_rsp_session_id = -1;
static int hf_rsp_sequence = -1;
static gint ett_rmcp = -1;
static gint ett_rmcp_typeclass = -1;
static gint ett_rsp = -1;
static dissector_handle_t data_handle;
static dissector_table_t rmcp_dissector_table;
#define UDP_PORT_RMCP 623
#define UDP_PORT_RMCP_SECURE 664
#define RMCP_TYPE_MASK 0x80
#define RMCP_TYPE_NORM 0x00
#define RMCP_TYPE_ACK 0x01
static const value_string rmcp_type_vals[] = {
{ RMCP_TYPE_NORM, "Normal RMCP" },
{ RMCP_TYPE_ACK, "RMCP ACK" },
{ 0, NULL }
};
#define RMCP_CLASS_MASK 0x1f
#define RMCP_CLASS_ASF 0x06
#define RMCP_CLASS_IPMI 0x07
#define RMCP_CLASS_OEM 0x08
static const value_string rmcp_class_vals[] = {
{ RMCP_CLASS_ASF, "ASF" },
{ RMCP_CLASS_IPMI, "IPMI" },
{ RMCP_CLASS_OEM, "OEM" },
{ 0, NULL }
};
/*
 * Dissect an RMCP (Remote Management Control Protocol) PDU.
 *
 * Heuristics: the class byte at offset 3 must be present and its low
 * 5 bits must match a known class (ASF/IPMI/OEM), otherwise the packet
 * is rejected (return 0) so other dissectors may claim it.
 *
 * Layout (4-byte header): version, reserved, sequence, type/class.
 * For normal (non-ACK) messages the payload after the header is handed
 * to a class-specific subdissector via the "rmcp.class" table; any
 * bytes the fallback data dissector does not consume are shown as a
 * trailer (e.g. an RSP integrity trailer).
 *
 * Returns the number of bytes dissected, or 0 if not RMCP.
 */
static int
dissect_rmcp(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data _U_)
{
	proto_tree	*rmcp_tree = NULL, *field_tree;
	proto_item	*ti;
	tvbuff_t	*next_tvb;
	guint8		rmcp_class;
	const gchar	*class_str;
	guint8		type;
	guint		len;
	/*
	 * Check whether it's a known class value; if not, assume it's
	 * not RMCP.
	 */
	if (!tvb_bytes_exist(tvb, 3, 1))
		return 0;	/* class value byte not present */
	rmcp_class = tvb_get_guint8(tvb, 3);
	/* Get the normal/ack bit from the RMCP class */
	type = (rmcp_class & RMCP_TYPE_MASK) >> 7;	/* bit 7: 0 = normal, 1 = ACK */
	rmcp_class &= RMCP_CLASS_MASK;			/* low 5 bits: message class */
	class_str = try_val_to_str(rmcp_class, rmcp_class_vals);
	if (class_str == NULL)
		return 0;	/* unknown class value */
	col_set_str(pinfo->cinfo, COL_PROTOCOL, "RMCP");
	col_add_fstr(pinfo->cinfo, COL_INFO, "%s, Class: %s",
		     val_to_str(type, rmcp_type_vals, "Unknown (0x%02x)"),
		     class_str);
	if (tree) {
		/* 4-byte RMCP header subtree */
		ti = proto_tree_add_protocol_format(tree, proto_rmcp, tvb, 0, 4,
			 "Remote Management Control Protocol, Class: %s",
			 class_str);
		rmcp_tree = proto_item_add_subtree(ti, ett_rmcp);
		proto_tree_add_item(rmcp_tree, hf_rmcp_version, tvb, 0, 1, ENC_LITTLE_ENDIAN);
		proto_tree_add_item(rmcp_tree, hf_rmcp_reserved, tvb, 1, 1, ENC_LITTLE_ENDIAN);
		proto_tree_add_item(rmcp_tree, hf_rmcp_sequence, tvb, 2, 1, ENC_LITTLE_ENDIAN);
		/* Byte 3 carries both the class (bits 0-4) and the type (bit 7) */
		field_tree = proto_tree_add_subtree_format(rmcp_tree, tvb, 3, 1,
			 ett_rmcp_typeclass, NULL, "Type: %s, Class: %s",
			 val_to_str(type, rmcp_type_vals, "Unknown (0x%02x)"),
			 class_str);
		proto_tree_add_item(field_tree, hf_rmcp_class, tvb, 3, 1, ENC_LITTLE_ENDIAN);
		proto_tree_add_item(field_tree, hf_rmcp_type, tvb, 3, 1, ENC_LITTLE_ENDIAN);
	}
	if (!type){ /* do not expect a data block for an ACK */
		next_tvb = tvb_new_subset_remaining(tvb, 4);
		/* Try the class-specific subdissector first (ASF/IPMI/OEM) */
		if (!dissector_try_uint(rmcp_dissector_table, rmcp_class, next_tvb, pinfo,
			tree)) {
			/* No subdissector: show as data; leftover bytes become trailer */
			len = call_dissector(data_handle, next_tvb, pinfo, tree);
			if (len < tvb_reported_length(next_tvb)) {
				proto_tree_add_item(tree, hf_rmcp_trailer, tvb, 4 + len, -1, ENC_NA);
			}
		}
	}
	return tvb_captured_length(tvb);
}
/*
 * Dissect an RSP (RMCP Security-extensions Protocol) PDU.
 *
 * RSP prepends an 8-byte header (4-byte session ID + 4-byte sequence
 * number, both big-endian) to an encapsulated RMCP message; the
 * remainder of the tvb is handed to dissect_rmcp().
 *
 * Returns the number of bytes dissected.
 */
static int
dissect_rsp(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data _U_)
{
	proto_tree	*rsp_tree = NULL/*, *field_tree*/;
	proto_item	*ti/*, *tf*/;
	tvbuff_t	*next_tvb;
	int		offset = 0;
	if (tree) {
		ti = proto_tree_add_protocol_format(tree, proto_rsp, tvb, offset, 8,
			 "RMCP Security-extension Protocol");
		rsp_tree = proto_item_add_subtree(ti, ett_rsp);
		proto_tree_add_item(rsp_tree, hf_rsp_session_id, tvb, offset, 4, ENC_BIG_ENDIAN);
		offset += 4;
		proto_tree_add_item(rsp_tree, hf_rsp_sequence, tvb, offset, 4, ENC_BIG_ENDIAN);
		/*offset += 4;*/
	}
	/* XXX determination of RCMP message length needs to
	 * be done according to 3.2.3.3.3 of the specification.
	 * This is only valid for session ID equals 0
	 */
	next_tvb = tvb_new_subset_remaining(tvb, 8);
	dissect_rmcp(next_tvb, pinfo, tree, NULL);
	return tvb_captured_length(tvb);
}
/*
 * Register the RMCP protocol, its header fields, subtrees and the
 * "rmcp.class" subdissector table (keyed by the 5-bit class value,
 * used by the ASF/IPMI/OEM payload dissectors).
 */
void
proto_register_rmcp(void)
{
	static hf_register_info hf[] = {
		{ &hf_rmcp_version, {
			"Version", "rmcp.version",
			FT_UINT8, BASE_HEX, NULL, 0,
			"RMCP Version", HFILL }},
		{ &hf_rmcp_reserved, {
			/*
			 * Fixed: this field previously registered the abbrev
			 * "rmcp.version", duplicating the Version field above
			 * and making the reserved byte unfilterable.
			 */
			"Reserved", "rmcp.reserved",
			FT_UINT8, BASE_HEX, NULL, 0,
			"RMCP Reserved", HFILL }},
		{ &hf_rmcp_sequence, {
			"Sequence", "rmcp.sequence",
			FT_UINT8, BASE_HEX, NULL, 0,
			"RMCP Sequence", HFILL }},
		{ &hf_rmcp_class, {
			"Class", "rmcp.class",
			FT_UINT8, BASE_HEX,
			VALS(rmcp_class_vals), RMCP_CLASS_MASK,
			"RMCP Class", HFILL }},
		{ &hf_rmcp_type, {
			"Message Type", "rmcp.type",
			FT_UINT8, BASE_HEX,
			VALS(rmcp_type_vals), RMCP_TYPE_MASK,
			"RMCP Message Type", HFILL }},
		{ &hf_rmcp_trailer, {
			"RSP Trailer", "rmcp.trailer",
			FT_BYTES, BASE_NONE, NULL, 0,
			NULL, HFILL }},
	};
	static gint *ett[] = {
		&ett_rmcp,
		&ett_rmcp_typeclass
	};
	proto_rmcp = proto_register_protocol(
		"Remote Management Control Protocol", "RMCP", "rmcp");
	proto_register_field_array(proto_rmcp, hf, array_length(hf));
	proto_register_subtree_array(ett, array_length(ett));
	/* Payload dissectors (ASF, IPMI, ...) register here by class value */
	rmcp_dissector_table = register_dissector_table(
		"rmcp.class", "RMCP Class", FT_UINT8, BASE_HEX);
}
/*
 * Register the RSP (RMCP Security-extensions Protocol) protocol and
 * its two header fields: the 32-bit session ID and sequence number.
 */
void
proto_register_rsp(void)
{
	static hf_register_info hf[] = {
		{ &hf_rsp_session_id, {
			"Session ID", "rsp.session_id",
			FT_UINT32, BASE_HEX, NULL, 0,
			"RSP session ID", HFILL }},
		{ &hf_rsp_sequence, {
			"Sequence", "rsp.sequence",
			FT_UINT32, BASE_HEX, NULL, 0,
			"RSP sequence", HFILL }},
	};
	static gint *ett[] = {
		&ett_rsp
	};
	proto_rsp = proto_register_protocol(
		"RMCP Security-extensions Protocol", "RSP", "rsp");
	proto_register_field_array(proto_rsp, hf, array_length(hf));
	proto_register_subtree_array(ett, array_length(ett));
}
/*
 * Handoff registration: look up the fallback data dissector and attach
 * the RMCP dissector to its well-known UDP port (623).
 */
void
proto_reg_handoff_rmcp(void)
{
	data_handle = find_dissector("data");
	dissector_add_uint("udp.port", UDP_PORT_RMCP,
			   new_create_dissector_handle(dissect_rmcp, proto_rmcp));
}
/*
 * Handoff registration: attach the RSP dissector to the secure RMCP
 * UDP port (664).
 */
void
proto_reg_handoff_rsp(void)
{
	dissector_add_uint("udp.port", UDP_PORT_RMCP_SECURE,
			   new_create_dissector_handle(dissect_rsp, proto_rsp));
}
/*
* Editor modelines - http://www.wireshark.org/tools/modelines.html
*
* Local variables:
* c-basic-offset: 8
* tab-width: 8
* indent-tabs-mode: t
* End:
*
* vi: set shiftwidth=8 tabstop=8 noexpandtab:
* :indentSize=8:tabSize=8:noTabs=false:
*/
| pmembrey/wireshark | epan/dissectors/packet-rmcp.c | C | gpl-2.0 | 7,856 |
# bcmdhd
#####################
# SDIO Basic feature
#####################
DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DLINUX -DBCMDRIVER \
-DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE \
-DDHDTHREAD -DBDC -DOOB_INTR_ONLY \
-DDHD_BCMEVENTS -DSHOW_EVENTS -DBCMDBG \
-DMMC_SDIO_ABORT -DBCMSDIO -DBCMLXSDMMC -DBCMPLATFORM_BUS -DWLP2P \
-DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT \
-DKEEP_ALIVE -DCSCAN -DPKT_FILTER_SUPPORT \
-DEMBEDDED_PLATFORM -DPNO_SUPPORT
#################
# Common feature
#################
DHDCFLAGS += -DCUSTOMER_HW4
DHDCFLAGS += -DBLOCK_IPV6_PACKET -DPASS_IPV4_SUSPEND
DHDCFLAGS += -DSUPPORT_DEEP_SLEEP
DHDCFLAGS += -DSIMPLE_MAC_PRINT
# For p2p connection issue
DHDCFLAGS += -DWL_CFG80211_GON_COLLISION
DHDCFLAGS += -DWL_SCB_TIMEOUT=10
# For Passing all multicast packets to host when not in suspend mode.
DHDCFLAGS += -DPASS_ALL_MCAST_PKTS
# Early suspend
DHDCFLAGS += -DDHD_USE_EARLYSUSPEND
DHDCFLAGS += -DSUPPORT_PM2_ONLY
# For Scan result patch
DHDCFLAGS += -DESCAN_RESULT_PATCH
DHDCFLAGS += -DDUAL_ESCAN_RESULT_BUFFER
DHDCFLAGS += -DROAM_ENABLE -DROAM_CHANNEL_CACHE -DROAM_API
DHDCFLAGS += -DDISABLE_FW_ROAM_SUSPEND
# For Static Buffer
ifeq ($(CONFIG_BROADCOM_WIFI_RESERVED_MEM),y)
DHDCFLAGS += -DCONFIG_DHD_USE_STATIC_BUF
DHDCFLAGS += -DENHANCED_STATIC_BUF
DHDCFLAGS += -DSTATIC_WL_PRIV_STRUCT
endif
# For CCX
ifeq ($(CONFIG_BRCM_CCX),y)
DHDCFLAGS += -DBCMCCX
endif
DHDCFLAGS += -DWL_CFG80211
# SoftAP
DHDCFLAGS += -DSUPPORT_AUTO_CHANNEL -DSUPPORT_HIDDEN_AP
DHDCFLAGS += -DSUPPORT_SOFTAP_SINGL_DISASSOC
DHDCFLAGS += -DUSE_STAMAC_4SOFTAP
# DPC priority
DHDCFLAGS += -DCUSTOM_DPC_PRIO_SETTING=98
# WiFi turn off delay
DHDCFLAGS += -DWIFI_TURNOFF_DELAY=100
# WiFi Kernel thread type
DHDCFLAGS += -DUSE_KTHREAD_API
############
# JellyBean
############
DHDCFLAGS += -DWL_ENABLE_P2P_IF
DHDCFLAGS += -DMULTIPLE_SUPPLICANT
DHDCFLAGS += -DWL_CFG80211_STA_EVENT
#########################
# Chip dependent feature
#########################
ifneq ($(CONFIG_BCM4334),)
DHDCFLAGS += -DBCM4334_CHIP -DHW_OOB -DSUPPORT_MULTIPLE_REVISION
DHDCFLAGS += -DUSE_CID_CHECK -DCONFIG_CONTROL_PM
DHDCFLAGS += -DPROP_TXSTATUS
DHDCFLAGS += -DVSDB -DHT40_GO
DHDCFLAGS += -DWL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
DHDCFLAGS += -DDHD_USE_IDLECOUNT
DHDCFLAGS += -DSUPPORT_AMPDU_MPDU_CMD
DHDCFLAGS += -DVSDB_DYNAMIC_F2_BLKSIZE -DSDIO_F2_BLKSIZE=512 -DVSDB_F2_BLKSIZE=64
DHDCFLAGS += -DCUSTOM_GLOM_SETTING=5 -DENABLE_BCN_LI_BCN_WAKEUP
ifeq ($(CONFIG_MACH_M3_JPN_DCM),y)
DHDCFLAGS += -DUSE_WEP_AUTH_SHARED_OPEN
endif
DHDCFLAGS += -DROAM_AP_ENV_DETECTION
DHDCFLAGS += -DWES_SUPPORT
DHDCFLAGS += -DWL11U
endif
ifneq ($(CONFIG_BCM4330),)
DHDCFLAGS += -DBCM4330_CHIP
DHDCFLAGS += -DMCAST_LIST_ACCUMULATION
DHDCFLAGS += -DCONFIG_CONTROL_PM
DHDCFLAGS += -DCUSTOM_GLOM_SETTING=0
DHDCFLAGS += -DPASS_ARP_PACKET
endif
ifneq ($(CONFIG_BCM43241),)
DHDCFLAGS += -DBCM43241_CHIP -DHW_OOB
DHDCFLAGS += -DMCAST_LIST_ACCUMULATION
DHDCFLAGS += -DMIMO_ANT_SETTING -DCONFIG_CONTROL_PM
DHDCFLAGS += -DAMPDU_HOSTREORDER -DDHD_USE_IDLECOUNT
DHDCFLAGS += -DCUSTOM_GLOM_SETTING=1
DHDCFLAGS += -DPROP_TXSTATUS
DHDCFLAGS += -DVSDB -DHT40_GO
DHDCFLAGS += -DWL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
DHDCFLAGS += -DSUPPORT_AMPDU_MPDU_CMD
DHDCFLAGS += -DROAM_AP_ENV_DETECTION
ifeq ($(CONFIG_BCM43241),m)
DHDCFLAGS += -fno-pic
endif
endif
#############################
# Platform dependent feature
#############################
ifeq ($(CONFIG_SPI_SC8810),y)
DHDCFLAGS += -DREAD_MACADDR -DBCMSPI -DBCMSPI_ANDROID -DSPI_PIO_32BIT_RW -DSPI_PIO_RW_BIGENDIAN -DDISABLE_11N
#Remove defines for SDMMC
DHDCFLAGS :=$(filter-out -DOOB_INTR_ONLY,$(DHDCFLAGS))
DHDCFLAGS :=$(filter-out -DBCMLXSDMMC,$(DHDCFLAGS))
#Remove defines for JB
DHDCFLAGS :=$(filter-out -DWL_ENABLE_P2P_IF,$(DHDCFLAGS))
DHDCFLAGS :=$(filter-out -DMULTIPLE_SUPPLICANT,$(DHDCFLAGS))
DHDCFLAGS :=$(filter-out -DWL_CFG80211_STA_EVENT,$(DHDCFLAGS))
endif
#For INITIAL 2G scan features
ifneq ($(CONFIG_TARGET_LOCALE_KOR),y)
DHDCFLAGS += -DUSE_INITIAL_2G_SCAN
endif
# For SLP feature
ifeq ($(CONFIG_SLP),y)
DHDCFLAGS += -DPLATFORM_SLP
DHDCFLAGS += -UWL_ENABLE_P2P_IF
DHDCFLAGS += -UMULTIPLE_SUPPLICANT
DHDCFLAGS += -UWL_CFG80211_STA_EVENT
endif
# GGSM_WIFI_5GHz_CHANNELS feature is define for only GGSM model
ifeq ($(GGSM_WIFI_5GHz_CHANNELS),true)
DHDCFLAGS += -DCUSTOMER_SET_COUNTRY
endif
##############################################################
# dhd_sec_feature.h
REGION_CODE := 100
ifeq ($(CONFIG_TARGET_LOCALE_KOR),y)
REGION_CODE=200
endif
ifeq ($(CONFIG_MACH_U1_KOR_KT), y)
REGION_CODE=202
endif
ifeq ($(CONFIG_TARGET_LOCALE_CHN),y)
REGION_CODE=300
endif
ifeq ($(SEC_MODEL_NAME),U1)
ifeq ($(X_BUILD_LOCALE),EUR_ORG)
REGION_CODE=101
endif
endif
ifdef CONFIG_WLAN_REGION_CODE
REGION_CODE=$(CONFIG_WLAN_REGION_CODE)
endif
DHDCFLAGS += -DWLAN_REGION_CODE=$(REGION_CODE)
##############################################################
#########
# Others
#########
#EXTRA_LDFLAGS += --strip-debug
EXTRA_CFLAGS += $(DHDCFLAGS) -DDHD_DEBUG
EXTRA_CFLAGS += -DSRCBASE=\"$(src)\"
EXTRA_CFLAGS += -I$(src)/include/ -I$(src)/
KBUILD_CFLAGS += -I$(LINUXDIR)/include -I$(shell pwd)
DHDOFILES := bcmsdh.o bcmsdh_linux.o bcmsdh_sdmmc.o bcmsdh_sdmmc_linux.o \
dhd_cdc.o dhd_common.o dhd_custom_gpio.o dhd_custom_sec.o \
dhd_linux.o dhd_linux_sched.o dhd_cfg80211.o dhd_sdio.o aiutils.o bcmevent.o \
bcmutils.o bcmwifi_channels.o hndpmu.o linux_osl.o sbutils.o siutils.o \
wl_android.o wl_cfg80211.o wl_cfgp2p.o wldev_common.o wl_linux_mon.o wl_roam.o
# For SPI projects
ifeq ($(CONFIG_SPI_SC8810),y)
DHDOFILES += bcmsdspi_linux.o bcmspibrcm.o
DHDOFILES :=$(filter-out bcmsdh_sdmmc.o,$(DHDOFILES))
DHDOFILES :=$(filter-out bcmsdh_sdmmc_linux.o,$(DHDOFILES))
endif
dhd-y := $(DHDOFILES)
obj-m += dhd.o
# Build the dhd.ko module out-of-tree against the kernel at $(KDIR).
all:
	@echo "$(MAKE) --no-print-directory -C $(KDIR) SUBDIRS=$(CURDIR) modules"
	@$(MAKE) --no-print-directory -C $(KDIR) SUBDIRS=$(CURDIR) modules

# Remove all build artifacts (objects, module, kbuild bookkeeping files).
clean:
	rm -rf *.o *.ko *.mod.c *~ .*.cmd *.o.cmd .*.o.cmd \
	Module.symvers modules.order .tmp_versions modules.builtin

# Install the built module via the kernel's modules_install target.
install:
	@$(MAKE) --no-print-directory -C $(KDIR) \
	SUBDIRS=$(CURDIR) modules_install
| ryrzy/yoda-kernel-i9300-JB-update11 | drivers/net/wireless/bcmdhd/Makefile | Makefile | gpl-2.0 | 6,404 |
/* $NoKeywords:$ */
/**
* @file
*
* Service procedure to calculate PCIe topology segment maximum exit latency
*
*
*
* @xrefitem bom "File Content Label" "Release Content"
* @e project: AGESA
* @e sub-project: GNB
* @e \$Revision: 48452 $ @e \$Date: 2011-03-09 12:50:44 +0800 (Wed, 09 Mar 2011) $
*
*/
/*
*****************************************************************************
*
* Copyright (c) 2011, Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Advanced Micro Devices, Inc. nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ***************************************************************************
*
*/
/*----------------------------------------------------------------------------------------
* M O D U L E S U S E D
*----------------------------------------------------------------------------------------
*/
#include "AGESA.h"
#include "Ids.h"
#include "Gnb.h"
#include "GnbPcie.h"
#include "GnbCommonLib.h"
#include "GnbPcieInitLibV1.h"
#include "Filecode.h"
#define FILECODE PROC_GNB_MODULES_GNBPCIEINITLIBV1_PCIEASPMEXITLATENCY_FILECODE
/*----------------------------------------------------------------------------------------
* D E F I N I T I O N S A N D M A C R O S
*----------------------------------------------------------------------------------------
*/
/*----------------------------------------------------------------------------------------
* T Y P E D E F S A N D S T R U C T U R E S
*----------------------------------------------------------------------------------------
*/
/// Scan context for PcieAspmGetMaxExitLatency; ScanData must be the
/// first member so the GNB_PCI_SCAN_DATA* callback argument can be
/// cast back to this structure.
typedef struct {
  GNB_PCI_SCAN_DATA       ScanData;          ///< Common scan data (callback + StdHeader)
  PCIe_ASPM_LATENCY_INFO  *AspmLatencyInfo;  ///< Accumulated max L1 exit latency result
  PCI_ADDR                DownstreamPort;    ///< Most recent downstream port visited
  UINT8                   LinkCount;         ///< Current link depth below the root port
} PCIE_EXIT_LATENCY_DATA;
/*----------------------------------------------------------------------------------------
* P R O T O T Y P E S O F L O C A L F U N C T I O N S
*----------------------------------------------------------------------------------------
*/
SCAN_STATUS
PcieAspmGetMaxExitLatencyCallback (
IN PCI_ADDR Device,
IN OUT GNB_PCI_SCAN_DATA *ScanData
);
/*----------------------------------------------------------------------------------------*/
/**
* Determine ASPM L-state maximum exit latency for PCIe segment
*
* Scan through all link in segment to determine maxim exit latency requirement by EPs.
*
* @param[in] DownstreamPort PCI address of PCIe port
* @param[out] AspmLatencyInfo Latency info
* @param[in] StdHeader Standard configuration header
*
*/
VOID
PcieAspmGetMaxExitLatency (
  IN       PCI_ADDR                DownstreamPort,
     OUT   PCIe_ASPM_LATENCY_INFO  *AspmLatencyInfo,
  IN       AMD_CONFIG_PARAMS       *StdHeader
  )
{
  PCIE_EXIT_LATENCY_DATA  PcieExitLatencyData;
  /* Set up the scan context; the callback recovers it by casting ScanData back. */
  PcieExitLatencyData.AspmLatencyInfo = AspmLatencyInfo;
  PcieExitLatencyData.ScanData.StdHeader = StdHeader;
  PcieExitLatencyData.LinkCount = 0;
  PcieExitLatencyData.ScanData.GnbScanCallback = PcieAspmGetMaxExitLatencyCallback;
  /* NOTE(review): AspmLatencyInfo->MaxL1ExitLatency is only ever raised by the
   * callback, never initialized here — presumably the caller zeroes it first;
   * TODO confirm. */
  GnbLibPciScan (DownstreamPort, DownstreamPort, &PcieExitLatencyData.ScanData);
}
/*----------------------------------------------------------------------------------------*/
/**
* Evaluate device
*
*
*
* @param[in] Device PCI Address
* @param[in,out] ScanData Scan configuration data
* @retval Scan Status of 0
*/
/*----------------------------------------------------------------------------------------*/
/**
 * Evaluate one device found during the PCIe segment scan.
 *
 * Ports recurse into their secondary bus (tracking link depth for
 * downstream ports); endpoints that advertise ASPM L1 support have
 * their acceptable L1 exit latency (Device Capabilities bits 11:9,
 * encoded as 1<<n microseconds) folded into the running maximum.
 *
 * Fix: removed the local ScanStatus variable, which was assigned
 * SCAN_SUCCESS but never read (the function returns the literal).
 *
 * @param[in]     Device    PCI Address
 * @param[in,out] ScanData  Scan configuration data (actually a PCIE_EXIT_LATENCY_DATA)
 * @retval        SCAN_SUCCESS always, so the scan continues
 */
SCAN_STATUS
PcieAspmGetMaxExitLatencyCallback (
  IN       PCI_ADDR             Device,
  IN OUT   GNB_PCI_SCAN_DATA    *ScanData
  )
{
  PCIE_EXIT_LATENCY_DATA  *PcieExitLatencyData;
  PCIE_DEVICE_TYPE        DeviceType;
  UINT32                  Value;
  UINT8                   PcieCapPtr;
  UINT8                   L1AcceptableLatency;
  PcieExitLatencyData = (PCIE_EXIT_LATENCY_DATA*) ScanData;
  DeviceType = GnbLibGetPcieDeviceType (Device, ScanData->StdHeader);
  IDS_HDT_CONSOLE (GNB_TRACE, "  PcieAspmGetMaxExitLatencyCallback for Device = %d:%d:%d\n",
    Device.Address.Bus,
    Device.Address.Device,
    Device.Address.Function
    );
  switch (DeviceType) {
  case  PcieDeviceRootComplex:
  case  PcieDeviceDownstreamPort:
    /* Descend one link level; remember the port for context */
    PcieExitLatencyData->DownstreamPort = Device;
    PcieExitLatencyData->LinkCount++;
    GnbLibPciScanSecondaryBus (Device, &PcieExitLatencyData->ScanData);
    PcieExitLatencyData->LinkCount--;
    break;
  case  PcieDeviceUpstreamPort:
    GnbLibPciScanSecondaryBus (Device, &PcieExitLatencyData->ScanData);
    break;
  case  PcieDeviceEndPoint:
  case  PcieDeviceLegacyEndPoint:
    PcieCapPtr = GnbLibFindPciCapability (Device.AddressValue, PCIE_CAP_ID, ScanData->StdHeader);
    ASSERT (PcieCapPtr != 0);
    GnbLibPciRead (
      Device.AddressValue | (PcieCapPtr + PCIE_LINK_CAP_REGISTER),
      AccessWidth32,
      &Value,
      ScanData->StdHeader
      );
    if ((Value & PCIE_ASPM_L1_SUPPORT_CAP) != 0) {
      GnbLibPciRead (
        Device.AddressValue | (PcieCapPtr + PCIE_DEVICE_CAP_REGISTER),
        AccessWidth32,
        &Value,
        ScanData->StdHeader
        );
      /* Device Capabilities [11:9]: acceptable L1 latency = 1<<n microseconds */
      L1AcceptableLatency = (UINT8) (1 << ((Value >> 9) & 0x7));
      if (PcieExitLatencyData->LinkCount > 1) {
        /* Behind a switch: pad by one microsecond per extra link level */
        L1AcceptableLatency = L1AcceptableLatency + PcieExitLatencyData->LinkCount;
      }
      if (PcieExitLatencyData->AspmLatencyInfo->MaxL1ExitLatency < L1AcceptableLatency) {
        PcieExitLatencyData->AspmLatencyInfo->MaxL1ExitLatency = L1AcceptableLatency;
      }
      IDS_HDT_CONSOLE (PCIE_MISC, "  Device max exit latency L1 - %d us\n",
        L1AcceptableLatency
        );
    }
    break;
  default:
    break;
  }
  return SCAN_SUCCESS;
}
| DarkDefender/coreboot | src/vendorcode/amd/agesa/f12/Proc/GNB/Modules/GnbPcieInitLibV1/PcieAspmExitLatency.c | C | gpl-2.0 | 7,237 |
require "test_helper"
# Model tests for DiaryEntry: fixture counts, attribute validations
# (title length, lat/lon bounds), the `visible` scope, and comment
# associations.
class DiaryEntryTest < ActiveSupport::TestCase
  api_fixtures
  fixtures :diary_entries, :diary_comments, :languages

  # Sanity-check the fixture set size.
  def test_diary_entry_count
    assert_equal 6, DiaryEntry.count
  end

  # Title must be 1..255 chars, body non-empty, and coordinates
  # within [-90, 90] / [-180, 180] inclusive.
  def test_diary_entry_validations
    diary_entry_valid({})
    diary_entry_valid({ :title => "" }, false)
    diary_entry_valid(:title => "a" * 255)
    diary_entry_valid({ :title => "a" * 256 }, false)
    diary_entry_valid({ :body => "" }, false)
    diary_entry_valid(:latitude => 90)
    diary_entry_valid({ :latitude => 90.00001 }, false)
    diary_entry_valid(:latitude => -90)
    diary_entry_valid({ :latitude => -90.00001 }, false)
    diary_entry_valid(:longitude => 180)
    diary_entry_valid({ :longitude => 180.00001 }, false)
    diary_entry_valid(:longitude => -180)
    diary_entry_valid({ :longitude => -180.00001 }, false)
  end

  # The `visible` scope excludes deleted entries and cannot find them by id.
  def test_diary_entry_visible
    assert_equal 5, DiaryEntry.visible.count
    assert_raise ActiveRecord::RecordNotFound do
      DiaryEntry.visible.find(diary_entries(:deleted_entry).id)
    end
  end

  # `comments` returns all comments on an entry.
  def test_diary_entry_comments
    assert_equal 0, diary_entries(:normal_user_entry_1).comments.count
    assert_equal 4, diary_entries(:normal_user_geo_entry).comments.count
  end

  # `visible_comments` filters out hidden comments.
  def test_diary_entry_visible_comments
    assert_equal 0, diary_entries(:normal_user_entry_1).visible_comments.count
    assert_equal 1, diary_entries(:normal_user_geo_entry).visible_comments.count
  end

  private

  # Clone a fixture entry, apply `attrs`, and assert whether the
  # resulting record is valid (`result` defaults to valid).
  def diary_entry_valid(attrs, result = true)
    entry = DiaryEntry.new(diary_entries(:normal_user_entry_1).attributes)
    entry.assign_attributes(attrs)
    assert_equal result, entry.valid?, "Expected #{attrs.inspect} to be #{result}"
  end
end
| COLABORATI/openstreetmap-website | test/models/diary_entry_test.rb | Ruby | gpl-2.0 | 1,718 |
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include "e1000_logs.h"
#include "e1000/e1000_api.h"
#include "e1000_ethdev.h"
/*
* Default values for port configuration
*/
#define IGB_DEFAULT_RX_FREE_THRESH 32
#define IGB_DEFAULT_RX_PTHRESH 8
#define IGB_DEFAULT_RX_HTHRESH 8
#define IGB_DEFAULT_RX_WTHRESH 0
#define IGB_DEFAULT_TX_PTHRESH 32
#define IGB_DEFAULT_TX_HTHRESH 0
#define IGB_DEFAULT_TX_WTHRESH 0
/* Bit shift and mask */
#define IGB_4_BIT_WIDTH (CHAR_BIT / 2)
#define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
#define IGB_8_BIT_WIDTH CHAR_BIT
#define IGB_8_BIT_MASK UINT8_MAX
static int eth_igb_configure(struct rte_eth_dev *dev);
static int eth_igb_start(struct rte_eth_dev *dev);
static void eth_igb_stop(struct rte_eth_dev *dev);
static void eth_igb_close(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igb_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
static void eth_igb_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *rte_stats);
static void eth_igb_stats_reset(struct rte_eth_dev *dev);
static void eth_igb_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
void *param);
static int igb_hardware_init(struct e1000_hw *hw);
static void igb_hw_control_acquire(struct e1000_hw *hw);
static void igb_hw_control_release(struct e1000_hw *hw);
static void igb_init_manageability(struct e1000_hw *hw);
static void igb_release_manageability(struct e1000_hw *hw);
static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
static void eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);
static int eth_igb_led_on(struct rte_eth_dev *dev);
static int eth_igb_led_off(struct rte_eth_dev *dev);
static void igb_intr_disable(struct e1000_hw *hw);
static int igb_get_rx_buffer_size(struct e1000_hw *hw);
static void eth_igb_rar_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static void igbvf_intr_disable(struct e1000_hw *hw);
static int igbvf_dev_configure(struct rte_eth_dev *dev);
static int igbvf_dev_start(struct rte_eth_dev *dev);
static void igbvf_dev_stop(struct rte_eth_dev *dev);
/* Forward declarations -- VF (82576/I350 virtual function) device ops. */
static void igbvf_dev_close(struct rte_eth_dev *dev);
static int eth_igbvf_link_update(struct e1000_hw *hw);
static void eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats);
static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
/* RSS redirection table (RETA) accessors. */
static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
/* SYN / flex / ntuple / ethertype flow filter management. */
static int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
		struct rte_eth_syn_filter *filter,
		bool add);
static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
		struct rte_eth_syn_filter *filter);
static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
		enum rte_filter_op filter_op,
		void *arg);
static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
		struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
		struct rte_eth_ntuple_filter *ntuple_filter);
static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
		struct rte_eth_flex_filter *filter,
		bool add);
static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
		struct rte_eth_flex_filter *filter);
static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
		enum rte_filter_op filter_op,
		void *arg);
static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
		struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
		struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
		struct rte_eth_ntuple_filter *filter,
		bool add);
static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
		struct rte_eth_ntuple_filter *filter);
static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
		enum rte_filter_op filter_op,
		void *arg);
static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
		struct rte_eth_ethertype_filter *filter,
		bool add);
static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
		enum rte_filter_op filter_op,
		void *arg);
static int igb_get_ethertype_filter(struct rte_eth_dev *dev,
		struct rte_eth_ethertype_filter *filter);
/* Generic filter-control dispatcher exposed via eth_dev_ops.filter_ctrl. */
static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type filter_type,
		enum rte_filter_op filter_op,
		void *arg);

/*
 * Define VF Stats MACRO for Non "cleared on read" register.
 * NOTE: references a variable named 'hw' from the enclosing scope.
 * 'cur' accumulates the delta between the freshly read value and the
 * previous snapshot 'last'; unsigned wrap-around keeps the delta
 * correct when the 32-bit hardware counter rolls over.
 */
#define UPDATE_VF_STAT(reg, last, cur)            \
{                                                 \
	u32 latest = E1000_READ_REG(hw, reg);     \
	cur += latest - last;                     \
	last = latest;                            \
}

/* Flow-control pause time programmed into hw->fc.pause_time. */
#define IGB_FC_PAUSE_TIME 0x0680
#define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

#define IGBVF_PMD_NAME "rte_igbvf_pmd"	/* PMD name */

/* Flow-control mode requested at hardware init (see igb_hardware_init). */
static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;

/*
 * The set of PCI devices this driver supports
 */
static struct rte_pci_id pci_id_igb_map[] = {

#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{.device_id = 0},
};

/*
 * The set of PCI devices this driver supports (for 82576&I350 VF)
 */
static struct rte_pci_id pci_id_igbvf_map[] = {

#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{.device_id = 0},
};
/* PF device operations, installed into rte_eth_dev at eth_igb_dev_init(). */
static struct eth_dev_ops eth_igb_ops = {
	.dev_configure        = eth_igb_configure,
	.dev_start            = eth_igb_start,
	.dev_stop             = eth_igb_stop,
	.dev_close            = eth_igb_close,
	.promiscuous_enable   = eth_igb_promiscuous_enable,
	.promiscuous_disable  = eth_igb_promiscuous_disable,
	.allmulticast_enable  = eth_igb_allmulticast_enable,
	.allmulticast_disable = eth_igb_allmulticast_disable,
	.link_update          = eth_igb_link_update,
	.stats_get            = eth_igb_stats_get,
	.stats_reset          = eth_igb_stats_reset,
	.dev_infos_get        = eth_igb_infos_get,
	.mtu_set              = eth_igb_mtu_set,
	.vlan_filter_set      = eth_igb_vlan_filter_set,
	.vlan_tpid_set        = eth_igb_vlan_tpid_set,
	.vlan_offload_set     = eth_igb_vlan_offload_set,
	.rx_queue_setup       = eth_igb_rx_queue_setup,
	.rx_queue_release     = eth_igb_rx_queue_release,
	.rx_queue_count       = eth_igb_rx_queue_count,
	.rx_descriptor_done   = eth_igb_rx_descriptor_done,
	.tx_queue_setup       = eth_igb_tx_queue_setup,
	.tx_queue_release     = eth_igb_tx_queue_release,
	.dev_led_on           = eth_igb_led_on,
	.dev_led_off          = eth_igb_led_off,
	.flow_ctrl_get        = eth_igb_flow_ctrl_get,
	.flow_ctrl_set        = eth_igb_flow_ctrl_set,
	.mac_addr_add         = eth_igb_rar_set,
	.mac_addr_remove      = eth_igb_rar_clear,
	.reta_update          = eth_igb_rss_reta_update,
	.reta_query           = eth_igb_rss_reta_query,
	.rss_hash_update      = eth_igb_rss_hash_update,
	.rss_hash_conf_get    = eth_igb_rss_hash_conf_get,
	.filter_ctrl          = eth_igb_filter_ctrl,
};
/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented.  Rx/Tx queue setup is shared with
 * the PF; link update reuses the PF implementation as well.
 */
static struct eth_dev_ops igbvf_eth_dev_ops = {
	.dev_configure    = igbvf_dev_configure,
	.dev_start        = igbvf_dev_start,
	.dev_stop         = igbvf_dev_stop,
	.dev_close        = igbvf_dev_close,
	.link_update      = eth_igb_link_update,
	.stats_get        = eth_igbvf_stats_get,
	.stats_reset      = eth_igbvf_stats_reset,
	.vlan_filter_set  = igbvf_vlan_filter_set,
	.dev_infos_get    = eth_igbvf_infos_get,
	.rx_queue_setup   = eth_igb_rx_queue_setup,
	.rx_queue_release = eth_igb_rx_queue_release,
	.tx_queue_setup   = eth_igb_tx_queue_setup,
	.tx_queue_release = eth_igb_tx_queue_release,
};
/**
 * Atomically snapshot the link status recorded in dev->data->dev_link
 * into the caller-supplied buffer.
 *
 * @param dev
 *   Device whose recorded link status is read.
 * @param link
 *   Buffer receiving the snapshot.
 * @return
 *   0 on success, -1 if the atomic exchange was lost to a concurrent
 *   writer.
 */
static inline int
rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *snapshot = link;
	struct rte_eth_link *recorded = &(dev->data->dev_link);

	/* Copy the whole 64-bit link word in one shot: cmpset returns 0
	 * (failure) only when the destination changed under us. */
	if (rte_atomic64_cmpset((uint64_t *)snapshot, *(uint64_t *)snapshot,
				*(uint64_t *)recorded) == 0)
		return -1;

	return 0;
}
/**
 * Atomically publish a new link status into dev->data->dev_link.
 *
 * @param dev
 *   Device whose recorded link status is updated.
 * @param link
 *   New link status to store.
 * @return
 *   0 on success, -1 if the atomic exchange was lost to a concurrent
 *   writer.
 */
static inline int
rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *recorded = &(dev->data->dev_link);
	struct rte_eth_link *update = link;

	/* Store the whole 64-bit link word in one shot: cmpset returns 0
	 * (failure) only when the destination changed under us. */
	if (rte_atomic64_cmpset((uint64_t *)recorded, *(uint64_t *)recorded,
				*(uint64_t *)update) == 0)
		return -1;

	return 0;
}
/*
 * Unmask the interrupt causes recorded in the per-device interrupt
 * state and flush the write so the NIC sees it immediately.
 */
static inline void
igb_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
	E1000_WRITE_FLUSH(hw);
}
/*
 * Mask (disable) every interrupt cause via the IMC register and flush
 * the posted write so the NIC stops raising interrupts immediately.
 */
static void
igb_intr_disable(struct e1000_hw *hw)
{
	E1000_WRITE_REG(hw, E1000_IMC, ~0);
	E1000_WRITE_FLUSH(hw);
}
/*
 * Reset the MAC and then re-assert the "PF Reset Done" bit in CTRL_EXT
 * so that PF/VF mailbox operations keep working after the reset.
 *
 * @return the status code from e1000_reset_hw().
 */
static inline int32_t
igb_pf_reset_hw(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = e1000_reset_hw(hw);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	return status;
}
static void
igb_identify_hardware(struct rte_eth_dev *dev)
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
hw->vendor_id = dev->pci_dev->id.vendor_id;
hw->device_id = dev->pci_dev->id.device_id;
hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
e1000_set_mac_type(hw);
/* need to check if it is a vf device below */
}
/*
 * Force-release the SMBI semaphore and software/firmware sync locks
 * that a previous, improperly terminated application instance may have
 * left held.  Called once at init, before normal hardware setup.
 *
 * @return E1000_SUCCESS, or the error from e1000_init_mac_params().
 */
static int
igb_reset_swfw_lock(struct e1000_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = e1000_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (e1000_get_hw_semaphore_generic(hw) < 0) {
		PMD_DRV_LOG(DEBUG, "SMBI lock released");
	}
	e1000_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * Phy lock should not fail in this early stage. If this is the case,
		 * it is due to an improper exit of the application.
		 * So force the release of the faulty lock.
		 */
		mask = E1000_SWFW_PHY0_SM << hw->bus.func;
		if (hw->bus.func > E1000_FUNC_1)
			mask <<= 2;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
				    hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * swfw_sync retries last long enough (1s) to be almost sure that if
		 * lock can not be taken it is due to an improper lock of the
		 * semaphore.
		 */
		mask = E1000_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");
		}
		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return E1000_SUCCESS;
}
/*
 * PF device initialization: map registers, validate the EEPROM, read
 * the permanent MAC address, bring the hardware to a known state, set
 * up SR-IOV support and interrupts, and initialize the filter lists.
 *
 * @return 0 on success, negative errno on failure.
 */
static int
eth_igb_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta * shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	uint32_t ctrl_ext;

	pci_dev = eth_dev->pci_dev;
	eth_dev->dev_ops = &eth_igb_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	hw->hw_addr= (void *)pci_dev->mem_resource[0].addr;

	igb_identify_hardware(eth_dev);
	/* First pass: init shared code without NVM/PHY setup. */
	if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	e1000_get_bus_info(hw);

	/* Reset any pending lock */
	if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	/*
	 * Start from a known state, this is important in reading the nvm
	 * and mac from that.
	 */
	igb_pf_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time its a real issue.
		 */
		if (e1000_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000",
		ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
			"store MAC addresses",
			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.addr, &eth_dev->data->mac_addrs[0]);

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* Now initialize the hardware */
	if (igb_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}
	hw->mac.get_link_status = 1;

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(hw) < 0) {
		PMD_INIT_LOG(ERR, "PHY reset is blocked due to"
			"SOL/IDER session");
	}

	/* initialize PF if max_vfs not zero */
	igb_pf_host_init(eth_dev);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x",
		eth_dev->data->port_id, pci_dev->id.vendor_id,
		pci_dev->id.device_id);

	rte_intr_callback_register(&(pci_dev->intr_handle),
		eth_igb_interrupt_handler, (void *)eth_dev);

	/* enable uio intr after callback register */
	rte_intr_enable(&(pci_dev->intr_handle));

	/* enable support intr */
	igb_intr_enable(eth_dev);

	/* Filter bookkeeping starts out empty. */
	TAILQ_INIT(&filter_info->flex_list);
	filter_info->flex_mask = 0;
	TAILQ_INIT(&filter_info->twotuple_list);
	filter_info->twotuple_mask = 0;
	TAILQ_INIT(&filter_info->fivetuple_list);
	filter_info->fivetuple_mask = 0;

	return 0;

err_late:
	igb_hw_control_release(hw);

	return (error);
}
/*
* Virtual Function device init
*/
static int
eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev;
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
int diag;
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = &igbvf_eth_dev_ops;
eth_dev->rx_pkt_burst = ð_igb_recv_pkts;
eth_dev->tx_pkt_burst = ð_igb_xmit_pkts;
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
* RX function */
if (rte_eal_process_type() != RTE_PROC_PRIMARY){
if (eth_dev->data->scattered_rx)
eth_dev->rx_pkt_burst = ð_igb_recv_scattered_pkts;
return 0;
}
pci_dev = eth_dev->pci_dev;
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
/* Initialize the shared code (base driver) */
diag = e1000_setup_init_funcs(hw, TRUE);
if (diag != 0) {
PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
diag);
return -EIO;
}
/* init_mailbox_params */
hw->mbx.ops.init_params(hw);
/* Disable the interrupts for VF */
igbvf_intr_disable(hw);
diag = hw->mac.ops.reset_hw(hw);
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
hw->mac.rar_entry_count, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
"Failed to allocate %d bytes needed to store MAC "
"addresses",
ETHER_ADDR_LEN * hw->mac.rar_entry_count);
return -ENOMEM;
}
/* Copy the permanent MAC address */
ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
ð_dev->data->mac_addrs[0]);
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
"mac.type=%s",
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id, "igb_mac_82576_vf");
return 0;
}
/*
 * PF driver descriptor registered with the ethdev layer.  Requests BAR
 * mapping and link-status-change interrupt support.
 */
static struct eth_driver rte_igb_pmd = {
	{
		.name = "rte_igb_pmd",
		.id_table = pci_id_igb_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	},
	.eth_dev_init = eth_igb_dev_init,
	.dev_private_size = sizeof(struct e1000_adapter),
};

/*
 * virtual function driver struct
 */
static struct eth_driver rte_igbvf_pmd = {
	{
		.name = "rte_igbvf_pmd",
		.id_table = pci_id_igbvf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	},
	.eth_dev_init = eth_igbvf_dev_init,
	.dev_private_size = sizeof(struct e1000_adapter),
};
/*
 * PF driver initialization routine, invoked once at EAL init time.
 * Registers the igb Poll Mode Driver with the ethdev layer.
 */
static int
rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
	rte_eth_driver_register(&rte_igb_pmd);

	return 0;
}
/*
 * Turn on the hardware VLAN filter bit in RCTL; VMDq operation always
 * relies on the VLAN filter being active.
 */
static void
igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	uint32_t reg;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	reg = E1000_READ_REG(hw, E1000_RCTL) | E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}
/*
 * VF driver initialization routine, invoked once at EAL init time.
 * Registers the igb virtual-function Poll Mode Driver with the ethdev
 * layer.
 */
static int
rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	rte_eth_driver_register(&rte_igbvf_pmd);

	return 0;
}
/*
 * Device configure hook: nothing to program yet, just record that the
 * link status must be refreshed at start/interrupt time.
 *
 * @return 0 always.
 *
 * Fix: the original body invoked PMD_INIT_FUNC_TRACE() twice (once
 * before and once after setting the flag); the duplicate is removed.
 */
static int
eth_igb_configure(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;

	return 0;
}
/*
 * Start the PF device: power up the PHY, (re)initialize the hardware,
 * program the Tx/Rx rings, VLAN offloads, interrupt moderation and the
 * requested link speed/duplex, then re-enable interrupts.
 *
 * @return 0 on success, negative errno on failure (bad link config,
 *         hardware init failure, or mbuf allocation failure in Rx init).
 */
static int
eth_igb_start(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret, i, mask;
	uint32_t ctrl_ext;

	PMD_INIT_FUNC_TRACE();

	/* Power up the phy. Needed to make the link go Up */
	e1000_power_up_phy(hw);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	if (hw->mac.type == e1000_82575) {
		uint32_t pba;

		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/* Put the address into the Receive Address Array */
	e1000_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igb_hardware_init(hw)) {
		PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
		return (-EIO);
	}

	/* Program the VLAN ethertype in both halves of VET. */
	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	/* configure PF module if SRIOV enabled */
	igb_pf_host_configure(dev);

	/* Configure for OS presence */
	igb_init_manageability(hw);

	eth_igb_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = eth_igb_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		igb_dev_clear_queues(dev);
		return ret;
	}

	e1000_clear_hw_cntrs_base_generic(hw);

	/*
	 * VLAN Offload Settings
	 */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
			ETH_VLAN_EXTEND_MASK;
	eth_igb_vlan_offload_set(dev, mask);

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable VLAN filter since VMDq always use VLAN filter */
		igb_vmdq_vlan_hw_filter_enable(dev);
	}

	/*
	 * Configure the Interrupt Moderation register (EITR) with the maximum
	 * possible value (0xFFFF) to minimize "System Partial Write" issued by
	 * spurious [DMA] memory updates of RX and TX ring descriptors.
	 *
	 * With a EITR granularity of 2 microseconds in the 82576, only 7/8
	 * spurious memory updates per second should be expected.
	 * ((65535 * 2) / 1000.1000 ~= 0.131 second).
	 *
	 * Because interrupts are not used at all, the MSI-X is not activated
	 * and interrupt moderation is controlled by EITR[0].
	 *
	 * Note that having [almost] disabled memory updates of RX and TX ring
	 * descriptors through the Interrupt Moderation mechanism, memory
	 * updates of ring descriptors are now moderated by the configurable
	 * value of Write-Back Threshold registers.
	 */
	if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
		(hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
		(hw->mac.type == e1000_i211)) {
		uint32_t ivar;

		/* Enable all RX & TX queues in the IVAR registers */
		ivar = (uint32_t) ((E1000_IVAR_VALID << 16) | E1000_IVAR_VALID);
		for (i = 0; i < 8; i++)
			E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, ivar);

		/* Configure EITR with the maximum possible value (0xFFFF) */
		E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
	}

	/* Setup link speed and duplex */
	switch (dev->data->dev_conf.link_speed) {
	case ETH_LINK_SPEED_AUTONEG:
		if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
		else
			goto error_invalid_config;
		break;
	case ETH_LINK_SPEED_10:
		if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
			hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
			hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
		else
			goto error_invalid_config;
		break;
	case ETH_LINK_SPEED_100:
		if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
			hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
			hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
		else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
			hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
		else
			goto error_invalid_config;
		break;
	case ETH_LINK_SPEED_1000:
		if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
			(dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
			hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
		else
			goto error_invalid_config;
		break;
	case ETH_LINK_SPEED_10000:
	default:
		goto error_invalid_config;
	}
	e1000_setup_link(hw);

	/* check if lsc interrupt feature is enabled */
	if (dev->data->dev_conf.intr_conf.lsc != 0)
		ret = eth_igb_lsc_interrupt_setup(dev);

	/* resume enabled intr since hw reset */
	igb_intr_enable(dev);

	PMD_INIT_LOG(DEBUG, "<<");

	return (0);

error_invalid_config:
	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
		dev->data->dev_conf.link_speed,
		dev->data->dev_conf.link_duplex, dev->data->port_id);
	igb_dev_clear_queues(dev);
	return (-EINVAL);
}
/*********************************************************************
 *
 * This routine disables all traffic on the adapter by issuing a
 * global reset on the MAC.  It also powers down the PHY (or shuts
 * down the fiber/serdes link), clears the recorded link status and
 * frees every flex/2-tuple/5-tuple filter installed on the device.
 *
 **********************************************************************/
static void
eth_igb_stop(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct rte_eth_link link;
	struct e1000_flex_filter *p_flex;
	struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
	struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;

	igb_intr_disable(hw);
	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Set bit for Go Link disconnect */
	if (hw->mac.type >= e1000_82580) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg |= E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	/* Power down the phy. Needed to make the link go Down */
	if (hw->phy.media_type == e1000_media_type_copper)
		e1000_power_down_phy(hw);
	else
		e1000_shutdown_fiber_serdes_link(hw);

	igb_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_igb_dev_atomic_write_link_status(dev, &link);

	/* Remove all flex filters of the device */
	while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
		TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
		rte_free(p_flex);
	}
	filter_info->flex_mask = 0;

	/* Remove all ntuple filters of the device */
	for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
	     p_5tuple != NULL; p_5tuple = p_5tuple_next) {
		p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
		TAILQ_REMOVE(&filter_info->fivetuple_list,
			     p_5tuple, entries);
		rte_free(p_5tuple);
	}
	filter_info->fivetuple_mask = 0;
	for (p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list);
	     p_2tuple != NULL; p_2tuple = p_2tuple_next) {
		p_2tuple_next = TAILQ_NEXT(p_2tuple, entries);
		TAILQ_REMOVE(&filter_info->twotuple_list,
			     p_2tuple, entries);
		rte_free(p_2tuple);
	}
	filter_info->twotuple_mask = 0;
}
/*
 * Close the PF device: stop traffic, reset the PHY, hand firmware
 * control back, undo the "Go Link Disconnect" setting made at stop
 * time and clear queue state and recorded link status.
 */
static void
eth_igb_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link;

	eth_igb_stop(dev);
	e1000_phy_hw_reset(hw);
	igb_release_manageability(hw);
	igb_hw_control_release(hw);

	/* Clear bit for Go Link disconnect */
	if (hw->mac.type >= e1000_82580) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	igb_dev_clear_queues(dev);

	memset(&link, 0, sizeof(link));
	rte_igb_dev_atomic_write_link_status(dev, &link);
}
/*
 * Return the size, in bytes, of the packet buffer dedicated to Rx,
 * decoded from the MAC-type specific packet buffer size register.
 */
static int
igb_get_rx_buffer_size(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;

	switch (hw->mac.type) {
	case e1000_82576:
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
		break;
	case e1000_82580:
	case e1000_i350:
		/* PBS needs to be translated according to a lookup table */
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
		rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
		rx_buf_size = (rx_buf_size << 10);
		break;
	case e1000_i210:
	case e1000_i211:
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
		break;
	default:
		/* Older parts (e.g. 82575) expose the size via PBA. */
		rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
		break;
	}

	return rx_buf_size;
}
/*********************************************************************
 *
 * Initialize the hardware: acquire firmware control, program flow
 * control watermarks, issue a global reset and run the base driver's
 * init sequence.
 *
 * @return 0 on success, negative base-driver code from e1000_init_hw()
 *         on failure.
 *
 **********************************************************************/
static int
igb_hardware_init(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Let the firmware know the OS is in control */
	igb_hw_control_acquire(hw);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buf_size = igb_get_rx_buffer_size(hw);

	/* NOTE(review): assumes rx_buf_size > 2 * ETHER_MAX_LEN; the
	 * subtraction would wrap for a very small buffer -- confirm. */
	hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
	hw->fc.low_water = hw->fc.high_water - 1500;
	hw->fc.pause_time = IGB_FC_PAUSE_TIME;
	hw->fc.send_xon = 1;

	/* Set Flow control, use the tunable location if sane */
	if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
		hw->fc.requested_mode = igb_fc_setting;
	else
		hw->fc.requested_mode = e1000_fc_none;

	/* Issue a global reset */
	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	diag = e1000_init_hw(hw);
	if (diag < 0)
		return (diag);

	/* Program the VLAN ethertype in both halves of VET. */
	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);

	return (0);
}
/*
 * Harvest the PF hardware statistics counters (most are clear-on-read)
 * into the driver's running totals, then optionally fill *rte_stats.
 * Passing rte_stats == NULL just drains the hardware counters (used by
 * eth_igb_stats_reset()).
 *
 * This function is based on igb_update_stats_counters() in igb/if_igb.c
 *
 * Fix: TOR/TOT are 64-bit counter pairs like GORC/GOTC -- the low
 * dword (TORL/TOTL) must be read first and the high dword shifted into
 * the upper half.  The previous code read only TORH/TOTH, discarding
 * the low 32 bits of the total-octets counts.
 */
static void
eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_hw_stats *stats =
		E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	int pause_frames;

	if(hw->phy.media_type == e1000_media_type_copper ||
	    (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs +=
		    E1000_READ_REG(hw,E1000_SYMERRS);
		stats->sec += E1000_READ_REG(hw, E1000_SEC);
	}

	stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
	stats->mpc += E1000_READ_REG(hw, E1000_MPC);
	stats->scc += E1000_READ_REG(hw, E1000_SCC);
	stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

	stats->mcc += E1000_READ_REG(hw, E1000_MCC);
	stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
	stats->colc += E1000_READ_REG(hw, E1000_COLC);

	stats->dc += E1000_READ_REG(hw, E1000_DC);
	stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
	stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
	stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);

	/*
	 ** For watchdog management we need to know if we have been
	 ** paused during the last interval, so capture that here.
	 */
	pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
	stats->xoffrxc += pause_frames;
	stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
	stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
	stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
	stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
	stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
	stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
	stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
	stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
	stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
	stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
	stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
	stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
	stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
	stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
	stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);

	stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
	stats->ruc += E1000_READ_REG(hw, E1000_RUC);
	stats->rfc += E1000_READ_REG(hw, E1000_RFC);
	stats->roc += E1000_READ_REG(hw, E1000_ROC);
	stats->rjc += E1000_READ_REG(hw, E1000_RJC);

	/* TOR/TOT: 64-bit pairs, low dword first (see note above). */
	stats->tor += E1000_READ_REG(hw, E1000_TORL);
	stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32);
	stats->tot += E1000_READ_REG(hw, E1000_TOTL);
	stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32);

	stats->tpr += E1000_READ_REG(hw, E1000_TPR);
	stats->tpt += E1000_READ_REG(hw, E1000_TPT);

	stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
	stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
	stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
	stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
	stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
	stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
	stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
	stats->bptc += E1000_READ_REG(hw, E1000_BPTC);

	/* Interrupt Counts */
	stats->iac += E1000_READ_REG(hw, E1000_IAC);
	stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
	stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
	stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
	stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
	stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
	stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
	stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
	stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);

	/* Host to Card Statistics */
	stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
	stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
	stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
	stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
	stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
	stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
	stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
	stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
	stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
	stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
	stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
	stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
	stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
	stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);

	stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
	stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
	stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
	stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
	stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
	stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);

	if (rte_stats == NULL)
		return;

	/* Rx Errors */
	rte_stats->ibadcrc = stats->crcerrs;
	rte_stats->ibadlen = stats->rlec + stats->ruc + stats->roc;
	rte_stats->imissed = stats->mpc;
	rte_stats->ierrors = rte_stats->ibadcrc +
	                     rte_stats->ibadlen +
	                     rte_stats->imissed +
	                     stats->rxerrc + stats->algnerrc + stats->cexterr;

	/* Tx Errors */
	rte_stats->oerrors = stats->ecol + stats->latecol;

	/* XON/XOFF pause frames */
	rte_stats->tx_pause_xon  = stats->xontxc;
	rte_stats->rx_pause_xon  = stats->xonrxc;
	rte_stats->tx_pause_xoff = stats->xofftxc;
	rte_stats->rx_pause_xoff = stats->xoffrxc;

	rte_stats->ipackets = stats->gprc;
	rte_stats->opackets = stats->gptc;
	rte_stats->ibytes   = stats->gorc;
	rte_stats->obytes   = stats->gotc;
}
/*
 * Reset the PF statistics: drain the clear-on-read hardware counters
 * into the software totals, then discard those totals.
 */
static void
eth_igb_stats_reset(struct rte_eth_dev *dev)
{
	struct e1000_hw_stats *sw_totals =
		E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* Reading the registers clears them in hardware. */
	eth_igb_stats_get(dev, NULL);

	/* Zero everything accumulated so far. */
	memset(sw_totals, 0, sizeof(*sw_totals));
}
/*
 * Harvest the VF statistics registers (which are NOT clear-on-read;
 * UPDATE_VF_STAT accumulates the delta since the previous snapshot)
 * and optionally fill *rte_stats.  Passing rte_stats == NULL just
 * refreshes the running totals (used by eth_igbvf_stats_reset()).
 */
static void
eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	/* 'hw' is referenced implicitly by the UPDATE_VF_STAT macro. */
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
			  E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* Good Rx packets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGPRC,
	    hw_stats->last_gprc, hw_stats->gprc);

	/* Good Rx octets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGORC,
	    hw_stats->last_gorc, hw_stats->gorc);

	/* Good Tx packets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGPTC,
	    hw_stats->last_gptc, hw_stats->gptc);

	/* Good Tx octets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGOTC,
	    hw_stats->last_gotc, hw_stats->gotc);

	/* Rx Multicst packets */
	UPDATE_VF_STAT(E1000_VFMPRC,
	    hw_stats->last_mprc, hw_stats->mprc);

	/* Good Rx loopback packets */
	UPDATE_VF_STAT(E1000_VFGPRLBC,
	    hw_stats->last_gprlbc, hw_stats->gprlbc);

	/* Good Rx loopback octets */
	UPDATE_VF_STAT(E1000_VFGORLBC,
	    hw_stats->last_gorlbc, hw_stats->gorlbc);

	/* Good Tx loopback packets */
	UPDATE_VF_STAT(E1000_VFGPTLBC,
	    hw_stats->last_gptlbc, hw_stats->gptlbc);

	/* Good Tx loopback octets */
	UPDATE_VF_STAT(E1000_VFGOTLBC,
	    hw_stats->last_gotlbc, hw_stats->gotlbc);

	if (rte_stats == NULL)
		return;

	rte_stats->ipackets = hw_stats->gprc;
	rte_stats->ibytes = hw_stats->gorc;
	rte_stats->opackets = hw_stats->gptc;
	rte_stats->obytes = hw_stats->gotc;
	rte_stats->imcasts = hw_stats->mprc;
	rte_stats->ilbpackets = hw_stats->gprlbc;
	rte_stats->ilbbytes = hw_stats->gorlbc;
	rte_stats->olbpackets = hw_stats->gptlbc;
	rte_stats->olbbytes = hw_stats->gotlbc;
}
/*
 * Reset the VF statistics: refresh the running totals from hardware,
 * then zero the accumulated counters while keeping the last_* register
 * snapshot fields (which precede 'gprc' in the struct) so subsequent
 * deltas computed by UPDATE_VF_STAT remain correct.
 */
static void
eth_igbvf_stats_reset(struct rte_eth_dev *dev)
{
	struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* Sync HW register to the last stats */
	eth_igbvf_stats_get(dev, NULL);

	/* reset HW current stats (everything from 'gprc' onward) */
	memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
	    offsetof(struct e1000_vf_stats, gprc));
}
/*
 * Report PF device capabilities: buffer limits, offload capabilities,
 * per-MAC-type queue/VMDq limits, RSS table size, and the default Rx/Tx
 * ring configuration.
 */
static void
eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM  |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM  |
		DEV_TX_OFFLOAD_UDP_CKSUM   |
		DEV_TX_OFFLOAD_TCP_CKSUM   |
		DEV_TX_OFFLOAD_SCTP_CKSUM;
	/* Queue and VMDq-pool limits vary per controller generation. */
	switch (hw->mac.type) {
	case e1000_82575:
		dev_info->max_rx_queues = 4;
		dev_info->max_tx_queues = 4;
		dev_info->max_vmdq_pools = 0;
		break;
	case e1000_82576:
		dev_info->max_rx_queues = 16;
		dev_info->max_tx_queues = 16;
		dev_info->max_vmdq_pools = ETH_8_POOLS;
		dev_info->vmdq_queue_num = 16;
		break;
	case e1000_82580:
		dev_info->max_rx_queues = 8;
		dev_info->max_tx_queues = 8;
		dev_info->max_vmdq_pools = ETH_8_POOLS;
		dev_info->vmdq_queue_num = 8;
		break;
	case e1000_i350:
		dev_info->max_rx_queues = 8;
		dev_info->max_tx_queues = 8;
		dev_info->max_vmdq_pools = ETH_8_POOLS;
		dev_info->vmdq_queue_num = 8;
		break;
	case e1000_i354:
		dev_info->max_rx_queues = 8;
		dev_info->max_tx_queues = 8;
		/* NOTE(review): unlike i350, no max_vmdq_pools/vmdq_queue_num
		 * is set here — confirm whether that is intentional. */
		break;
	case e1000_i210:
		dev_info->max_rx_queues = 4;
		dev_info->max_tx_queues = 4;
		dev_info->max_vmdq_pools = 0;
		break;
	case e1000_i211:
		dev_info->max_rx_queues = 2;
		dev_info->max_tx_queues = 2;
		dev_info->max_vmdq_pools = 0;
		break;
	default:
		/* Should not happen */
		break;
	}
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = IGB_DEFAULT_RX_PTHRESH,
			.hthresh = IGB_DEFAULT_RX_HTHRESH,
			.wthresh = IGB_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};
	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = IGB_DEFAULT_TX_PTHRESH,
			.hthresh = IGB_DEFAULT_TX_HTHRESH,
			.wthresh = IGB_DEFAULT_TX_WTHRESH,
		},
		.txq_flags = 0,
	};
}
/*
 * Report VF device capabilities. Mirrors eth_igb_infos_get() but with the
 * smaller per-VF queue limits of the vfadapt MAC types.
 */
static void
eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
				DEV_RX_OFFLOAD_IPV4_CKSUM |
				DEV_RX_OFFLOAD_UDP_CKSUM  |
				DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
				DEV_TX_OFFLOAD_IPV4_CKSUM  |
				DEV_TX_OFFLOAD_UDP_CKSUM   |
				DEV_TX_OFFLOAD_TCP_CKSUM   |
				DEV_TX_OFFLOAD_SCTP_CKSUM;
	switch (hw->mac.type) {
	case e1000_vfadapt:
		dev_info->max_rx_queues = 2;
		dev_info->max_tx_queues = 2;
		break;
	case e1000_vfadapt_i350:
		dev_info->max_rx_queues = 1;
		dev_info->max_tx_queues = 1;
		break;
	default:
		/* Should not happen */
		break;
	}
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = IGB_DEFAULT_RX_PTHRESH,
			.hthresh = IGB_DEFAULT_RX_HTHRESH,
			.wthresh = IGB_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};
	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = IGB_DEFAULT_TX_PTHRESH,
			.hthresh = IGB_DEFAULT_TX_HTHRESH,
			.wthresh = IGB_DEFAULT_TX_WTHRESH,
		},
		.txq_flags = 0,
	};
}
/* return 0 means link status changed, -1 means not changed */
/*
 * Poll the PHY/MAC for link state, optionally waiting for link-up, and
 * publish the result into the device's link structure.
 *
 * @param wait_to_complete  Non-zero: poll up to IGB_LINK_UPDATE_CHECK_TIMEOUT
 *                          iterations with IGB_LINK_UPDATE_CHECK_INTERVAL ms
 *                          delays; zero: sample once and return immediately.
 */
static int
eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	int link_check, count;
	link_check = 0;
	hw->mac.get_link_status = 1;
	/* possible wait-to-complete in up to 9 seconds */
	for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) {
		/* Read the real link status */
		switch (hw->phy.media_type) {
		case e1000_media_type_copper:
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			break;
		case e1000_media_type_fiber:
			e1000_check_for_link(hw);
			link_check = (E1000_READ_REG(hw, E1000_STATUS) &
				      E1000_STATUS_LU);
			break;
		case e1000_media_type_internal_serdes:
			e1000_check_for_link(hw);
			link_check = hw->mac.serdes_has_link;
			break;
		/* VF device is type_unknown */
		case e1000_media_type_unknown:
			eth_igbvf_link_update(hw);
			link_check = !hw->mac.get_link_status;
			break;
		default:
			break;
		}
		if (link_check || wait_to_complete == 0)
			break;
		rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
	}
	memset(&link, 0, sizeof(link));
	rte_igb_dev_atomic_read_link_status(dev, &link);
	old = link;
	/* Now we check if a transition has happened */
	if (link_check) {
		/* Link is up: fetch the negotiated speed/duplex from HW. */
		hw->mac.ops.get_link_up_info(hw, &link.link_speed,
					  &link.link_duplex);
		link.link_status = 1;
	} else if (!link_check) {
		link.link_speed = 0;
		link.link_duplex = 0;
		link.link_status = 0;
	}
	rte_igb_dev_atomic_write_link_status(dev, &link);
	/* not changed */
	if (old.link_status == link.link_status)
		return -1;
	/* changed */
	return 0;
}
/*
 * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded.
 */
static void
igb_hw_control_acquire(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	/* Let firmware know the driver has taken over */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
/*
 * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igb_hw_control_release(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	/* Let firmware taken over control of h/w */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(hw, E1000_CTRL_EXT,
			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}
/*
 * Bit of a misnomer, what this really means is
 * to enable OS management of the system... aka
 * to disable special hardware management features.
 * Only takes effect when management pass-through is enabled in firmware.
 */
static void
igb_init_manageability(struct e1000_hw *hw)
{
	if (e1000_enable_mng_pass_thru(hw)) {
		uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
		uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);
		/* enable receiving management packets to the host */
		manc |= E1000_MANC_EN_MNG2HOST;
		manc2h |= 1 << 5;  /* Mng Port 623 */
		manc2h |= 1 << 6;  /* Mng Port 664 */
		E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
		E1000_WRITE_REG(hw, E1000_MANC, manc);
	}
}
/*
 * Undo igb_init_manageability(): re-enable hardware ARP interception and
 * stop forwarding management packets to the host.
 */
static void
igb_release_manageability(struct e1000_hw *hw)
{
	if (e1000_enable_mng_pass_thru(hw)) {
		uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
		manc |= E1000_MANC_ARP_EN;
		manc &= ~E1000_MANC_EN_MNG2HOST;
		E1000_WRITE_REG(hw, E1000_MANC, manc);
	}
}
/* Enable promiscuous mode: set both unicast (UPE) and multicast (MPE)
 * promiscuous bits in RCTL. */
static void
eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}
/*
 * Disable promiscuous mode. MPE is kept set when all-multicast mode is still
 * active, so leaving promiscuous does not silently drop multicast traffic.
 */
static void
eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= (~E1000_RCTL_UPE);
	if (dev->data->all_multicast == 1)
		rctl |= E1000_RCTL_MPE;
	else
		rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}
/* Enable all-multicast mode: set the multicast promiscuous (MPE) bit. */
static void
eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}
/*
 * Disable all-multicast mode. No-op while promiscuous mode is active,
 * since promiscuous mode requires MPE to stay set.
 */
static void
eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;
	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}
/*
 * Add or remove a VLAN id in the HW VLAN Filter Table Array (VFTA).
 * The table is an array of 32-bit words; vid_idx selects the word and
 * vid_bit the bit within it. The shadow copy is kept in sync so the table
 * can be restored after a reset.
 *
 * @param on  Non-zero to accept the VLAN, zero to filter it out.
 * @return    Always 0.
 */
static int
eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta * shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;
	vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
			      E1000_VFTA_ENTRY_MASK);
	vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;
	return 0;
}
/*
 * Program the VLAN Ether Type (VET) register: the standard TPID in the low
 * 16 bits and the caller-supplied TPID in the high 16 bits.
 */
static void
eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg = ETHER_TYPE_VLAN ;
	reg |= (tpid << 16);
	E1000_WRITE_REG(hw, E1000_VET, reg);
}
/* Disable the HW VLAN filter table (clear RCTL VFE, and CFIEN with it). */
static void
igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;
	/* Filter Table Disable */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg &= ~E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}
/*
 * Enable the HW VLAN filter table and restore its contents from the shadow
 * copy (the HW table is lost across resets).
 */
static void
igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta * shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t reg;
	int i;
	/* Filter Table Enable, CFI not used for packet acceptance */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
	/* restore VFTA table */
	for (i = 0; i < IGB_VFTA_SIZE; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
}
/* Disable HW VLAN tag stripping (clear CTRL VME bit). */
static void
igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;
	/* VLAN Mode Disable */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg &= ~E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);
}
/* Enable HW VLAN tag stripping (set CTRL VME bit). */
static void
igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;
	/* VLAN Mode Enable */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);
}
/*
 * Disable extended (double/QinQ) VLAN mode, and when jumbo frames are on
 * shrink the max packet length back to accommodate a single VLAN tag.
 */
static void
igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;
	/* CTRL_EXT: Extended VLAN */
	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
	/* Update maximum packet length */
	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
		E1000_WRITE_REG(hw, E1000_RLPML,
			dev->data->dev_conf.rxmode.max_rx_pkt_len +
						VLAN_TAG_SIZE);
}
/*
 * Enable extended (double/QinQ) VLAN mode, and when jumbo frames are on
 * grow the max packet length to accommodate two VLAN tags.
 */
static void
igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;
	/* CTRL_EXT: Extended VLAN */
	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	reg |= E1000_CTRL_EXT_EXTEND_VLAN;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
	/* Update maximum packet length */
	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
		E1000_WRITE_REG(hw, E1000_RLPML,
			dev->data->dev_conf.rxmode.max_rx_pkt_len +
						2 * VLAN_TAG_SIZE);
}
static void
eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
if(mask & ETH_VLAN_STRIP_MASK){
if (dev->data->dev_conf.rxmode.hw_vlan_strip)
igb_vlan_hw_strip_enable(dev);
else
igb_vlan_hw_strip_disable(dev);
}
if(mask & ETH_VLAN_FILTER_MASK){
if (dev->data->dev_conf.rxmode.hw_vlan_filter)
igb_vlan_hw_filter_enable(dev);
else
igb_vlan_hw_filter_disable(dev);
}
if(mask & ETH_VLAN_EXTEND_MASK){
if (dev->data->dev_conf.rxmode.hw_vlan_extend)
igb_vlan_hw_extend_enable(dev);
else
igb_vlan_hw_extend_disable(dev);
}
}
/**
 * It enables the interrupt mask and then enable the interrupt.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	/* Add Link Status Change to the set of causes we unmask. */
	intr->mask |= E1000_ICR_LSC;
	return 0;
}
/*
 * It reads ICR and gets interrupt causes, check it and set a bit flag
 * to update link status. Note ICR is read-on-clear, so causes must be
 * latched into intr->flags here for eth_igb_interrupt_action() to act on.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t icr;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	/* Mask further interrupts while we service this one. */
	igb_intr_disable(hw);
	/* read-on-clear nic registers here */
	icr = E1000_READ_REG(hw, E1000_ICR);
	intr->flags = 0;
	if (icr & E1000_ICR_LSC) {
		intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	}
	/* VF-to-PF mailbox message pending. */
	if (icr & E1000_ICR_VMMB)
		intr->flags |= E1000_FLAG_MAILBOX;
	return 0;
}
/*
 * It executes link_update after knowing an interrupt is prsent.
 * Handles any pending VF mailbox message first, re-enables interrupts,
 * then — on a link-status-change — refreshes the link state, logs it,
 * gates the Tx/Rx units accordingly, and fires the LSC callback.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_igb_interrupt_action(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	uint32_t tctl, rctl;
	struct rte_eth_link link;
	int ret;
	if (intr->flags & E1000_FLAG_MAILBOX) {
		igb_pf_mbx_process(dev);
		intr->flags &= ~E1000_FLAG_MAILBOX;
	}
	/* Re-arm device and PCI interrupt delivery for the next event. */
	igb_intr_enable(dev);
	rte_intr_enable(&(dev->pci_dev->intr_handle));
	if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
		intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
		/* set get_link_status to check register later */
		hw->mac.get_link_status = 1;
		ret = eth_igb_link_update(dev, 0);
		/* check if link has changed */
		if (ret < 0)
			return 0;
		memset(&link, 0, sizeof(link));
		rte_igb_dev_atomic_read_link_status(dev, &link);
		if (link.link_status) {
			PMD_INIT_LOG(INFO,
				     " Port %d: Link Up - speed %u Mbps - %s",
				     dev->data->port_id,
				     (unsigned)link.link_speed,
				     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
				     "full-duplex" : "half-duplex");
		} else {
			PMD_INIT_LOG(INFO, " Port %d: Link Down",
				     dev->data->port_id);
		}
		PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
			     dev->pci_dev->addr.domain,
			     dev->pci_dev->addr.bus,
			     dev->pci_dev->addr.devid,
			     dev->pci_dev->addr.function);
		/* Enable Tx/Rx only while the link is up. */
		tctl = E1000_READ_REG(hw, E1000_TCTL);
		rctl = E1000_READ_REG(hw, E1000_RCTL);
		if (link.link_status) {
			/* enable Tx/Rx */
			tctl |= E1000_TCTL_EN;
			rctl |= E1000_RCTL_EN;
		} else {
			/* disable Tx/Rx */
			tctl &= ~E1000_TCTL_EN;
			rctl &= ~E1000_RCTL_EN;
		}
		E1000_WRITE_REG(hw, E1000_TCTL, tctl);
		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
		E1000_WRITE_FLUSH(hw);
		/* Notify registered applications of the link change. */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
	}
	return 0;
}
/**
 * Interrupt handler which shall be registered at first.
 * Latches the interrupt causes, then services them.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) regsitered before.
 *
 * @return
 *  void
 */
static void
eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
							void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	eth_igb_interrupt_get_status(dev);
	eth_igb_interrupt_action(dev);
}
/* Turn the port identification LED on. Returns 0 or -ENOTSUP. */
static int
eth_igb_led_on(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
}
/* Turn the port identification LED off. Returns 0 or -ENOTSUP. */
static int
eth_igb_led_off(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
}
/*
 * Report the current flow-control configuration: watermarks, pause time,
 * autoneg, and the pause mode derived from the CTRL register's TFCE/RFCE
 * bits (the actual negotiated state, not the requested one).
 */
static int
eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl_reg;
	int pause_tx, pause_rx;
	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water;
	fc_conf->low_water = hw->fc.low_water;
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = hw->mac.autoneg;
	/*
	 * Return rx_pause and tx_pause status according to actual setting of
	 * the TFCE and RFCE bits in the CTRL register.
	 */
	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
	pause_tx = (ctrl_reg & E1000_CTRL_TFCE) ? 1 : 0;
	pause_rx = (ctrl_reg & E1000_CTRL_RFCE) ? 1 : 0;
	if (pause_rx && pause_tx)
		fc_conf->mode = RTE_FC_FULL;
	else if (pause_rx)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (pause_tx)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;
	return 0;
}
/*
 * Apply a new flow-control configuration. Validates the high/low watermarks
 * against the Rx packet-buffer size, programs the requested mode, and
 * (since the shared code has no knob for it) directly toggles RCTL.PMCF to
 * control forwarding of MAC control frames.
 *
 * @return 0 on success, -ENOTSUP on an autoneg mismatch, -EINVAL on bad
 *         watermarks, -EIO when link setup fails.
 */
static int
eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct e1000_hw *hw;
	int err;
	/* Maps rte_eth_fc_mode values (by ordinal) to e1000 modes. */
	enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
		e1000_fc_none,
		e1000_fc_rx_pause,
		e1000_fc_tx_pause,
		e1000_fc_full
	};
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint32_t rctl;
	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (fc_conf->autoneg != hw->mac.autoneg)
		return -ENOTSUP;
	rx_buf_size = igb_get_rx_buffer_size(hw);
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
	/* At least reserve one Ethernet frame for watermark */
	max_high_water = rx_buf_size - ETHER_MAX_LEN;
	if ((fc_conf->high_water > max_high_water) ||
	    (fc_conf->high_water < fc_conf->low_water)) {
		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
		PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
		return (-EINVAL);
	}
	hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
	hw->fc.pause_time     = fc_conf->pause_time;
	hw->fc.high_water     = fc_conf->high_water;
	hw->fc.low_water      = fc_conf->low_water;
	hw->fc.send_xon	      = fc_conf->send_xon;
	err = e1000_setup_link_generic(hw);
	if (err == E1000_SUCCESS) {
		/* check if we want to forward MAC frames - driver doesn't have native
		 * capability to do that, so we'll write the registers ourselves */
		rctl = E1000_READ_REG(hw, E1000_RCTL);
		/* set or clear MFLCN.PMCF bit depending on configuration */
		if (fc_conf->mac_ctrl_frame_fwd != 0)
			rctl |= E1000_RCTL_PMCF;
		else
			rctl &= ~E1000_RCTL_PMCF;
		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
		E1000_WRITE_FLUSH(hw);
		return 0;
	}
	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
	return (-EIO);
}
#define E1000_RAH_POOLSEL_SHIFT      (18)
/*
 * Program a receive-address (RAR) entry with the given MAC address and
 * associate it with a VMDq pool by setting the pool-select bit in RAH.
 *
 * Fix: 'pool' was annotated __rte_unused, but it IS used below to compute
 * the RAH pool-select bit — the stale attribute was misleading and has been
 * removed (no behavioral or interface change).
 */
static void
eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		uint32_t index, uint32_t pool)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rah;
	e1000_rar_set(hw, mac_addr->addr_bytes, index);
	rah = E1000_READ_REG(hw, E1000_RAH(index));
	/* Enable this address for the selected VMDq pool. */
	rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
	E1000_WRITE_REG(hw, E1000_RAH(index), rah);
}
/* Clear a receive-address (RAR) entry by programming the all-zero MAC. */
static void
eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
{
	uint8_t addr[ETHER_ADDR_LEN];
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	memset(addr, 0, sizeof(addr));
	e1000_rar_set(hw, addr, index);
}
/*
 * Virtual Function operations
 */
/* Mask all VF interrupt causes via the EIMC register. */
static void
igbvf_intr_disable(struct e1000_hw *hw)
{
	PMD_INIT_FUNC_TRACE();
	/* Clear interrupt mask to stop from interrupts being generated */
	E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
	E1000_WRITE_FLUSH(hw);
}
/*
 * Quiesce the VF datapath: mask interrupts, flush each Tx queue, and
 * disable each Rx queue (waiting for the enable bit to clear).
 */
static void
igbvf_stop_adapter(struct rte_eth_dev *dev)
{
	u32 reg_val;
	u16 i;
	struct rte_eth_dev_info dev_info;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	memset(&dev_info, 0, sizeof(dev_info));
	eth_igbvf_infos_get(dev, &dev_info);
	/* Clear interrupt mask to stop from interrupts being generated */
	igbvf_intr_disable(hw);
	/* Clear any pending interrupts, flush previous writes */
	E1000_READ_REG(hw, E1000_EICR);
	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < dev_info.max_tx_queues; i++)
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);
	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < dev_info.max_rx_queues; i++) {
		reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
		reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
		/* NOTE(review): unbounded busy-wait — could spin forever if
		 * the HW never clears the enable bit; confirm acceptable. */
		while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
			;
	}
	/* flush all queues disables */
	E1000_WRITE_FLUSH(hw);
	msec_delay(2);
}
/*
 * VF link check: a PF reset or mailbox timeout forces a re-check; the link
 * is considered up once the STATUS.LU bit is set.
 */
static int eth_igbvf_link_update(struct e1000_hw *hw)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	struct e1000_mac_info *mac = &hw->mac;
	int ret_val = E1000_SUCCESS;
	PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");
	/*
	 * We only want to run this if there has been a rst asserted.
	 * in this case that could mean a link change, device reset,
	 * or a virtual function reset
	 */
	/* If we were hit with a reset or timeout drop the link */
	if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
		mac->get_link_status = TRUE;
	if (!mac->get_link_status)
		goto out;
	/* if link status is down no point in checking to see if pf is up */
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
		goto out;
	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link */
	mac->get_link_status = FALSE;
out:
	return ret_val;
}
/*
 * VF configure hook. A VF cannot change CRC stripping independently of the
 * PF, so the requested hw_strip_crc value is coerced to match the build-time
 * PF behavior (with a log message when it is overridden).
 */
static int
igbvf_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf* conf = &dev->data->dev_conf;
	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
		     dev->data->port_id);
	/*
	 * VF has no ability to enable/disable HW CRC
	 * Keep the persistent behavior the same as Host PF
	 */
#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
	if (!conf->rxmode.hw_strip_crc) {
		PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
		conf->rxmode.hw_strip_crc = 1;
	}
#else
	if (conf->rxmode.hw_strip_crc) {
		PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
		conf->rxmode.hw_strip_crc = 0;
	}
#endif
	return 0;
}
/*
 * Start the VF: reset the MAC, restore the VLAN filter table from the
 * shadow copy, and initialize the Tx and Rx paths.
 *
 * @return 0 on success; a negative errno when Rx init fails (e.g. mbuf
 *         allocation for descriptor rings), in which case queues are
 *         cleared before returning.
 */
static int
igbvf_dev_start(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;
	PMD_INIT_FUNC_TRACE();
	hw->mac.ops.reset_hw(hw);
	/* Set all vfta */
	igbvf_set_vfta_all(dev,1);
	eth_igbvf_tx_init(dev);
	/* This can fail when allocating mbufs for descriptor rings */
	ret = eth_igbvf_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		igb_dev_clear_queues(dev);
		return ret;
	}
	return 0;
}
/* Stop the VF: quiesce the adapter, clear VLAN filters in HW (the shadow
 * copy is kept for restore on the next start), and release queue mbufs. */
static void
igbvf_dev_stop(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	igbvf_stop_adapter(dev);
	/*
	  * Clear what we set, but we still keep shadow_vfta to
	  * restore after device starts
	  */
	igbvf_set_vfta_all(dev,0);
	igb_dev_clear_queues(dev);
}
/*
 * Close the VF device: reset the MAC and stop the datapath.
 * NOTE(review): the HW reset is issued BEFORE igbvf_dev_stop() — confirm
 * this ordering is intended (stop touches HW registers after the reset).
 */
static void
igbvf_dev_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	PMD_INIT_FUNC_TRACE();
	e1000_reset_hw(hw);
	igbvf_dev_stop(dev);
}
/*
 * Ask the PF (over the mailbox) to add or remove one VLAN id for this VF.
 * Returns the mailbox write status.
 */
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	uint32_t msgbuf[2];
	/* After set vlan, vlan strip will also be enabled in igb driver*/
	msgbuf[0] = E1000_VF_SET_VLAN;
	msgbuf[1] = vid;
	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
	if (on)
		msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
	return (mbx->ops.write_posted(hw, msgbuf, 2, 0));
}
/*
 * Replay the whole shadow VLAN filter table to the PF: for every bit set in
 * the shadow VFTA, send one mailbox request adding (on != 0) or removing
 * (on == 0) the corresponding VLAN id.
 */
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	int word, bit;
	for (word = 0; word < IGB_VFTA_SIZE; word++) {
		uint32_t bits = shadow_vfta->vfta[word];
		if (bits == 0)
			continue;
		for (bit = 0; bit < 32; bit++) {
			if ((bits >> bit) & 0x1)
				/* VLAN id = word index * 32 + bit index. */
				igbvf_set_vfta(hw,
					(uint16_t)((word << 5) + bit), on);
		}
	}
}
/*
 * VF VLAN filter update: forward the request to the PF via the mailbox and,
 * on success, mirror the change into the shadow VFTA so it survives a
 * device reset.
 *
 * @return 0 on success, the mailbox error code otherwise.
 */
static int
igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta * shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vid_idx = 0;
	uint32_t vid_bit = 0;
	int ret = 0;
	PMD_INIT_FUNC_TRACE();
	/*vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf*/
	ret = igbvf_set_vfta(hw, vlan_id, !!on);
	if(ret){
		PMD_INIT_LOG(ERR, "Unable to set VF vlan");
		return ret;
	}
	/* word index (vlan_id / 32) and bit within the word (vlan_id % 32) */
	vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
	/*Save what we set and retore it after device reset*/
	if (on)
		shadow_vfta->vfta[vid_idx] |= vid_bit;
	else
		shadow_vfta->vfta[vid_idx] &= ~vid_bit;
	return 0;
}
/*
 * Update the RSS redirection table. Each 32-bit RETA register packs four
 * 8-bit entries, so the table is processed in 4-entry groups: 'mask' holds
 * the 4 per-entry update bits for the group; when only some entries change,
 * the current register value is read first and the unchanged bytes are
 * preserved.
 *
 * @return 0 on success, -EINVAL if reta_size is not 128.
 */
static int
eth_igb_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	uint8_t i, j, mask;
	uint32_t reta, r;
	uint16_t idx, shift;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can supported "
			"(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}
	for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
						IGB_4_BIT_MASK);
		if (!mask)
			continue;
		/* Full-group update: no need to read the old register. */
		if (mask == IGB_4_BIT_MASK)
			r = 0;
		else
			r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
		for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
			if (mask & (0x1 << j))
				reta |= reta_conf[idx].reta[shift + j] <<
							(CHAR_BIT * j);
			else
				reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
		}
		E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
	}
	return 0;
}
/*
 * Read back the RSS redirection table into reta_conf, honoring the per-entry
 * mask bits. Mirrors the 4-entries-per-register packing used by
 * eth_igb_rss_reta_update().
 *
 * @return 0 on success, -EINVAL if reta_size is not 128.
 */
static int
eth_igb_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	uint8_t i, j, mask;
	uint32_t reta;
	uint16_t idx, shift;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can supported "
			"(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}
	for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
						IGB_4_BIT_MASK);
		if (!mask)
			continue;
		reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
		for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
			if (mask & (0x1 << j))
				reta_conf[idx].reta[shift + j] =
					((reta >> (CHAR_BIT * j)) &
						IGB_8_BIT_MASK);
		}
	}
	return 0;
}
/* Guard macro: expands to `return -ENOTSUP` unless the MAC type is one of
 * 82580 / i350 / 82576 — the only types supporting these filter features. */
#define MAC_TYPE_FILTER_SUP(type)    do {\
	if ((type) != e1000_82580 && (type) != e1000_i350 &&\
		(type) != e1000_82576)\
		return -ENOTSUP;\
} while (0)
/*
 * Add or delete the (single) TCP SYN filter. The device supports exactly
 * one SYN filter (SYNQF(0)); adding fails with -EINVAL when one is already
 * enabled, deleting fails with -ENOENT when none is.
 *
 * @param add  true to install the filter, false to remove it.
 */
static int
eth_igb_syn_filter_set(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter,
			bool add)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t synqf, rfctl;
	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
		return -EINVAL;
	synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
	if (add) {
		if (synqf & E1000_SYN_FILTER_ENABLE)
			return -EINVAL;
		synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
			E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);
		/* RFCTL.SYNQFP selects high/low priority for SYN packets. */
		rfctl = E1000_READ_REG(hw, E1000_RFCTL);
		if (filter->hig_pri)
			rfctl |= E1000_RFCTL_SYNQFP;
		else
			rfctl &= ~E1000_RFCTL_SYNQFP;
		E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
	} else {
		if (!(synqf & E1000_SYN_FILTER_ENABLE))
			return -ENOENT;
		synqf = 0;
	}
	E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
	E1000_WRITE_FLUSH(hw);
	return 0;
}
/*
 * Read back the current SYN filter configuration.
 *
 * @return 0 with 'filter' populated when a SYN filter is enabled,
 *         -ENOENT otherwise.
 */
static int
eth_igb_syn_filter_get(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t synqf, rfctl;
	synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
	if (synqf & E1000_SYN_FILTER_ENABLE) {
		rfctl = E1000_READ_REG(hw, E1000_RFCTL);
		filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0;
		filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >>
				E1000_SYN_FILTER_QUEUE_SHIFT);
		return 0;
	}
	return -ENOENT;
}
/*
 * Dispatch a generic filter-control operation (add/delete/get) to the SYN
 * filter implementation. Rejects unsupported MAC types and NULL arguments.
 *
 * @return 0 on success, -EINVAL for bad arguments or unsupported ops.
 */
static int
eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;
	MAC_TYPE_FILTER_SUP(hw->mac.type);
	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;
	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
			    filter_op);
		return -EINVAL;
	}
	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = eth_igb_syn_filter_set(dev,
				(struct rte_eth_syn_filter *)arg,
				TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = eth_igb_syn_filter_set(dev,
				(struct rte_eth_syn_filter *)arg,
				FALSE);
		break;
	case RTE_ETH_FILTER_GET:
		ret = eth_igb_syn_filter_get(dev,
				(struct rte_eth_syn_filter *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
/* Guard macro for the extended filter set: only 82580 and i350 support it;
 * expands to `return -ENOSYS` for any other MAC type. */
#define MAC_TYPE_FILTER_SUP_EXT(type)    do {\
	if ((type) != e1000_82580 && (type) != e1000_i350)\
		return -ENOSYS; \
} while (0)
/* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/
/*
 * Validates queue, priority and TCP flags, then converts the dst-port and
 * protocol mask fields: a full mask (UINT16_MAX / UINT8_MAX) means "compare
 * this field" (hardware mask bit 0), a zero mask means "wildcard" (mask bit
 * 1), anything else is rejected.
 *
 * @return 0 on success, -EINVAL on any invalid field.
 */
static inline int
ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
			struct e1000_2tuple_filter_info *filter_info)
{
	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
		return -EINVAL;
	if (filter->priority > E1000_2TUPLE_MAX_PRI)
		return -EINVAL;  /* filter index is out of range. */
	if (filter->tcp_flags > TCP_FLAG_ALL)
		return -EINVAL;  /* flags is invalid. */
	if (filter->dst_port_mask == UINT16_MAX) {
		filter_info->dst_port_mask = 0;
		filter_info->dst_port = filter->dst_port;
	} else if (filter->dst_port_mask == 0) {
		filter_info->dst_port_mask = 1;
	} else {
		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
		return -EINVAL;
	}
	if (filter->proto_mask == UINT8_MAX) {
		filter_info->proto_mask = 0;
		filter_info->proto = filter->proto;
	} else if (filter->proto_mask == 0) {
		filter_info->proto_mask = 1;
	} else {
		PMD_DRV_LOG(ERR, "invalid protocol mask.");
		return -EINVAL;
	}
	filter_info->priority = (uint8_t)filter->priority;
	filter_info->tcp_flags =
		(filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) ?
		filter->tcp_flags : 0;
	return 0;
}
static inline struct e1000_2tuple_filter *
igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
struct e1000_2tuple_filter_info *key)
{
struct e1000_2tuple_filter *it;
TAILQ_FOREACH(it, filter_list, entries) {
if (memcmp(key, &it->filter_info,
sizeof(struct e1000_2tuple_filter_info)) == 0) {
return it;
}
}
return NULL;
}
/*
 * igb_add_2tuple_filter - add a 2tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: ponter to the filter that will be added.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value (-ENOMEM, -EEXIST, -ENOSYS, or a
 *      conversion error from ntuple_filter_to_2tuple).
 */
static int
igb_add_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_2tuple_filter *filter;
	uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
	uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
	int i, ret;
	filter = rte_zmalloc("e1000_2tuple_filter",
			sizeof(struct e1000_2tuple_filter), 0);
	if (filter == NULL)
		return -ENOMEM;
	ret = ntuple_filter_to_2tuple(ntuple_filter,
				      &filter->filter_info);
	if (ret < 0) {
		rte_free(filter);
		return ret;
	}
	/* Reject duplicates before consuming a hardware slot. */
	if (igb_2tuple_filter_lookup(&filter_info->twotuple_list,
					 &filter->filter_info) != NULL) {
		PMD_DRV_LOG(ERR, "filter exists.");
		rte_free(filter);
		return -EEXIST;
	}
	filter->queue = ntuple_filter->queue;
	/*
	 * look for an unused 2tuple filter index,
	 * and insert the filter to list.
	 */
	for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) {
		if (!(filter_info->twotuple_mask & (1 << i))) {
			filter_info->twotuple_mask |= 1 << i;
			filter->index = i;
			TAILQ_INSERT_TAIL(&filter_info->twotuple_list,
					  filter,
					  entries);
			break;
		}
	}
	if (i >= E1000_MAX_TTQF_FILTERS) {
		PMD_DRV_LOG(ERR, "2tuple filters are full.");
		rte_free(filter);
		return -ENOSYS;
	}
	/* Program IMIR: destination port and bypass/priority settings. */
	imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
	if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
		imir |= E1000_IMIR_PORT_BP;
	else
		imir &= ~E1000_IMIR_PORT_BP;
	imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
	/* Program TTQF: target queue, protocol, and mask-enable bits. */
	ttqf |= E1000_TTQF_QUEUE_ENABLE;
	ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
	ttqf |= (uint32_t)(filter->filter_info.proto & E1000_TTQF_PROTOCOL_MASK);
	if (filter->filter_info.proto_mask == 0)
		ttqf &= ~E1000_TTQF_MASK_ENABLE;
	/* tcp flags bits setting. */
	if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
		if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_URG;
		if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_ACK;
		if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_PSH;
		if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_RST;
		if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_SYN;
		if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_FIN;
	} else
		imir_ext |= E1000_IMIREXT_CTRL_BP;
	E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
	E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
	E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
	return 0;
}
/*
 * igb_remove_2tuple_filter - remove a 2tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: ponter to the filter that will be removed.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 *
 * Fix: the original freed the filter node with rte_free() and then read
 * filter->index three times to disable the hardware registers — a
 * use-after-free. The register writes are now issued before the free.
 */
static int
igb_remove_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_2tuple_filter_info filter_2tuple;
	struct e1000_2tuple_filter *filter;
	int ret;
	memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info));
	ret = ntuple_filter_to_2tuple(ntuple_filter,
				      &filter_2tuple);
	if (ret < 0)
		return ret;
	filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list,
					 &filter_2tuple);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "filter doesn't exist.");
		return -ENOENT;
	}
	/* Release the slot and unlink the node from the software list. */
	filter_info->twotuple_mask &= ~(1 << filter->index);
	TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
	/* Disable the hardware filter while filter->index is still valid. */
	E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
	E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
	rte_free(filter);
	return 0;
}
static inline struct e1000_flex_filter *
eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
struct e1000_flex_filter_info *key)
{
struct e1000_flex_filter *it;
TAILQ_FOREACH(it, filter_list, entries) {
if (memcmp(key, &it->filter_info,
sizeof(struct e1000_flex_filter_info)) == 0)
return it;
}
return NULL;
}
/*
 * eth_igb_add_del_flex_filter - add or delete a flex filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * filter: pointer to the flex filter to add or remove.
 * add: if true, add the filter; if false, remove it.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
			struct rte_eth_flex_filter *filter,
			bool add)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_flex_filter *flex_filter, *it;
	uint32_t wufc, queueing, mask;
	uint32_t reg_off;
	uint8_t shift, i, j = 0;

	flex_filter = rte_zmalloc("e1000_flex_filter",
			sizeof(struct e1000_flex_filter), 0);
	if (flex_filter == NULL)
		return -ENOMEM;

	/*
	 * Build the software key: length, priority, pattern bytes and one
	 * bit-reversed mask byte per CHAR_BIT bytes of pattern.
	 */
	flex_filter->filter_info.len = filter->len;
	flex_filter->filter_info.priority = filter->priority;
	memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
	for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) {
		mask = 0;
		/* reverse bits in flex filter's mask */
		for (shift = 0; shift < CHAR_BIT; shift++) {
			if (filter->mask[i] & (0x01 << shift))
				mask |= (0x80 >> shift);
		}
		flex_filter->filter_info.mask[i] = mask;
	}

	wufc = E1000_READ_REG(hw, E1000_WUFC);

	if (add) {
		if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
				&flex_filter->filter_info) != NULL) {
			PMD_DRV_LOG(ERR, "filter exists.");
			rte_free(flex_filter);
			return -EEXIST;
		}
		flex_filter->queue = filter->queue;
		/*
		 * look for an unused flex filter index
		 * and insert the filter into the list.
		 */
		for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) {
			if (!(filter_info->flex_mask & (1 << i))) {
				filter_info->flex_mask |= 1 << i;
				flex_filter->index = i;
				TAILQ_INSERT_TAIL(&filter_info->flex_list,
						  flex_filter,
						  entries);
				break;
			}
		}
		if (i >= E1000_MAX_FLEX_FILTERS) {
			PMD_DRV_LOG(ERR, "flex filters are full.");
			rte_free(flex_filter);
			return -ENOSYS;
		}

		/*
		 * The register offset depends on the filter index, which is
		 * only known at this point. The original code computed
		 * reg_off before the index was assigned (index was still 0
		 * from rte_zmalloc), so it always programmed FHFT(0).
		 */
		if (flex_filter->index < E1000_MAX_FHFT)
			reg_off = E1000_FHFT(flex_filter->index);
		else
			reg_off = E1000_FHFT_EXT(flex_filter->index -
						 E1000_MAX_FHFT);

		E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
				(E1000_WUFC_FLX0 << flex_filter->index));
		queueing = filter->len |
			(filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
			(filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
		E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
				queueing);
		/*
		 * Each 16-byte FHFT row holds two pattern dwords and one
		 * mask dword; the fourth dword of the row is skipped.
		 */
		for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
			E1000_WRITE_REG(hw, reg_off,
					flex_filter->filter_info.dwords[j]);
			reg_off += sizeof(uint32_t);
			E1000_WRITE_REG(hw, reg_off,
					flex_filter->filter_info.dwords[++j]);
			reg_off += sizeof(uint32_t);
			E1000_WRITE_REG(hw, reg_off,
					(uint32_t)flex_filter->filter_info.mask[i]);
			reg_off += sizeof(uint32_t) * 2;
			++j;
		}
	} else {
		it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
				&flex_filter->filter_info);
		if (it == NULL) {
			PMD_DRV_LOG(ERR, "filter doesn't exist.");
			rte_free(flex_filter);
			return -ENOENT;
		}

		/*
		 * Clear the registers of the *stored* filter; the scratch
		 * flex_filter's index is meaningless (always 0).
		 */
		if (it->index < E1000_MAX_FHFT)
			reg_off = E1000_FHFT(it->index);
		else
			reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);

		for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
			E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
		E1000_WRITE_REG(hw, E1000_WUFC, wufc &
			(~(E1000_WUFC_FLX0 << it->index)));

		filter_info->flex_mask &= ~(1 << it->index);
		TAILQ_REMOVE(&filter_info->flex_list, it, entries);
		rte_free(it);
		rte_free(flex_filter);
	}

	return 0;
}
static int
eth_igb_get_flex_filter(struct rte_eth_dev *dev,
struct rte_eth_flex_filter *filter)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_flex_filter flex_filter, *it;
uint32_t wufc, queueing, wufc_en = 0;
memset(&flex_filter, 0, sizeof(struct e1000_flex_filter));
flex_filter.filter_info.len = filter->len;
flex_filter.filter_info.priority = filter->priority;
memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
memcpy(flex_filter.filter_info.mask, filter->mask,
RTE_ALIGN(filter->len, sizeof(char)) / sizeof(char));
it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
&flex_filter.filter_info);
if (it == NULL) {
PMD_DRV_LOG(ERR, "filter doesn't exist.");
return -ENOENT;
}
wufc = E1000_READ_REG(hw, E1000_WUFC);
wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index);
if ((wufc & wufc_en) == wufc_en) {
uint32_t reg_off = 0;
if (it->index < E1000_MAX_FHFT)
reg_off = E1000_FHFT(it->index);
else
reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
queueing = E1000_READ_REG(hw,
reg_off + E1000_FHFT_QUEUEING_OFFSET);
filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
E1000_FHFT_QUEUEING_PRIO_SHIFT;
filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
E1000_FHFT_QUEUEING_QUEUE_SHIFT;
return 0;
}
return -ENOENT;
}
/*
 * eth_igb_flex_filter_handle - dispatch one flex-filter operation.
 * @dev: device the operation applies to
 * @filter_op: add/delete/get/nop
 * @arg: pointer to a struct rte_eth_flex_filter describing the filter
 */
static int
eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_flex_filter *flex;

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
			    filter_op);
		return -EINVAL;
	}

	flex = (struct rte_eth_flex_filter *)arg;

	/* The pattern must be a non-empty multiple of 8 bytes. */
	if (flex->len == 0 || flex->len > E1000_MAX_FLEX_FILTER_LEN ||
	    flex->len % sizeof(uint64_t) != 0) {
		PMD_DRV_LOG(ERR, "filter's length is out of range");
		return -EINVAL;
	}
	if (flex->priority > E1000_MAX_FLEX_FILTER_PRI) {
		PMD_DRV_LOG(ERR, "filter's priority is out of range");
		return -EINVAL;
	}

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		return eth_igb_add_del_flex_filter(dev, flex, TRUE);
	case RTE_ETH_FILTER_DELETE:
		return eth_igb_add_del_flex_filter(dev, flex, FALSE);
	case RTE_ETH_FILTER_GET:
		return eth_igb_get_flex_filter(dev, flex);
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
		return -EINVAL;
	}
}
/*
 * Translate a generic rte_eth_ntuple_filter into the 82576 5-tuple key
 * (struct e1000_5tuple_filter_info).
 *
 * For every field a mask of all-ones means "compare this field"
 * (internal mask flag 0) and a mask of zero means "ignore it"
 * (internal mask flag 1); any other mask value is rejected.
 *
 * Return: 0 on success, -EINVAL on any invalid input.
 */
static inline int
ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
			struct e1000_5tuple_filter_info *filter_info)
{
	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
		return -EINVAL;
	if (filter->priority > E1000_2TUPLE_MAX_PRI)
		return -EINVAL;  /* filter index is out of range. */
	if (filter->tcp_flags > TCP_FLAG_ALL)
		return -EINVAL;  /* flags is invalid. */

	if (filter->dst_ip_mask == UINT32_MAX) {
		filter_info->dst_ip_mask = 0;
		filter_info->dst_ip = filter->dst_ip;
	} else if (filter->dst_ip_mask == 0) {
		filter_info->dst_ip_mask = 1;
	} else {
		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
		return -EINVAL;
	}

	if (filter->src_ip_mask == UINT32_MAX) {
		filter_info->src_ip_mask = 0;
		filter_info->src_ip = filter->src_ip;
	} else if (filter->src_ip_mask == 0) {
		filter_info->src_ip_mask = 1;
	} else {
		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
		return -EINVAL;
	}

	if (filter->dst_port_mask == UINT16_MAX) {
		filter_info->dst_port_mask = 0;
		filter_info->dst_port = filter->dst_port;
	} else if (filter->dst_port_mask == 0) {
		filter_info->dst_port_mask = 1;
	} else {
		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
		return -EINVAL;
	}

	if (filter->src_port_mask == UINT16_MAX) {
		filter_info->src_port_mask = 0;
		filter_info->src_port = filter->src_port;
	} else if (filter->src_port_mask == 0) {
		filter_info->src_port_mask = 1;
	} else {
		PMD_DRV_LOG(ERR, "invalid src_port mask.");
		return -EINVAL;
	}

	if (filter->proto_mask == UINT8_MAX) {
		filter_info->proto_mask = 0;
		filter_info->proto = filter->proto;
	} else if (filter->proto_mask == 0) {
		filter_info->proto_mask = 1;
	} else {
		PMD_DRV_LOG(ERR, "invalid protocol mask.");
		return -EINVAL;
	}

	filter_info->priority = (uint8_t)filter->priority;
	filter_info->tcp_flags =
		(filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) ?
			filter->tcp_flags : 0;

	return 0;
}
static inline struct e1000_5tuple_filter *
igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
struct e1000_5tuple_filter_info *key)
{
struct e1000_5tuple_filter *it;
TAILQ_FOREACH(it, filter_list, entries) {
if (memcmp(key, &it->filter_info,
sizeof(struct e1000_5tuple_filter_info)) == 0) {
return it;
}
}
return NULL;
}
/*
 * igb_add_5tuple_filter_82576 - add a 5tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be added.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_5tuple_filter *filter;
	uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
	uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
	uint8_t i;
	int ret;

	filter = rte_zmalloc("e1000_5tuple_filter",
			sizeof(struct e1000_5tuple_filter), 0);
	if (filter == NULL)
		return -ENOMEM;
	/* Validate and translate the request into the hardware key. */
	ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
			&filter->filter_info);
	if (ret < 0) {
		rte_free(filter);
		return ret;
	}
	/* Reject duplicates: the same 5-tuple may only be added once. */
	if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
			&filter->filter_info) != NULL) {
		PMD_DRV_LOG(ERR, "filter exists.");
		rte_free(filter);
		return -EEXIST;
	}
	filter->queue = ntuple_filter->queue;
	/*
	 * look for an unused 5tuple filter index,
	 * and insert the filter to list.
	 */
	for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) {
		if (!(filter_info->fivetuple_mask & (1 << i))) {
			filter_info->fivetuple_mask |= 1 << i;
			filter->index = i;
			TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
				filter,
				entries);
			break;
		}
	}
	if (i >= E1000_MAX_FTQF_FILTERS) {
		PMD_DRV_LOG(ERR, "5tuple filters are full.");
		rte_free(filter);
		return -ENOSYS;
	}

	/* Program FTQF; a cleared *_BP bit enables comparison of a field. */
	ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
	if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
		ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
	if (filter->filter_info.dst_ip_mask == 0)
		ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
	if (filter->filter_info.src_port_mask == 0)
		ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
	if (filter->filter_info.proto_mask == 0)
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
	ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
		E1000_FTQF_QUEUE_MASK;
	ftqf |= E1000_FTQF_QUEUE_ENABLE;
	E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
	E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
	E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
	spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
	E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
	/* IMIR carries the destination port and the filter priority. */
	imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
	if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
		imir |= E1000_IMIR_PORT_BP;
	else
		imir &= ~E1000_IMIR_PORT_BP;
	imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
	/* tcp flags bits setting. */
	if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
		if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_URG;
		if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_ACK;
		if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_PSH;
		if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_RST;
		if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_SYN;
		if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_FIN;
	} else
		imir_ext |= E1000_IMIREXT_CTRL_BP;
	E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
	E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
	return 0;
}
/*
* igb_remove_5tuple_filter_82576 - remove a 5tuple filter
*
* @param
* dev: Pointer to struct rte_eth_dev.
* ntuple_filter: ponter to the filter that will be removed.
*
* @return
* - On success, zero.
* - On failure, a negative value.
*/
static int
igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_5tuple_filter_info filter_5tuple;
struct e1000_5tuple_filter *filter;
int ret;
memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
&filter_5tuple);
if (ret < 0)
return ret;
filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
&filter_5tuple);
if (filter == NULL) {
PMD_DRV_LOG(ERR, "filter doesn't exist.");
return -ENOENT;
}
filter_info->fivetuple_mask &= ~(1 << filter->index);
TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
rte_free(filter);
E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
E1000_FTQF_VF_BP | E1000_FTQF_MASK);
E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
return 0;
}
/*
 * eth_igb_mtu_set - change the device MTU.
 *
 * Validates the resulting frame size against the device limits, toggles
 * jumbo-frame mode in RCTL as needed and programs the new maximum
 * receive packet length into RLPML.
 *
 * Returns 0 on success, -EINVAL for an out-of-range MTU, -ENOTSUP for
 * unsupported hardware.
 */
static int
eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	uint32_t rctl;
	struct e1000_hw *hw;
	struct rte_eth_dev_info dev_info;
	/* On-wire frame = MTU + L2 header + CRC + VLAN tag. */
	uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
				     VLAN_TAG_SIZE);

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

#ifdef RTE_LIBRTE_82571_SUPPORT
	/* XXX: not bigger than max_rx_pktlen */
	if (hw->mac.type == e1000_82571)
		return -ENOTSUP;
#endif
	eth_igb_infos_get(dev, &dev_info);

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) ||
	    (frame_size > dev_info.max_rx_pktlen))
		return -EINVAL;

	/* refuse mtu that requires the support of scattered packets when this
	 * feature has not been enabled before. */
	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
		return -EINVAL;

	rctl = E1000_READ_REG(hw, E1000_RCTL);

	/* switch to jumbo mode if needed */
	if (frame_size > ETHER_MAX_LEN) {
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
		rctl |= E1000_RCTL_LPE;
	} else {
		dev->data->dev_conf.rxmode.jumbo_frame = 0;
		rctl &= ~E1000_RCTL_LPE;
	}
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
	E1000_WRITE_REG(hw, E1000_RLPML,
			dev->data->dev_conf.rxmode.max_rx_pkt_len);
	return 0;
}
/*
 * igb_add_del_ntuple_filter - add or delete a ntuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
 * add: if true, add filter; if false, remove filter
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter,
			bool add)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	switch (ntuple_filter->flags) {
	case RTE_5TUPLE_FLAGS:
	case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
		/* 5-tuple filters are only implemented for the 82576. */
		if (hw->mac.type != e1000_82576)
			return -ENOTSUP;
		return add ?
			igb_add_5tuple_filter_82576(dev, ntuple_filter) :
			igb_remove_5tuple_filter_82576(dev, ntuple_filter);
	case RTE_2TUPLE_FLAGS:
	case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
		/* 2-tuple filters are only implemented for 82580/i350. */
		if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
			return -ENOTSUP;
		return add ?
			igb_add_2tuple_filter(dev, ntuple_filter) :
			igb_remove_2tuple_filter(dev, ntuple_filter);
	default:
		return -EINVAL;
	}
}
/*
 * igb_get_ntuple_filter - get a ntuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
igb_get_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_5tuple_filter_info filter_5tuple;
	struct e1000_2tuple_filter_info filter_2tuple;
	struct e1000_5tuple_filter *p_5tuple_filter;
	struct e1000_2tuple_filter *p_2tuple_filter;
	int ret = 0;

	switch (ntuple_filter->flags) {
	case RTE_5TUPLE_FLAGS:
	case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
		if (hw->mac.type != e1000_82576)
			return -ENOTSUP;
		memset(&filter_5tuple,
		       0,
		       sizeof(struct e1000_5tuple_filter_info));
		ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
						    &filter_5tuple);
		if (ret < 0)
			return ret;
		p_5tuple_filter = igb_5tuple_filter_lookup_82576(
					&filter_info->fivetuple_list,
					&filter_5tuple);
		if (p_5tuple_filter == NULL) {
			PMD_DRV_LOG(ERR, "filter doesn't exist.");
			return -ENOENT;
		}
		ntuple_filter->queue = p_5tuple_filter->queue;
		break;
	case RTE_2TUPLE_FLAGS:
	case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
		if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
			return -ENOTSUP;
		memset(&filter_2tuple,
		       0,
		       sizeof(struct e1000_2tuple_filter_info));
		ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple);
		if (ret < 0)
			return ret;
		p_2tuple_filter = igb_2tuple_filter_lookup(
					&filter_info->twotuple_list,
					&filter_2tuple);
		if (p_2tuple_filter == NULL) {
			PMD_DRV_LOG(ERR, "filter doesn't exist.");
			return -ENOENT;
		}
		ntuple_filter->queue = p_2tuple_filter->queue;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	/*
	 * The original function ended with "return 0;", silently discarding
	 * the -EINVAL set for unsupported flag combinations.
	 */
	return ret;
}
/*
 * igb_ntuple_filter_handle - Handle operations for ntuple filter.
 * @dev: pointer to rte_eth_dev structure
 * @filter_op: operation to perform
 * @arg: pointer to a struct rte_eth_ntuple_filter for the operation
 */
static int
igb_ntuple_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_ntuple_filter *ntuple;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
			    filter_op);
		return -EINVAL;
	}

	ntuple = (struct rte_eth_ntuple_filter *)arg;
	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		return igb_add_del_ntuple_filter(dev, ntuple, TRUE);
	case RTE_ETH_FILTER_DELETE:
		return igb_add_del_ntuple_filter(dev, ntuple, FALSE);
	case RTE_ETH_FILTER_GET:
		return igb_get_ntuple_filter(dev, ntuple);
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
		return -EINVAL;
	}
}
/*
 * Return the ETQF slot that currently holds @ethertype, or -1 when the
 * ethertype is not programmed in any in-use slot.
 */
static inline int
igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
			uint16_t ethertype)
{
	int slot;

	for (slot = 0; slot < E1000_MAX_ETQF_FILTERS; slot++) {
		if ((filter_info->ethertype_mask & (1 << slot)) &&
		    filter_info->ethertype_filters[slot] == ethertype)
			return slot;
	}

	return -1;
}
/*
 * Claim the first free ETQF slot for @ethertype.
 *
 * Return: the slot index, or -1 when all slots are in use.
 */
static inline int
igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
			uint16_t ethertype)
{
	int slot;

	for (slot = 0; slot < E1000_MAX_ETQF_FILTERS; slot++) {
		if (filter_info->ethertype_mask & (1 << slot))
			continue;	/* slot already in use */
		filter_info->ethertype_mask |= 1 << slot;
		filter_info->ethertype_filters[slot] = ethertype;
		return slot;
	}

	return -1;
}
/*
 * Release ETQF slot @idx: clear its in-use bit and stored ethertype.
 *
 * Return: @idx on success, -1 when @idx is out of range.
 */
static inline int
igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
			uint8_t idx)
{
	if (idx >= E1000_MAX_ETQF_FILTERS)
		return -1;

	filter_info->ethertype_filters[idx] = 0;
	filter_info->ethertype_mask &= ~(1 << idx);
	return idx;
}
/*
 * Add or remove one EtherType filter (ETQF). The ethertype value itself
 * is the lookup key; IPv4/IPv6 ethertypes, MAC compare and the drop
 * action are all rejected.
 */
static int
igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter,
			bool add)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t etqf = 0;
	int ret;

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
			" ethertype filter.", filter->ether_type);
		return -EINVAL;
	}
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
		return -EINVAL;
	}
	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		PMD_DRV_LOG(ERR, "drop option is unsupported.");
		return -EINVAL;
	}

	/* ret is the ETQF slot index on success, -1 when not found. */
	ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
	if (ret >= 0 && add) {
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
			filter->ether_type);
		return -EEXIST;
	}
	if (ret < 0 && !add) {
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
			filter->ether_type);
		return -ENOENT;
	}

	if (add) {
		ret = igb_ethertype_filter_insert(filter_info,
			filter->ether_type);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "ethertype filters are full.");
			return -ENOSYS;
		}
		etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
		etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
		etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
	} else {
		ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
		if (ret < 0)
			return -ENOSYS;
	}

	/* On delete etqf stays 0, which disables the slot. */
	E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf);
	E1000_WRITE_FLUSH(hw);
	return 0;
}
/*
 * igb_get_ethertype_filter - report the queue configured for an
 * existing, enabled EtherType filter.
 *
 * Return: 0 on success, -ENOENT when no matching enabled filter exists.
 */
static int
igb_get_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t etqf;
	int idx;

	idx = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
	if (idx < 0) {
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
			    filter->ether_type);
		return -ENOENT;
	}

	etqf = E1000_READ_REG(hw, E1000_ETQF(idx));
	if (!(etqf & E1000_ETQF_FILTER_ENABLE))
		return -ENOENT;

	filter->ether_type = etqf & E1000_ETQF_ETHERTYPE;
	filter->flags = 0;
	filter->queue = (etqf & E1000_ETQF_QUEUE) >> E1000_ETQF_QUEUE_SHIFT;
	return 0;
}
/*
 * igb_ethertype_filter_handle - Handle operations for ethertype filter.
 * @dev: pointer to rte_eth_dev structure
 * @filter_op: operation to perform
 * @arg: pointer to a struct rte_eth_ethertype_filter for the operation
 */
static int
igb_ethertype_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_ethertype_filter *ethertype_filter;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
			    filter_op);
		return -EINVAL;
	}

	ethertype_filter = (struct rte_eth_ethertype_filter *)arg;
	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		return igb_add_del_ethertype_filter(dev, ethertype_filter,
						    TRUE);
	case RTE_ETH_FILTER_DELETE:
		return igb_add_del_ethertype_filter(dev, ethertype_filter,
						    FALSE);
	case RTE_ETH_FILTER_GET:
		return igb_get_ethertype_filter(dev, ethertype_filter);
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
		return -EINVAL;
	}
}
/*
 * eth_igb_filter_ctrl - top-level filter dispatcher: route the request
 * to the handler for the requested filter type.
 */
static int
eth_igb_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type filter_type,
		enum rte_filter_op filter_op,
		void *arg)
{
	if (filter_type == RTE_ETH_FILTER_NTUPLE)
		return igb_ntuple_filter_handle(dev, filter_op, arg);
	if (filter_type == RTE_ETH_FILTER_ETHERTYPE)
		return igb_ethertype_filter_handle(dev, filter_op, arg);
	if (filter_type == RTE_ETH_FILTER_SYN)
		return eth_igb_syn_filter_handle(dev, filter_op, arg);
	if (filter_type == RTE_ETH_FILTER_FLEXIBLE)
		return eth_igb_flex_filter_handle(dev, filter_op, arg);

	PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
		    filter_type);
	return -EINVAL;
}
/* Driver descriptor for the IGB physical-function PMD. */
static struct rte_driver pmd_igb_drv = {
	.type = PMD_PDEV,
	.init = rte_igb_pmd_init,
};

/* Driver descriptor for the IGB virtual-function PMD. */
static struct rte_driver pmd_igbvf_drv = {
	.type = PMD_PDEV,
	.init = rte_igbvf_pmd_init,
};

/* Register both drivers with the EAL at load time. */
PMD_REGISTER_DRIVER(pmd_igb_drv);
PMD_REGISTER_DRIVER(pmd_igbvf_drv);
| gopakumarce/dpdk-2.0.0 | lib/librte_pmd_e1000/igb_ethdev.c | C | gpl-2.0 | 102,820 |
<?php
/**
 * @class User
 *
 * Empty model class: all behavior is inherited from Model.
 */
class User extends Model {
}
| manureta/sstuv_mapa | script/ext-3.4.0/examples/writer/remote/app/models/user.php | PHP | gpl-2.0 | 66 |
/* mm/ashmem.c
*
* Anonymous Shared Memory Subsystem, ashmem
*
* Copyright (C) 2008 Google, Inc.
*
* Robert Love <[email protected]>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "ashmem: " fmt
#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"
#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
/**
 * struct ashmem_area - The anonymous shared memory area
 * @name: The optional name in /proc/pid/maps
 * @unpinned_list: The list of this area's unpinned ranges
 * @file: The shmem-based backing file
 * @size: The size of the mapping, in bytes
 * @prot_mask: The allowed protection bits, as vm_flags
 *
 * The lifecycle of this structure is from our parent file's open() until
 * its release(). It is also protected by 'ashmem_mutex'
 *
 * Warning: Mappings do NOT pin this structure; It dies on close()
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN];
	struct list_head unpinned_list;
	struct file *file;
	size_t size;
	unsigned long prot_mask;
};
/**
 * struct ashmem_range - A range of unpinned/evictable pages
 * @lru: The entry in the LRU list
 * @unpinned: The entry in its area's unpinned list
 * @asma: The associated anonymous shared memory area.
 * @pgstart: The starting page (inclusive)
 * @pgend: The ending page (inclusive)
 * @purged: The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 *
 * The lifecycle of this structure is from unpin to pin.
 * It is protected by 'ashmem_mutex'
 */
struct ashmem_range {
	struct list_head lru;
	struct list_head unpinned;
	struct ashmem_area *asma;
	size_t pgstart;
	size_t pgend;
	unsigned int purged;
};
/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/**
 * long lru_count - The count of pages on our LRU list.
 *
 * This is protected by ashmem_mutex.
 */
static unsigned long lru_count;

/**
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

/* Slab caches for the two bookkeeping structures above. */
static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;
/* Number of pages covered by @range (both endpoints inclusive). */
#define range_size(range) \
	((range)->pgend - (range)->pgstart + 1)

/* A range stays on the LRU list only while it has not been purged. */
#define range_on_lru(range) \
	((range)->purged == ASHMEM_NOT_PURGED)

/* True if [start, end] fully contains @range. */
#define page_range_subsumes_range(range, start, end) \
	(((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

/* True if @range fully contains [start, end]. */
#define page_range_subsumed_by_range(range, start, end) \
	(((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

/* True if @page falls inside @range. */
#define page_in_range(range, page) \
	(((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

/* True if @range and [start, end] overlap in any way. */
#define page_range_in_range(range, start, end) \
	(page_in_range(range, start) || page_in_range(range, end) || \
	 page_range_subsumes_range(range, start, end))

/* True if @range ends strictly before @page. */
#define range_before_page(range, page) \
	((range)->pgend < (page))

/* Protection bits a mapping is allowed to request. */
#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)
/**
 * lru_add() - Appends @range to the tail of the global LRU list and
 * accounts its page count in @lru_count.
 *
 * Protected by ashmem_mutex.
 */
static inline void lru_add(struct ashmem_range *range)
{
	lru_count += range_size(range);
	list_add_tail(&range->lru, &ashmem_lru_list);
}
/**
 * lru_del() - Unlinks @range from the global LRU list and subtracts its
 * page count from @lru_count.
 *
 * Protected by ashmem_mutex.
 */
static inline void lru_del(struct ashmem_range *range)
{
	lru_count -= range_size(range);
	list_del(&range->lru);
}
/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma: The associated ashmem_area
 * @prev_range: The previous ashmem_range in the sorted asma->unpinned list
 * @purged: Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start: The starting page (inclusive)
 * @end: The ending page (inclusive)
 *
 * The new range is linked after @prev_range and, when not purged,
 * added to the global LRU list. Protected by ashmem_mutex.
 *
 * Return: 0 if successful, or -ENOMEM if there is an error
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *new_range;

	new_range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!new_range))
		return -ENOMEM;

	new_range->asma = asma;
	new_range->pgstart = start;
	new_range->pgend = end;
	new_range->purged = purged;

	list_add_tail(&new_range->unpinned, &prev_range->unpinned);
	if (range_on_lru(new_range))
		lru_add(new_range);

	return 0;
}
/**
 * range_del() - Unlinks @range from both lists it may be on and frees it
 * @range: The ashmem_range being deleted; must have been range_alloc()ed
 */
static void range_del(struct ashmem_range *range)
{
	if (range_on_lru(range))
		lru_del(range);
	list_del(&range->unpinned);
	kmem_cache_free(ashmem_range_cachep, range);
}
/**
 * range_shrink() - Shrinks an ashmem_range
 * @range: The associated ashmem_range being shrunk
 * @start: The starting page of the new range
 * @end: The ending page of the new range
 *
 * Only the boundaries change; the pages themselves are untouched. The
 * LRU page accounting is reduced by however many pages were dropped.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t old_size = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= old_size - range_size(range);
}
/**
 * ashmem_open() - Creates the per-fd ashmem_area for a newly opened file
 * @inode: The inode of the ashmem device file
 * @file: The file being opened
 *
 * The new area is stored in file->private_data; its name gets the
 * "dev/ashmem/" prefix and all protection bits are initially allowed.
 *
 * Return: 0 if successful, or a negative errno otherwise.
 */
static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *area;
	int err;

	err = generic_file_open(inode, file);
	if (unlikely(err))
		return err;

	area = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!area))
		return -ENOMEM;

	INIT_LIST_HEAD(&area->unpinned_list);
	memcpy(area->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	area->prot_mask = PROT_MASK;
	file->private_data = area;

	return 0;
}
/**
 * ashmem_release() - Tears down the ashmem_area on last close
 * @ignored: The inode (unused)
 * @file: The file whose private_data holds the area
 *
 * Deletes every remaining unpinned range, drops the reference on the
 * shmem backing file (if one was created), and frees the area.
 *
 * Return: always 0.
 */
static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *area = file->private_data;
	struct ashmem_range *range, *tmp;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, tmp, &area->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (area->file)
		fput(area->file);
	kmem_cache_free(ashmem_area_cachep, area);

	return 0;
}
/**
 * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file
 * @file:	The associated backing file.
 * @buf:	The buffer of data being written to
 * @len:	The number of bytes being read
 * @pos:	The position of the first byte to read.
 *
 * Return: number of bytes read on success (0 == EOF, also returned when no
 * size has been set yet), -EBADF if there is no backing file, or another
 * negative errno propagated from the backing file's read.
 */
static ssize_t ashmem_read(struct file *file, char __user *buf,
			   size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out_unlock;

	if (!asma->file) {
		ret = -EBADF;
		goto out_unlock;
	}

	/* Deliberately dropped before the read: see the comment below. */
	mutex_unlock(&ashmem_mutex);

	/*
	 * asma and asma->file are used outside the lock here.  We assume
	 * once asma->file is set it will never be changed, and will not
	 * be destroyed until all references to the file are dropped and
	 * ashmem_release is called.
	 */
	ret = asma->file->f_op->read(asma->file, buf, len, pos);
	if (ret >= 0) {
		/** Update backing file pos, since f_ops->read() doesn't */
		asma->file->f_pos = *pos;
	}
	return ret;

out_unlock:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
/**
 * ashmem_llseek() - Repositions the offset of an Ashmem-enabled file
 * @file:	The associated backing file.
 * @offset:	The offset to seek to, interpreted per @origin.
 * @origin:	The whence value (SEEK_SET/SEEK_CUR/SEEK_END style).
 *
 * Delegates to the backing shmem file's llseek and mirrors the resulting
 * position back into @file->f_pos.
 *
 * Return: the new offset on success, or a negative errno (-EINVAL before a
 * size has been set, -EBADF when no backing file exists yet).
 */
static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	/*
	 * Must be loff_t, not int: the backing llseek returns a 64-bit
	 * offset, and storing it in an int truncates positions >= 2GiB
	 * and corrupts the sign of the returned value.
	 */
	loff_t ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->llseek(asma->file, offset, origin);
	if (ret < 0)
		goto out;

	/** Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
/* Translates PROT_READ/WRITE/EXEC bits into the corresponding VM_MAYREAD/
 * VM_MAYWRITE/VM_MAYEXEC flags, used below to cap what mprotect() may
 * later enable on an ashmem mapping. */
static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}
/* Maps an ashmem region into the caller's address space.  Requires
 * ASHMEM_SET_SIZE to have been issued first; lazily creates the backing
 * shmem file on the first mmap.  Requested protections must be a subset
 * of the area's prot_mask. */
static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
		     calc_vm_prot_bits(PROT_MASK))) {
		ret = -EPERM;
		goto out;
	}

	/* Strip VM_MAY* bits prot_mask forbids, so mprotect() can never
	 * escalate beyond the allowed mask later. */
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		/* Use the user-supplied name only if one was set past the prefix. */
		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (unlikely(IS_ERR(vmfile))) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		asma->file = vmfile;
	}
	/* Reference for the mapping; dropped when the vma's file ref is put. */
	get_file(asma->file);

	if (vma->vm_flags & VM_SHARED)
		shmem_set_file(vma, asma->file);
	else {
		if (vma->vm_file)
			fput(vma->vm_file);
		vma->vm_file = asma->file;
	}

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
 *
 * 'nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects freed or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;
	unsigned long freed = 0;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	/* trylock: this runs under memory pressure, never wait for the lock. */
	if (!mutex_trylock(&ashmem_mutex))
		return -1;

	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;

		/* Punch the pages out of the backing file; the range itself
		 * stays on the area's unpinned list, marked as purged. */
		do_fallocate(range->asma->file,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				start, end - start);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		freed += range_size(range);
		if (--sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);
	return freed;
}
/* Shrinker 'count' callback: reports reclaimable pages (not objects). */
static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/*
	 * note that lru_count is count of pages on the lru, not a count of
	 * objects on the list. This means the scan function needs to return the
	 * number of pages freed, not the number of objects scanned.
	 */
	return lru_count;
}
/* Registered in ashmem_init(); drives LRU purging under memory pressure. */
static struct shrinker ashmem_shrinker = {
	.count_objects = ashmem_shrink_count,
	.scan_objects = ashmem_shrink_scan,
	/*
	 * XXX (dchinner): I wish people would comment on why they need on
	 * significant changes to the default value here
	 */
	.seeks = DEFAULT_SEEKS * 4,
};
/* ASHMEM_SET_PROT_MASK handler: narrows the allowed protection bits.
 * Return: 0 on success, -EINVAL if @prot tries to add bits not already
 * present in the current mask. */
static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
/* ASHMEM_SET_NAME handler: copies a user-supplied name (truncated to
 * ASHMEM_NAME_LEN) in after the internal prefix.  Fails with -EINVAL once
 * the backing file exists, since the name seeds shmem_file_setup(). */
static int set_name(struct ashmem_area *asma, void __user *name)
{
	int len;
	int ret = 0;
	char local_name[ASHMEM_NAME_LEN];

	/*
	 * Holding the ashmem_mutex while doing a copy_from_user might cause
	 * an data abort which would try to access mmap_sem. If another
	 * thread has invoked ashmem_mmap then it will be holding the
	 * semaphore and will be waiting for ashmem_mutex, there by leading to
	 * deadlock. We'll release the mutex and take the name to a local
	 * variable that does not need protection and later copy the local
	 * variable to the structure member with lock held.
	 */
	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
	if (len < 0)
		return len;
	/* strncpy_from_user() does not terminate on truncation; do it here. */
	if (len == ASHMEM_NAME_LEN)
		local_name[ASHMEM_NAME_LEN - 1] = '\0';
	mutex_lock(&ashmem_mutex);
	/* cannot change an existing mapping's name */
	if (unlikely(asma->file))
		ret = -EINVAL;
	else
		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);

	mutex_unlock(&ashmem_mutex);
	return ret;
}
/* ASHMEM_GET_NAME handler: copies the area's name (sans internal prefix),
 * or ASHMEM_NAME_DEF if none was set, out to userspace.
 * Return: 0 on success, -EFAULT on copy failure. */
static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	size_t len;
	/*
	 * Have a local variable to which we'll copy the content
	 * from asma with the lock held. Later we can copy this to the user
	 * space safely without holding any locks. So even if we proceed to
	 * wait for mmap_sem, it won't lead to deadlock.
	 */
	char local_name[ASHMEM_NAME_LEN];

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {

		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
	} else {
		len = sizeof(ASHMEM_NAME_DEF);
		memcpy(local_name, ASHMEM_NAME_DEF, len);
	}
	mutex_unlock(&ashmem_mutex);

	/*
	 * Now we are just copying from the stack variable to userland
	 * No lock held
	 */
	if (unlikely(copy_to_user(name, local_name, len)))
		ret = -EFAULT;
	return ret;
}
/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Pinning removes the overlap between [pgstart, pgend] and the area's
 * unpinned ranges, splitting or shrinking ranges as needed.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			/* Accumulate purge state across every touched range. */
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart, pgstart-1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}
/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Coalesces every existing unpinned range overlapping [pgstart, pgend]
 * into the request, then inserts a single merged range at the insertion
 * point found by the scan.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			/*
			 * Grow the request to cover the overlapping range,
			 * drop that range, and rescan the list.  (The first
			 * statement originally ended with a stray comma
			 * operator instead of a semicolon; it only worked by
			 * accident of C comma-expression semantics.)
			 */
			pgstart = min_t(size_t, range->pgstart, pgstart);
			pgend = max_t(size_t, range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}
/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		/* List is kept ordered, so we can stop past the interval. */
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}
/* Common entry point for the ASHMEM_PIN / ASHMEM_UNPIN /
 * ASHMEM_GET_PIN_STATUS ioctls: validates the user-supplied page-aligned
 * region (guarding against offset+len overflow), converts it to a page
 * interval and dispatches under ashmem_mutex. */
static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	if (unlikely(!asma->file))
		return -EINVAL;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
		return -EFAULT;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	/* offset and len must both be page-aligned */
	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		return -EINVAL;

	/* reject 32-bit wrap of offset + len */
	if (unlikely(((__u32) -1) - pin.offset < pin.len))
		return -EINVAL;

	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		return -EINVAL;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	mutex_lock(&ashmem_mutex);

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

	mutex_unlock(&ashmem_mutex);

	return ret;
}
/* Main ioctl dispatcher for /dev/ashmem.  Returns -ENOTTY for unknown
 * commands, otherwise the individual handler's result. */
static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *) arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *) arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		/* Size is only changeable before the backing file exists.
		 * NOTE(review): read/write of asma->size and asma->file here
		 * is done without ashmem_mutex, unlike the other handlers —
		 * looks racy against concurrent mmap; confirm before relying
		 * on it. */
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t) arg;
		}
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = LONG_MAX,
			};

			/* Report the pre-purge page count, then purge all. */
			ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
			nodes_setall(sc.nodes_to_scan);
			ashmem_shrink_scan(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}
/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
/* Compat ioctl shim: 32-bit userspace encodes SET_SIZE/SET_PROT_MASK with
 * a 32-bit argument size, so their command numbers differ; remap them to
 * the native commands and delegate.  All other commands pass through. */
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{

	switch (cmd) {
	case COMPAT_ASHMEM_SET_SIZE:
		cmd = ASHMEM_SET_SIZE;
		break;
	case COMPAT_ASHMEM_SET_PROT_MASK:
		cmd = ASHMEM_SET_PROT_MASK;
		break;
	}

	return ashmem_ioctl(file, cmd, arg);
}
#endif
/* File operations for /dev/ashmem. */
static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read = ashmem_read,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ashmem_ioctl,
#endif
};
/* The misc device descriptor backing /dev/ashmem. */
static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};
/**
 * ashmem_init() - Initializes the ashmem driver
 *
 * Creates the area and range slab caches, registers the misc device, and
 * finally registers the cache shrinker.  Every failure path now unwinds
 * the resources acquired so far (the original code leaked the slab caches
 * when a later step failed).
 *
 * Return: 0 if successful, or a negative errno otherwise.
 */
static int __init ashmem_init(void)
{
	int ret = -ENOMEM;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					  sizeof(struct ashmem_area),
					  0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		pr_err("failed to create slab cache\n");
		goto out;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
					  sizeof(struct ashmem_range),
					  0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		pr_err("failed to create slab cache\n");
		goto out_free_area_cache;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		pr_err("failed to register misc device!\n");
		goto out_free_range_cache;
	}

	register_shrinker(&ashmem_shrinker);

	pr_info("initialized\n");

	return 0;

out_free_range_cache:
	kmem_cache_destroy(ashmem_range_cachep);
out_free_area_cache:
	kmem_cache_destroy(ashmem_area_cachep);
out:
	return ret;
}
/* Module teardown: unregister the shrinker first so no purge can run
 * concurrently, then remove the device and destroy the slab caches. */
static void __exit ashmem_exit(void)
{
	int ret;

	unregister_shrinker(&ashmem_shrinker);

	ret = misc_deregister(&ashmem_misc);
	if (unlikely(ret))
		pr_err("failed to unregister misc device!\n");

	kmem_cache_destroy(ashmem_range_cachep);
	kmem_cache_destroy(ashmem_area_cachep);

	pr_info("unloaded\n");
}
module_init(ashmem_init);
module_exit(ashmem_exit);
MODULE_LICENSE("GPL");
| Raybuntu/linux | drivers/staging/android/ashmem.c | C | gpl-2.0 | 23,077 |
<?php
/**
* @package Joomla.Legacy
* @subpackage Model
*
* @copyright Copyright (C) 2005 - 2016 Open Source Matters, Inc. All rights reserved.
* @license GNU General Public License version 2 or later; see LICENSE
*/
defined('JPATH_PLATFORM') or die;
use Joomla\Registry\Registry;
/**
* Prototype admin model.
*
* @since 12.2
*/
abstract class JModelAdmin extends JModelForm
{
	/**
	 * The prefix to use with controller messages.
	 *
	 * @var    string
	 * @since  12.2
	 */
	protected $text_prefix = null;

	/**
	 * The event to trigger after deleting the data.
	 *
	 * @var    string
	 * @since  12.2
	 */
	protected $event_after_delete = null;

	/**
	 * The event to trigger after saving the data.
	 *
	 * @var    string
	 * @since  12.2
	 */
	protected $event_after_save = null;

	/**
	 * The event to trigger before deleting the data.
	 *
	 * @var    string
	 * @since  12.2
	 */
	protected $event_before_delete = null;

	/**
	 * The event to trigger before saving the data.
	 *
	 * @var    string
	 * @since  12.2
	 */
	protected $event_before_save = null;

	/**
	 * The event to trigger after changing the published state of the data.
	 *
	 * @var    string
	 * @since  12.2
	 */
	protected $event_change_state = null;

	/**
	 * Batch copy/move command. If set to false,
	 * the batch copy/move command is not supported
	 *
	 * @var    string|boolean
	 */
	protected $batch_copymove = 'category_id';

	/**
	 * Allowed batch commands. Maps a command identifier (form field name)
	 * to the method on this class that implements it.
	 *
	 * @var    array
	 */
	protected $batch_commands = array(
		'assetgroup_id' => 'batchAccess',
		'language_id' => 'batchLanguage',
		'tag' => 'batchTag'
	);

	/**
	 * The context used for the associations table
	 *
	 * @var    string
	 * @since  3.4.4
	 */
	protected $associationsContext = null;
	/**
	 * Constructor.
	 *
	 * Resolves the plugin event names (config overrides win over subclass
	 * defaults, which fall back to the onContent* events), merges the
	 * events map, and derives the JText message prefix.
	 *
	 * @param   array  $config  An optional associative array of configuration settings.
	 *
	 * @see     JModelLegacy
	 * @since   12.2
	 */
	public function __construct($config = array())
	{
		parent::__construct($config);

		// Event names: explicit config value > subclass default > core default.
		if (isset($config['event_after_delete']))
		{
			$this->event_after_delete = $config['event_after_delete'];
		}
		elseif (empty($this->event_after_delete))
		{
			$this->event_after_delete = 'onContentAfterDelete';
		}

		if (isset($config['event_after_save']))
		{
			$this->event_after_save = $config['event_after_save'];
		}
		elseif (empty($this->event_after_save))
		{
			$this->event_after_save = 'onContentAfterSave';
		}

		if (isset($config['event_before_delete']))
		{
			$this->event_before_delete = $config['event_before_delete'];
		}
		elseif (empty($this->event_before_delete))
		{
			$this->event_before_delete = 'onContentBeforeDelete';
		}

		if (isset($config['event_before_save']))
		{
			$this->event_before_save = $config['event_before_save'];
		}
		elseif (empty($this->event_before_save))
		{
			$this->event_before_save = 'onContentBeforeSave';
		}

		if (isset($config['event_change_state']))
		{
			$this->event_change_state = $config['event_change_state'];
		}
		elseif (empty($this->event_change_state))
		{
			$this->event_change_state = 'onContentChangeState';
		}

		// Plugin groups imported per operation; config entries override the defaults.
		$config['events_map'] = isset($config['events_map']) ? $config['events_map'] : array();

		$this->events_map = array_merge(
			array(
				'delete' => 'content',
				'save' => 'content',
				'change_state' => 'content',
				'validate' => 'content',
			), $config['events_map']
		);

		// Guess the JText message prefix. Defaults to the option.
		if (isset($config['text_prefix']))
		{
			$this->text_prefix = strtoupper($config['text_prefix']);
		}
		elseif (empty($this->text_prefix))
		{
			$this->text_prefix = strtoupper($this->option);
		}
	}
/**
* Method to perform batch operations on an item or a set of items.
*
* @param array $commands An array of commands to perform.
* @param array $pks An array of item ids.
* @param array $contexts An array of item contexts.
*
* @return boolean Returns true on success, false on failure.
*
* @since 12.2
*/
public function batch($commands, $pks, $contexts)
{
// Sanitize ids.
$pks = array_unique($pks);
JArrayHelper::toInteger($pks);
// Remove any values of zero.
if (array_search(0, $pks, true))
{
unset($pks[array_search(0, $pks, true)]);
}
if (empty($pks))
{
$this->setError(JText::_('JGLOBAL_NO_ITEM_SELECTED'));
return false;
}
$done = false;
// Set some needed variables.
$this->user = JFactory::getUser();
$this->table = $this->getTable();
$this->tableClassName = get_class($this->table);
$this->contentType = new JUcmType;
$this->type = $this->contentType->getTypeByTable($this->tableClassName);
$this->batchSet = true;
if ($this->type == false)
{
$type = new JUcmType;
$this->type = $type->getTypeByAlias($this->typeAlias);
}
$this->tagsObserver = $this->table->getObserverOfClass('JTableObserverTags');
if ($this->batch_copymove && !empty($commands[$this->batch_copymove]))
{
$cmd = JArrayHelper::getValue($commands, 'move_copy', 'c');
if ($cmd == 'c')
{
$result = $this->batchCopy($commands[$this->batch_copymove], $pks, $contexts);
if (is_array($result))
{
foreach ($result as $old => $new)
{
$contexts[$new] = $contexts[$old];
}
$pks = array_values($result);
}
else
{
return false;
}
}
elseif ($cmd == 'm' && !$this->batchMove($commands[$this->batch_copymove], $pks, $contexts))
{
return false;
}
$done = true;
}
foreach ($this->batch_commands as $identifier => $command)
{
if (strlen($commands[$identifier]) > 0)
{
if (!$this->$command($commands[$identifier], $pks, $contexts))
{
return false;
}
$done = true;
}
}
if (!$done)
{
$this->setError(JText::_('JLIB_APPLICATION_ERROR_INSUFFICIENT_BATCH_INFORMATION'));
return false;
}
// Clear the cache
$this->cleanCache();
return true;
}
	/**
	 * Batch access level changes for a group of rows.
	 *
	 * @param   integer  $value     The new value matching an Asset Group ID.
	 * @param   array    $pks       An array of row IDs.
	 * @param   array    $contexts  An array of item contexts.
	 *
	 * @return  boolean  True if successful, false otherwise and internal error is set.
	 *
	 * @since   12.2
	 */
	protected function batchAccess($value, $pks, $contexts)
	{
		// Initialise shared state unless batch() already did.
		if (empty($this->batchSet))
		{
			// Set some needed variables.
			$this->user = JFactory::getUser();
			$this->table = $this->getTable();
			$this->tableClassName = get_class($this->table);
			$this->contentType = new JUcmType;
			$this->type = $this->contentType->getTypeByTable($this->tableClassName);
		}

		foreach ($pks as $pk)
		{
			// Only rows the current user may edit are touched; anything else aborts.
			if ($this->user->authorise('core.edit', $contexts[$pk]))
			{
				$this->table->reset();
				$this->table->load($pk);
				$this->table->access = (int) $value;

				if (!empty($this->type))
				{
					$this->createTagsHelper($this->tagsObserver, $this->type, $pk, $this->typeAlias, $this->table);
				}

				if (!$this->table->store())
				{
					$this->setError($this->table->getError());

					return false;
				}
			}
			else
			{
				$this->setError(JText::_('JLIB_APPLICATION_ERROR_BATCH_CANNOT_EDIT'));

				return false;
			}
		}

		// Clean the cache
		$this->cleanCache();

		return true;
	}
	/**
	 * Batch copy items to a new category or current.
	 *
	 * @param   integer  $value     The new category.
	 * @param   array    $pks       An array of row IDs.
	 * @param   array    $contexts  An array of item contexts.
	 *
	 * @return  mixed  An array of new IDs on success (keyed by the old IDs), boolean false on failure.
	 *
	 * @since   12.2
	 */
	protected function batchCopy($value, $pks, $contexts)
	{
		// Initialise shared state unless batch() already did.
		if (empty($this->batchSet))
		{
			// Set some needed variables.
			$this->user = JFactory::getUser();
			$this->table = $this->getTable();
			$this->tableClassName = get_class($this->table);
			$this->contentType = new JUcmType;
			$this->type = $this->contentType->getTypeByTable($this->tableClassName);
		}

		$categoryId = $value;

		if (!static::checkCategoryId($categoryId))
		{
			return false;
		}

		$newIds = array();

		// Parent exists so let's proceed
		while (!empty($pks))
		{
			// Pop the first ID off the stack
			$pk = array_shift($pks);

			$this->table->reset();

			// Check that the row actually exists
			if (!$this->table->load($pk))
			{
				if ($error = $this->table->getError())
				{
					// Fatal error
					$this->setError($error);

					return false;
				}
				else
				{
					// Not fatal error: skip the missing row, keep copying the rest.
					$this->setError(JText::sprintf('JLIB_APPLICATION_ERROR_BATCH_MOVE_ROW_NOT_FOUND', $pk));
					continue;
				}
			}

			// Alter title/alias so the copy does not collide in the target category.
			static::generateTitle($categoryId, $this->table);

			// Reset the ID because we are making a copy
			$this->table->id = 0;

			// Unpublish because we are making a copy
			if (isset($this->table->published))
			{
				$this->table->published = 0;
			}
			elseif (isset($this->table->state))
			{
				$this->table->state = 0;
			}

			// New category ID
			$this->table->catid = $categoryId;

			// TODO: Deal with ordering?
			// $this->table->ordering = 1;

			// Check the row.
			if (!$this->table->check())
			{
				$this->setError($this->table->getError());

				return false;
			}

			if (!empty($this->type))
			{
				$this->createTagsHelper($this->tagsObserver, $this->type, $pk, $this->typeAlias, $this->table);
			}

			// Store the row.
			if (!$this->table->store())
			{
				$this->setError($this->table->getError());

				return false;
			}

			// Get the new item ID
			$newId = $this->table->get('id');

			// Add the new ID to the array
			$newIds[$pk] = $newId;
		}

		// Clean the cache
		$this->cleanCache();

		return $newIds;
	}
	/**
	 * Batch language changes for a group of rows.
	 *
	 * @param   string  $value     The new value matching a language.
	 * @param   array   $pks       An array of row IDs.
	 * @param   array   $contexts  An array of item contexts.
	 *
	 * @return  boolean  True if successful, false otherwise and internal error is set.
	 *
	 * @since   11.3
	 */
	protected function batchLanguage($value, $pks, $contexts)
	{
		// Initialise shared state unless batch() already did.
		if (empty($this->batchSet))
		{
			// Set some needed variables.
			$this->user = JFactory::getUser();
			$this->table = $this->getTable();
			$this->tableClassName = get_class($this->table);
			$this->contentType = new JUcmType;
			$this->type = $this->contentType->getTypeByTable($this->tableClassName);
		}

		foreach ($pks as $pk)
		{
			// Only rows the current user may edit are touched; anything else aborts.
			if ($this->user->authorise('core.edit', $contexts[$pk]))
			{
				$this->table->reset();
				$this->table->load($pk);
				$this->table->language = $value;

				if (!empty($this->type))
				{
					$this->createTagsHelper($this->tagsObserver, $this->type, $pk, $this->typeAlias, $this->table);
				}

				if (!$this->table->store())
				{
					$this->setError($this->table->getError());

					return false;
				}
			}
			else
			{
				$this->setError(JText::_('JLIB_APPLICATION_ERROR_BATCH_CANNOT_EDIT'));

				return false;
			}
		}

		// Clean the cache
		$this->cleanCache();

		return true;
	}
	/**
	 * Batch move items to a new category
	 *
	 * @param   integer  $value     The new category ID.
	 * @param   array    $pks       An array of row IDs.
	 * @param   array    $contexts  An array of item contexts.
	 *
	 * @return  boolean  True if successful, false otherwise and internal error is set.
	 *
	 * @since   12.2
	 */
	protected function batchMove($value, $pks, $contexts)
	{
		// Initialise shared state unless batch() already did.
		if (empty($this->batchSet))
		{
			// Set some needed variables.
			$this->user = JFactory::getUser();
			$this->table = $this->getTable();
			$this->tableClassName = get_class($this->table);
			$this->contentType = new JUcmType;
			$this->type = $this->contentType->getTypeByTable($this->tableClassName);
		}

		$categoryId = (int) $value;

		if (!static::checkCategoryId($categoryId))
		{
			return false;
		}

		// Parent exists so we proceed
		foreach ($pks as $pk)
		{
			if (!$this->user->authorise('core.edit', $contexts[$pk]))
			{
				$this->setError(JText::_('JLIB_APPLICATION_ERROR_BATCH_CANNOT_EDIT'));

				return false;
			}

			// Check that the row actually exists
			if (!$this->table->load($pk))
			{
				if ($error = $this->table->getError())
				{
					// Fatal error
					$this->setError($error);

					return false;
				}
				else
				{
					// Not fatal error: skip the missing row, keep moving the rest.
					$this->setError(JText::sprintf('JLIB_APPLICATION_ERROR_BATCH_MOVE_ROW_NOT_FOUND', $pk));
					continue;
				}
			}

			// Set the new category ID
			$this->table->catid = $categoryId;

			// Check the row.
			if (!$this->table->check())
			{
				$this->setError($this->table->getError());

				return false;
			}

			if (!empty($this->type))
			{
				$this->createTagsHelper($this->tagsObserver, $this->type, $pk, $this->typeAlias, $this->table);
			}

			// Store the row.
			if (!$this->table->store())
			{
				$this->setError($this->table->getError());

				return false;
			}
		}

		// Clean the cache
		$this->cleanCache();

		return true;
	}
	/**
	 * Batch tag a list of item.
	 *
	 * @param   integer  $value     The value of the new tag.
	 * @param   array    $pks       An array of row IDs.
	 * @param   array    $contexts  An array of item contexts.
	 *
	 * @return  boolean  True if successful, false otherwise and internal error is set.
	 *
	 * @since   3.1
	 */
	protected function batchTag($value, $pks, $contexts)
	{
		// Set the variables
		$user = JFactory::getUser();
		$table = $this->getTable();

		foreach ($pks as $pk)
		{
			// Only rows the current user may edit are touched; anything else aborts.
			if ($user->authorise('core.edit', $contexts[$pk]))
			{
				$table->reset();
				$table->load($pk);
				$tags = array($value);

				/**
				 * @var  JTableObserverTags  $tagsObserver
				 */
				$tagsObserver = $table->getObserverOfClass('JTableObserverTags');
				// Append the tag (false = do not replace the existing tags).
				$result = $tagsObserver->setNewTags($tags, false);

				if (!$result)
				{
					$this->setError($table->getError());

					return false;
				}
			}
			else
			{
				$this->setError(JText::_('JLIB_APPLICATION_ERROR_BATCH_CANNOT_EDIT'));

				return false;
			}
		}

		// Clean the cache
		$this->cleanCache();

		return true;
	}
/**
* Method to test whether a record can be deleted.
*
* @param object $record A record object.
*
* @return boolean True if allowed to delete the record. Defaults to the permission for the component.
*
* @since 12.2
*/
protected function canDelete($record)
{
$user = JFactory::getUser();
return $user->authorise('core.delete', $this->option);
}
/**
* Method to test whether a record can be deleted.
*
* @param object $record A record object.
*
* @return boolean True if allowed to change the state of the record. Defaults to the permission for the component.
*
* @since 12.2
*/
protected function canEditState($record)
{
$user = JFactory::getUser();
return $user->authorise('core.edit.state', $this->option);
}
	/**
	 * Method override to check-in a record or an array of record
	 *
	 * Falls back to the id held in model state when no keys are given.
	 * Rows that are not checked out are silently skipped and not counted.
	 *
	 * @param   mixed  $pks  The ID of the primary key or an array of IDs
	 *
	 * @return  mixed  Boolean false if there is an error, otherwise the count of records checked in.
	 *
	 * @since   12.2
	 */
	public function checkin($pks = array())
	{
		$pks = (array) $pks;
		$table = $this->getTable();
		$count = 0;

		if (empty($pks))
		{
			$pks = array((int) $this->getState($this->getName() . '.id'));
		}

		// Check in all items.
		foreach ($pks as $pk)
		{
			if ($table->load($pk))
			{
				// Only rows actually checked out need a check-in.
				if ($table->checked_out > 0)
				{
					if (!parent::checkin($pk))
					{
						return false;
					}

					$count++;
				}
			}
			else
			{
				$this->setError($table->getError());

				return false;
			}
		}

		return $count;
	}
/**
* Method override to check-out a record.
*
* @param integer $pk The ID of the primary key.
*
* @return boolean True if successful, false if an error occurs.
*
* @since 12.2
*/
public function checkout($pk = null)
{
$pk = (!empty($pk)) ? $pk : (int) $this->getState($this->getName() . '.id');
return parent::checkout($pk);
}
	/**
	 * Method to delete one or more records.
	 *
	 * Fires the configured before/after delete plugin events around each
	 * row, and cleans up the #__associations entries for multilingual
	 * items. Rows the user may not delete are pruned and logged, then the
	 * whole call fails.
	 *
	 * @param   array  &$pks  An array of record primary keys.
	 *
	 * @return  boolean  True if successful, false if an error occurs.
	 *
	 * @since   12.2
	 */
	public function delete(&$pks)
	{
		$dispatcher = JEventDispatcher::getInstance();
		$pks = (array) $pks;
		$table = $this->getTable();

		// Include the plugins for the delete events.
		JPluginHelper::importPlugin($this->events_map['delete']);

		// Iterate the items to delete each one.
		foreach ($pks as $i => $pk)
		{
			if ($table->load($pk))
			{
				if ($this->canDelete($table))
				{
					$context = $this->option . '.' . $this->name;

					// Trigger the before delete event.
					$result = $dispatcher->trigger($this->event_before_delete, array($context, $table));

					// Any plugin returning boolean false vetoes the delete.
					if (in_array(false, $result, true))
					{
						$this->setError($table->getError());

						return false;
					}

					// Multilanguage: if associated, delete the item in the _associations table
					if ($this->associationsContext && JLanguageAssociations::isEnabled())
					{
						$db = JFactory::getDbo();
						$query = $db->getQuery(true)
							->select('COUNT(*) as count, ' . $db->quoteName('as1.key'))
							->from($db->quoteName('#__associations') . ' AS as1')
							->join('LEFT', $db->quoteName('#__associations') . ' AS as2 ON ' . $db->quoteName('as1.key') . ' = ' . $db->quoteName('as2.key'))
							->where($db->quoteName('as1.context') . ' = ' . $db->quote($this->associationsContext))
							->where($db->quoteName('as1.id') . ' = ' . (int) $pk)
							->group($db->quoteName('as1.key'));

						$db->setQuery($query);
						$row = $db->loadAssoc();

						if (!empty($row['count']))
						{
							$query = $db->getQuery(true)
								->delete($db->quoteName('#__associations'))
								->where($db->quoteName('context') . ' = ' . $db->quote($this->associationsContext))
								->where($db->quoteName('key') . ' = ' . $db->quote($row['key']));

							// With more than two members the group survives; only drop this item's row.
							if ($row['count'] > 2)
							{
								$query->where($db->quoteName('id') . ' = ' . (int) $pk);
							}

							$db->setQuery($query);
							$db->execute();
						}
					}

					if (!$table->delete($pk))
					{
						$this->setError($table->getError());

						return false;
					}

					// Trigger the after event.
					$dispatcher->trigger($this->event_after_delete, array($context, $table));
				}
				else
				{
					// Prune items that you can't change.
					unset($pks[$i]);
					$error = $this->getError();

					if ($error)
					{
						JLog::add($error, JLog::WARNING, 'jerror');

						return false;
					}
					else
					{
						JLog::add(JText::_('JLIB_APPLICATION_ERROR_DELETE_NOT_PERMITTED'), JLog::WARNING, 'jerror');

						return false;
					}
				}
			}
			else
			{
				$this->setError($table->getError());

				return false;
			}
		}

		// Clear the component's cache
		$this->cleanCache();

		return true;
	}
	/**
	 * Method to change the title & alias.
	 *
	 * Increments both until the alias is unique within the given category.
	 *
	 * @param   integer  $category_id  The id of the category.
	 * @param   string   $alias        The alias.
	 * @param   string   $title        The title.
	 *
	 * @return	array  Contains the modified title and alias.
	 *
	 * @since	12.2
	 */
	protected function generateNewTitle($category_id, $alias, $title)
	{
		// Alter the title & alias
		$table = $this->getTable();

		// Keep bumping ("Title (2)", "title-2", ...) while a row with this
		// alias already exists in the target category.
		while ($table->load(array('alias' => $alias, 'catid' => $category_id)))
		{
			$title = JString::increment($title);
			$alias = JString::increment($alias, 'dash');
		}

		return array($title, $alias);
	}
	/**
	 * Method to get a single record.
	 *
	 * Loads the row (falling back to the id in model state), converts it
	 * to a JObject and unpacks the params column into an array.
	 *
	 * @param   integer  $pk  The id of the primary key.
	 *
	 * @return  mixed  Object on success, false on failure.
	 *
	 * @since   12.2
	 */
	public function getItem($pk = null)
	{
		$pk = (!empty($pk)) ? $pk : (int) $this->getState($this->getName() . '.id');
		$table = $this->getTable();

		if ($pk > 0)
		{
			// Attempt to load the row.
			$return = $table->load($pk);

			// Check for a table object error.
			if ($return === false && $table->getError())
			{
				$this->setError($table->getError());

				return false;
			}
		}

		// Convert to the JObject before adding other data.
		$properties = $table->getProperties(1);
		$item = JArrayHelper::toObject($properties, 'JObject');

		if (property_exists($item, 'params'))
		{
			// Decode the JSON/INI params string into a plain array.
			$registry = new Registry;
			$registry->loadString($item->params);
			$item->params = $registry->toArray();
		}

		return $item;
	}
/**
* A protected method to get a set of ordering conditions.
*
* @param JTable $table A JTable object.
*
* @return array An array of conditions to add to ordering queries.
*
* @since 12.2
*/
protected function getReorderConditions($table)
{
	// No extra WHERE conditions by default; subclasses override this to
	// scope reordering (e.g. per category or per client).
	return array();
}
/**
* Stock method to auto-populate the model state.
*
* @return void
*
* @since 12.2
*/
protected function populateState()
{
	// Pull the record id from the request, keyed by the table's primary key name.
	$keyName = $this->getTable()->getKeyName();
	$this->setState($this->getName() . '.id', JFactory::getApplication()->input->getInt($keyName));

	// Load the component parameters into the state.
	$this->setState('params', JComponentHelper::getParams($this->option));
}
/**
* Prepare and sanitise the table data prior to saving.
*
* @param JTable $table A reference to a JTable object.
*
* @return void
*
* @since 12.2
*/
protected function prepareTable($table)
{
	// Intentionally empty: derived classes hook in here to sanitise or
	// adjust the row just before it is checked and stored.
}
/**
* Method to change the published state of one or more records.
*
* @param array &$pks A list of the primary keys to change.
* @param integer $value The value of the published state.
*
* @return boolean True on success.
*
* @since 12.2
*/
public function publish(&$pks, $value = 1)
{
	$dispatcher = JEventDispatcher::getInstance();
	$user = JFactory::getUser();
	$table = $this->getTable();
	$pks = (array) $pks;

	// Include the plugins for the change of state event.
	JPluginHelper::importPlugin($this->events_map['change_state']);

	// Access checks: validate every requested row before touching any state.
	foreach ($pks as $i => $pk)
	{
		$table->reset();

		// NOTE(review): rows that fail to load are silently left in $pks
		// and passed on to JTable::publish() below.
		if ($table->load($pk))
		{
			if (!$this->canEditState($table))
			{
				// Prune items that you can't change.
				unset($pks[$i]);
				JLog::add(JText::_('JLIB_APPLICATION_ERROR_EDITSTATE_NOT_PERMITTED'), JLog::WARNING, 'jerror');

				// The whole batch is aborted on the first non-editable item.
				return false;
			}

			// If the table is checked out by another user, drop it and report to the user trying to change its state.
			if (property_exists($table, 'checked_out') && $table->checked_out && ($table->checked_out != $user->id))
			{
				JLog::add(JText::_('JLIB_APPLICATION_ERROR_CHECKIN_USER_MISMATCH'), JLog::WARNING, 'jerror');

				// Prune items that you can't change.
				unset($pks[$i]);

				return false;
			}
		}
	}

	// Attempt to change the state of the records in a single JTable call.
	if (!$table->publish($pks, $value, $user->get('id')))
	{
		$this->setError($table->getError());

		return false;
	}

	$context = $this->option . '.' . $this->name;

	// Trigger the change state event; any strict false from a plugin vetoes the change.
	$result = $dispatcher->trigger($this->event_change_state, array($context, $pks, $value));

	if (in_array(false, $result, true))
	{
		$this->setError($table->getError());

		return false;
	}

	// Clear the component's cache
	$this->cleanCache();

	return true;
}
/**
* Method to adjust the ordering of a row.
*
* Returns NULL if the user did not have edit
* privileges for any of the selected primary keys.
*
* @param integer $pks The ID of the primary key to move.
* @param integer $delta Increment, usually +1 or -1
*
* @return mixed False on failure or error, true on success, null if the $pk is empty (no items selected).
*
* @since 12.2
*/
public function reorder($pks, $delta = 0)
{
	$table = $this->getTable();
	$pks = (array) $pks;
	$result = true;

	// Tracks whether at least one item passed the edit-state check.
	$allowed = true;

	foreach ($pks as $i => $pk)
	{
		$table->reset();

		// Only proceed when the row loads AND we can check it out.
		if ($table->load($pk) && $this->checkout($pk))
		{
			// Access checks.
			if (!$this->canEditState($table))
			{
				// Prune items that you can't change; release the checkout first.
				unset($pks[$i]);
				$this->checkin($pk);
				JLog::add(JText::_('JLIB_APPLICATION_ERROR_EDITSTATE_NOT_PERMITTED'), JLog::WARNING, 'jerror');
				$allowed = false;
				continue;
			}

			// Move within the subset defined by the subclass's reorder conditions.
			$where = $this->getReorderConditions($table);

			if (!$table->move($delta, $where))
			{
				$this->setError($table->getError());
				unset($pks[$i]);
				$result = false;
			}

			// Always check the row back in, whether or not the move succeeded.
			$this->checkin($pk);
		}
		else
		{
			$this->setError($table->getError());
			unset($pks[$i]);
			$result = false;
		}
	}

	// NULL signals "nothing was movable": every pk was pruned by the access check.
	if ($allowed === false && empty($pks))
	{
		$result = null;
	}

	// Clear the component's cache (loose comparison: skipped for null/false).
	if ($result == true)
	{
		$this->cleanCache();
	}

	return $result;
}
/**
* Method to save the form data.
*
* @param array $data The form data.
*
* @return boolean True on success, False on error.
*
* @since 12.2
*/
public function save($data)
{
	$dispatcher = JEventDispatcher::getInstance();
	$table = $this->getTable();
	$context = $this->option . '.' . $this->name;

	// Hand new tags to the table's tag observer, if any were submitted.
	if ((!empty($data['tags']) && $data['tags'][0] != ''))
	{
		$table->newTags = $data['tags'];
	}

	// Resolve the primary key from the form data, else from the model state.
	$key = $table->getKeyName();
	$pk = (!empty($data[$key])) ? $data[$key] : (int) $this->getState($this->getName() . '.id');
	$isNew = true;

	// Include the plugins for the save events.
	JPluginHelper::importPlugin($this->events_map['save']);

	// Allow an exception to be thrown.
	try
	{
		// Load the row if saving an existing record.
		if ($pk > 0)
		{
			$table->load($pk);
			$isNew = false;
		}

		// Bind the data.
		if (!$table->bind($data))
		{
			$this->setError($table->getError());

			return false;
		}

		// Prepare the row for saving
		$this->prepareTable($table);

		// Check the data.
		if (!$table->check())
		{
			$this->setError($table->getError());

			return false;
		}

		// Trigger the before save event; a strict false from any plugin vetoes the save.
		$result = $dispatcher->trigger($this->event_before_save, array($context, $table, $isNew));

		if (in_array(false, $result, true))
		{
			$this->setError($table->getError());

			return false;
		}

		// Store the data.
		if (!$table->store())
		{
			$this->setError($table->getError());

			return false;
		}

		// Clean the cache.
		$this->cleanCache();

		// Trigger the after save event.
		$dispatcher->trigger($this->event_after_save, array($context, $table, $isNew));
	}
	catch (Exception $e)
	{
		$this->setError($e->getMessage());

		return false;
	}

	// Push the (possibly new) id and the new-record flag back into the state.
	if (isset($table->$key))
	{
		$this->setState($this->getName() . '.id', $table->$key);
	}

	$this->setState($this->getName() . '.new', $isNew);

	// Maintain multilingual associations when the feature is enabled.
	if ($this->associationsContext && JLanguageAssociations::isEnabled() && !empty($data['associations']))
	{
		$associations = $data['associations'];

		// Cast all submitted association ids to integers.
		$associations = Joomla\Utilities\ArrayHelper::toInteger($associations);

		// Unset any invalid associations
		foreach ($associations as $tag => $id)
		{
			if (!$id)
			{
				unset($associations[$tag]);
			}
		}

		// Show a notice if the item isn't assigned to a language but we have associations.
		if ($associations && ($table->language == '*'))
		{
			JFactory::getApplication()->enqueueMessage(
				JText::_(strtoupper($this->option) . '_ERROR_ALL_LANGUAGE_ASSOCIATED'),
				'notice'
			);
		}

		// Adding self to the association
		$associations[$table->language] = (int) $table->$key;

		// Deleting old association for these items
		$db = $this->getDbo();
		$query = $db->getQuery(true)
			->delete($db->qn('#__associations'))
			->where($db->qn('context') . ' = ' . $db->quote($this->associationsContext))
			->where($db->qn('id') . ' IN (' . implode(',', $associations) . ')');
		$db->setQuery($query);
		$db->execute();

		if ((count($associations) > 1) && ($table->language != '*'))
		{
			// Adding new association for these items.
			// NOTE: $key is deliberately repurposed here as the shared
			// association hash; the table key name is no longer needed.
			$key = md5(json_encode($associations));
			$query = $db->getQuery(true)
				->insert('#__associations');

			// NOTE(review): column-less INSERT relies on the VALUES order
			// matching the #__associations schema (id, context, key) — verify.
			foreach ($associations as $id)
			{
				$query->values(((int) $id) . ',' . $db->quote($this->associationsContext) . ',' . $db->quote($key));
			}

			$db->setQuery($query);
			$db->execute();
		}
	}

	return true;
}
/**
* Saves the manually set order of records.
*
* @param array $pks An array of primary key ids.
* @param integer $order +1 or -1
*
* @return mixed
*
* @since 12.2
*/
public function saveorder($pks = null, $order = null)
{
	$table = $this->getTable();
	$tableClassName = get_class($table);
	$contentType = new JUcmType;
	$type = $contentType->getTypeByTable($tableClassName);
	$tagsObserver = $table->getObserverOfClass('JTableObserverTags');

	// Collected as array($pk, $reorderConditions) pairs, one per distinct condition set.
	$conditions = array();

	if (empty($pks))
	{
		return JError::raiseWarning(500, JText::_($this->text_prefix . '_ERROR_NO_ITEMS_SELECTED'));
	}

	// Update ordering values; $order is indexed in parallel with $pks.
	foreach ($pks as $i => $pk)
	{
		$table->load((int) $pk);

		// Access checks.
		if (!$this->canEditState($table))
		{
			// Prune items that you can't change (batch continues, unlike publish()).
			unset($pks[$i]);
			JLog::add(JText::_('JLIB_APPLICATION_ERROR_EDITSTATE_NOT_PERMITTED'), JLog::WARNING, 'jerror');
		}
		elseif ($table->ordering != $order[$i])
		{
			$table->ordering = $order[$i];

			// Attach a tags helper so storing the row keeps tag mappings intact.
			if ($type)
			{
				$this->createTagsHelper($tagsObserver, $type, $pk, $type->type_alias, $table);
			}

			if (!$table->store())
			{
				$this->setError($table->getError());

				return false;
			}

			// Remember to reorder within position and client_id
			$condition = $this->getReorderConditions($table);
			$found = false;

			// Only record one representative pk per distinct condition set.
			foreach ($conditions as $cond)
			{
				if ($cond[1] == $condition)
				{
					$found = true;
					break;
				}
			}

			if (!$found)
			{
				$key = $table->getKeyName();
				$conditions[] = array($table->$key, $condition);
			}
		}
	}

	// Execute reorder for each category.
	foreach ($conditions as $cond)
	{
		$table->load($cond[0]);
		$table->reorder($cond[1]);
	}

	// Clear the component's cache
	$this->cleanCache();

	return true;
}
/**
* Method to create a tags helper to ensure proper management of tags
*
* @param JTableObserverTags $tagsObserver The tags observer for this table
* @param JUcmType $type The type for the table being processed
* @param integer $pk Primary key of the item bing processed
* @param string $typeAlias The type alias for this table
* @param JTable $table The JTable object
*
* @return void
*
* @since 3.2
*/
public function createTagsHelper($tagsObserver, $type, $pk, $typeAlias, $table)
{
	// Nothing to do unless both a tags observer and a UCM type are available.
	if (empty($tagsObserver) || empty($type))
	{
		return;
	}

	// Build a helper preloaded with the item's current tag ids and attach it
	// to the table so the observer can manage tags on store.
	$helper = new JHelperTags;
	$helper->typeAlias = $typeAlias;
	$helper->tags = explode(',', $helper->getTagIds($pk, $typeAlias));

	$table->tagsHelper = $helper;
}
/**
* Method to check the validity of the category ID for batch copy and move
*
* @param integer $categoryId The category ID to check
*
* @return boolean
*
* @since 3.2
*/
protected function checkCategoryId($categoryId)
{
	// A falsy category id can never be a valid batch target.
	if (empty($categoryId))
	{
		$this->setError(JText::_('JLIB_APPLICATION_ERROR_BATCH_MOVE_CATEGORY_NOT_FOUND'));

		return false;
	}

	// Check that the category exists.
	$categoryTable = JTable::getInstance('Category');

	if (!$categoryTable->load($categoryId))
	{
		// Prefer the table's own error message; fall back to the generic one.
		$error = $categoryTable->getError();
		$this->setError($error ? $error : JText::_('JLIB_APPLICATION_ERROR_BATCH_MOVE_CATEGORY_NOT_FOUND'));

		return false;
	}

	// Check that the user has create permission for the component.
	$extension = JFactory::getApplication()->input->get('option', '');

	if (!$this->user->authorise('core.create', $extension . '.category.' . $categoryId))
	{
		$this->setError(JText::_('JLIB_APPLICATION_ERROR_BATCH_CANNOT_CREATE'));

		return false;
	}

	return true;
}
/**
* A method to preprocess generating a new title in order to allow tables with alternative names
* for alias and title to use the batch move and copy methods
*
* @param integer $categoryId The target category id
* @param JTable $table The JTable within which move or copy is taking place
*
* @return void
*
* @since 3.2
*/
public function generateTitle($categoryId, $table)
{
	// Compute a unique title/alias pair for the target category and write
	// both values back onto the table row.
	list($table->title, $table->alias) = $this->generateNewTitle($categoryId, $table->alias, $table->title);
}
}
| nikitadhiman/gsoc16_shareable-draft-content | libraries/legacy/model/admin.php | PHP | gpl-2.0 | 32,829 |
/*
* drivers/usb/gadget/s3c_udc.h
* Samsung S3C on-chip full/high speed USB device controllers
* Copyright (C) 2005 for Samsung Electronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#ifndef __S3C_USB_GADGET
#define __S3C_USB_GADGET
#include <asm/errno.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/list.h>
#include <usb/lin_gadget_compat.h>
#define PHY0_SLEEP (1 << 5)
/*-------------------------------------------------------------------------*/
/* DMA bounce buffer size, 16K is enough even for mass storage */
#define DMA_BUFFER_SIZE (4096*4)
#define EP0_FIFO_SIZE 64
#define EP_FIFO_SIZE 512
#define EP_FIFO_SIZE2 1024
/* ep0-control, ep1in-bulk, ep2out-bulk, ep3in-int */
#define S3C_MAX_ENDPOINTS 4
#define S3C_MAX_HW_ENDPOINTS 16
#define WAIT_FOR_SETUP 0
#define DATA_STATE_XMIT 1
#define DATA_STATE_NEED_ZLP 2
#define WAIT_FOR_OUT_STATUS 3
#define DATA_STATE_RECV 4
#define WAIT_FOR_COMPLETE 5
#define WAIT_FOR_OUT_COMPLETE 6
#define WAIT_FOR_IN_COMPLETE 7
#define WAIT_FOR_NULL_COMPLETE 8
#define TEST_J_SEL 0x1
#define TEST_K_SEL 0x2
#define TEST_SE0_NAK_SEL 0x3
#define TEST_PACKET_SEL 0x4
#define TEST_FORCE_ENABLE_SEL 0x5
/* ************************************************************************* */
/* IO
*/
/* Endpoint transfer type, derived from the endpoint's configuration. */
enum ep_type {
	ep_control, ep_bulk_in, ep_bulk_out, ep_interrupt
};

/* Driver-private state for one endpoint. */
struct s3c_ep {
	struct usb_ep ep;		/* gadget-layer endpoint handle */
	struct s3c_udc *dev;		/* owning controller instance */

	const struct usb_endpoint_descriptor *desc;	/* active descriptor */
	struct list_head queue;		/* pending s3c_request list */
	unsigned long pio_irqs;		/* PIO interrupt count -- TODO confirm */

	int len;			/* current transfer length -- TODO confirm */
	void *dma_buf;			/* DMA bounce buffer for this endpoint */

	u8 stopped;			/* nonzero while the endpoint is halted */
	u8 bEndpointAddress;		/* USB address incl. direction bit */
	u8 bmAttributes;		/* USB descriptor attributes */

	enum ep_type ep_type;		/* transfer type (see enum above) */
	int fifo_num;			/* hardware FIFO index */
};

/* A queued gadget request plus its link into the endpoint's queue. */
struct s3c_request {
	struct usb_request req;		/* gadget-layer request */
	struct list_head queue;		/* link in s3c_ep.queue */
};

/* Top-level device controller state. */
struct s3c_udc {
	struct usb_gadget gadget;		/* gadget-layer device handle */
	struct usb_gadget_driver *driver;	/* bound function driver */
	struct s3c_plat_otg_data *pdata;	/* platform OTG configuration */

	void *dma_buf[S3C_MAX_ENDPOINTS+1];	/* per-EP bounce buffers */
	dma_addr_t dma_addr[S3C_MAX_ENDPOINTS+1];	/* their bus addresses */

	int ep0state;				/* EP0 state machine (WAIT_FOR_* / DATA_STATE_*) */
	struct s3c_ep ep[S3C_MAX_ENDPOINTS];	/* endpoint table */

	unsigned char usb_address;		/* assigned USB device address */

	unsigned req_pending:1, req_std:1;	/* setup-request flags -- TODO confirm semantics */
};
extern struct s3c_udc *the_controller;
#define ep_is_in(EP) (((EP)->bEndpointAddress&USB_DIR_IN) == USB_DIR_IN)
#define ep_index(EP) ((EP)->bEndpointAddress&0xF)
#define ep_maxpacket(EP) ((EP)->ep.maxpacket)
/*-------------------------------------------------------------------------*/
/* #define DEBUG_UDC */
#ifdef DEBUG_UDC
#define DBG(stuff...) printf("udc: " stuff)
#else
#define DBG(stuff...) do {} while (0)
#endif
#ifdef DEBUG_S3C_UDC_SETUP
#define DEBUG_SETUP(fmt, args...) printk(fmt, ##args)
#else
#define DEBUG_SETUP(fmt, args...) do {} while (0)
#endif
#ifdef DEBUG_S3C_UDC_EP0
#define DEBUG_EP0(fmt, args...) printk(fmt, ##args)
#else
#define DEBUG_EP0(fmt, args...) do {} while (0)
#endif
#ifdef DEBUG_S3C_UDC_ISR
#define DEBUG_ISR 1
#else
#define DEBUG_ISR 0
#endif
#ifdef DEBUG_S3C_UDC_OUT_EP
#define DEBUG_OUT_EP(fmt, args...) printk(fmt, ##args)
#else
#define DEBUG_OUT_EP(fmt, args...) do {} while (0)
#endif
#ifdef DEBUG_S3C_UDC_IN_EP
#define DEBUG_IN_EP 1
#else
#define DEBUG_IN_EP 0
#endif
#if defined(DEBUG_S3C_UDC_SETUP) || defined(DEBUG_S3C_UDC_EP0) || \
defined(DEBUG_S3C_UDC_ISR) || defined(DEBUG_S3C_UDC_OUT_EP) || \
defined(DEBUG_S3C_UDC_IN_EP) || defined(DEBUG_S3C_UDC)
#define DEBUG
#endif
#define ERR(stuff...) printf("ERR udc: " stuff)
#define WARN(stuff...) printf("WARNING udc: " stuff)
#define INFO(stuff...) printf("INFO udc: " stuff)
extern void otg_phy_init(struct s3c_udc *dev);
extern void otg_phy_off(struct s3c_udc *dev);
extern void s3c_udc_ep_set_stall(struct s3c_ep *ep);
extern int s3c_udc_probe(struct s3c_plat_otg_data *pdata);
struct s3c_plat_otg_data {
int (*phy_control)(int on);
unsigned int regs_phy;
unsigned int regs_otg;
unsigned int usb_phy_ctrl;
unsigned int usb_flags;
};
#endif
| czc0713/Mst786 | include/usb/s3c_udc.h | C | gpl-2.0 | 4,511 |
/*-
* BSD LICENSE
*
* Copyright(c) 2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Some portions of this software may have been derived from the
* https://github.com/halayli/lthread which carrys the following license.
*
* Copyright (C) 2012, Hasan Alayli <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef LTHREAD_INT_H
#include <lthread_api.h>
#define LTHREAD_INT_H
#include <stdint.h>
#include <sys/time.h>
#include <sys/types.h>
#include <errno.h>
#include <pthread.h>
#include <time.h>
#include <rte_cycles.h>
#include <rte_per_lcore.h>
#include <rte_timer.h>
#include <rte_ring.h>
#include <rte_atomic_64.h>
#include <rte_spinlock.h>
#include <ctx.h>
#include <lthread_api.h>
#include "lthread.h"
#include "lthread_diag.h"
#include "lthread_tls.h"
struct lthread;
struct lthread_sched;
struct lthread_cond;
struct lthread_mutex;
struct lthread_key;
struct key_pool;
struct qnode;
struct qnode_pool;
struct lthread_sched;
struct lthread_tls;
#define BIT(x) (1 << (x))
#define CLEARBIT(x) ~(1 << (x))
#define POSIX_ERRNO(x) (x)
#define MAX_LTHREAD_NAME_SIZE 64
#define RTE_LOGTYPE_LTHREAD RTE_LOGTYPE_USER1
/* define some shorthand for current scheduler and current thread */
#define THIS_SCHED RTE_PER_LCORE(this_sched)
#define THIS_LTHREAD RTE_PER_LCORE(this_sched)->current_lthread
/*
* Definition of an scheduler struct
*/
struct lthread_sched {
struct ctx ctx; /* cpu context */
uint64_t birth; /* time created */
struct lthread *current_lthread; /* running thread */
unsigned lcore_id; /* this sched lcore */
int run_flag; /* sched shutdown */
uint64_t nb_blocked_threads; /* blocked threads */
struct lthread_queue *ready; /* local ready queue */
struct lthread_queue *pready; /* peer ready queue */
struct lthread_objcache *lthread_cache; /* free lthreads */
struct lthread_objcache *stack_cache; /* free stacks */
struct lthread_objcache *per_lthread_cache; /* free per lthread */
struct lthread_objcache *tls_cache; /* free TLS */
struct lthread_objcache *cond_cache; /* free cond vars */
struct lthread_objcache *mutex_cache; /* free mutexes */
struct qnode_pool *qnode_pool; /* pool of queue nodes */
struct key_pool *key_pool; /* pool of free TLS keys */
size_t stack_size;
uint64_t diag_ref; /* diag ref */
} __rte_cache_aligned;
RTE_DECLARE_PER_LCORE(struct lthread_sched *, this_sched);
/*
* State for an lthread
*/
enum lthread_st {
ST_LT_INIT, /* initial state */
ST_LT_READY, /* lthread is ready to run */
ST_LT_SLEEPING, /* lthread is sleeping */
ST_LT_EXPIRED, /* lthread timeout has expired */
ST_LT_EXITED, /* lthread has exited and needs cleanup */
ST_LT_DETACH, /* lthread frees on exit*/
ST_LT_CANCELLED, /* lthread has been cancelled */
};
/*
* lthread sub states for exit/join
*/
enum join_st {
LT_JOIN_INITIAL, /* initial state */
LT_JOIN_EXITING, /* thread is exiting */
LT_JOIN_THREAD_SET, /* joining thread has been set */
LT_JOIN_EXIT_VAL_SET, /* exiting thread has set ret val */
LT_JOIN_EXIT_VAL_READ, /* joining thread has collected ret val */
};
/* Definition of an lthread stack object */
struct lthread_stack {
	uint8_t stack[LTHREAD_MAX_STACK_SIZE];	/* stack storage */
	size_t stack_size;			/* configured stack size */
	struct lthread_sched *root_sched;	/* scheduler that owns this stack */
} __rte_cache_aligned;

/*
 * Definition of an lthread
 */
struct lthread {
	struct ctx ctx;				/* cpu context */

	uint64_t state;				/* current lthread state */

	struct lthread_sched *sched;		/* current scheduler */
	void *stack;				/* ptr to actual stack */
	size_t stack_size;			/* current stack_size */
	size_t last_stack_size;			/* last yield stack_size */
	lthread_func_t fun;			/* func ctx is running */
	void *arg;				/* func args passed to func */
	void *per_lthread_data;			/* per lthread user data */
	lthread_exit_func exit_handler;		/* called when thread exits */
	uint64_t birth;				/* time lthread was born */
	struct lthread_queue *pending_wr_queue;	/* deferred queue to write */
	struct lthread *lt_join;		/* lthread to join on */
	uint64_t join;				/* state for joining (enum join_st) */
	void **lt_exit_ptr;			/* exit ptr for lthread_join */
	struct lthread_sched *root_sched;	/* thread was created here */
	struct queue_node *qnode;		/* node when in a queue */
	struct rte_timer tim;			/* sleep timer */
	struct lthread_tls *tls;		/* keys in use by the thread */
	struct lthread_stack *stack_container;	/* stack */
	char funcname[MAX_LTHREAD_NAME_SIZE];	/* thread func name */
	uint64_t diag_ref;			/* ref to user diag data */
} __rte_cache_aligned;
/*
* Assert
*/
#if LTHREAD_DIAG
#define LTHREAD_ASSERT(expr) do { \
if (!(expr)) \
rte_panic("line%d\tassert \"" #expr "\" failed\n", __LINE__);\
} while (0)
#else
#define LTHREAD_ASSERT(expr) do {} while (0)
#endif
#endif /* LTHREAD_INT_H */
| rsalveti/dpdk | examples/performance-thread/common/lthread_int.h | C | gpl-2.0 | 7,635 |
/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
// This file is available under and governed by the GNU General Public
// License version 2 only, as published by the Free Software Foundation.
// However, the following notice accompanied the original version of this
// file:
//
//---------------------------------------------------------------------------------
//
// Little Color Management System
// Copyright (c) 1998-2016 Marti Maria Saguer
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
//---------------------------------------------------------------------------------
//
#include "lcms2_internal.h"
#define cmsmin(a, b) (((a) < (b)) ? (a) : (b))
#define cmsmax(a, b) (((a) > (b)) ? (a) : (b))
// This file contains routines for resampling and LUT optimization, black point detection
// and black preservation.
// Black point detection -------------------------------------------------------------------------
// PCS -> PCS round trip transform, always uses relative intent on the device -> pcs
// Builds the PCS -> PCS round trip as a 4-profile chain:
// Lab -> device (requested intent) -> device -> Lab, with the Lab legs
// always relative colorimetric and no BPC/optimization/caching.
static
cmsHTRANSFORM CreateRoundtripXForm(cmsHPROFILE hProfile, cmsUInt32Number nIntent)
{
    cmsContext ContextID = cmsGetProfileContextID(hProfile);
    cmsHPROFILE hLab = cmsCreateLab4ProfileTHR(ContextID, NULL);
    cmsBool BPC[4] = { FALSE, FALSE, FALSE, FALSE };
    cmsFloat64Number States[4] = { 1.0, 1.0, 1.0, 1.0 };
    cmsHPROFILE hProfiles[4] = { hLab, hProfile, hProfile, hLab };
    cmsUInt32Number Intents[4] = { INTENT_RELATIVE_COLORIMETRIC, nIntent,
                                   INTENT_RELATIVE_COLORIMETRIC, INTENT_RELATIVE_COLORIMETRIC };
    cmsHTRANSFORM xform;

    xform = cmsCreateExtendedTransform(ContextID, 4, hProfiles, BPC, Intents,
                                       States, NULL, 0, TYPE_Lab_DBL, TYPE_Lab_DBL,
                                       cmsFLAGS_NOCACHE | cmsFLAGS_NOOPTIMIZE);

    // The temporary Lab profile is owned by the transform now; release our ref.
    cmsCloseProfile(hLab);

    return xform;
}
// Use darker colorants to obtain black point. This works in the relative colorimetric intent and
// assumes more ink results in darker colors. No ink limit is assumed.
// Estimates the black point by transforming the space's darkest colorant
// combination through the profile into Lab, neutralizing chroma, clipping
// L* to 50, and converting back to XYZ. Returns FALSE (with *BlackPoint
// zeroed) on any failure.
// NOTE(review): the error paths dereference BlackPoint unconditionally while
// the success path checks it for NULL — callers must pass a non-NULL pointer.
static
cmsBool BlackPointAsDarkerColorant(cmsHPROFILE hInput,
                                   cmsUInt32Number Intent,
                                   cmsCIEXYZ* BlackPoint,
                                   cmsUInt32Number dwFlags)
{
    cmsUInt16Number *Black;
    cmsHTRANSFORM xform;
    cmsColorSpaceSignature Space;
    cmsUInt32Number nChannels;
    cmsUInt32Number dwFormat;
    cmsHPROFILE hLab;
    cmsCIELab Lab;
    cmsCIEXYZ BlackXYZ;
    cmsContext ContextID = cmsGetProfileContextID(hInput);

    // If the profile does not support input direction, assume Black point 0
    if (!cmsIsIntentSupported(hInput, Intent, LCMS_USED_AS_INPUT)) {

        BlackPoint -> X = BlackPoint ->Y = BlackPoint -> Z = 0.0;
        return FALSE;
    }

    // Create a formatter which has n channels and floating point
    dwFormat = cmsFormatterForColorspaceOfProfile(hInput, 2, FALSE);

    // Try to get black by using black colorant
    Space = cmsGetColorSpace(hInput);

    // This function returns darker colorant in 16 bits for several spaces
    if (!_cmsEndPointsBySpace(Space, NULL, &Black, &nChannels)) {

        BlackPoint -> X = BlackPoint ->Y = BlackPoint -> Z = 0.0;
        return FALSE;
    }

    // Channel count from the endpoint table must match the formatter's.
    if (nChannels != T_CHANNELS(dwFormat)) {
        BlackPoint -> X = BlackPoint ->Y = BlackPoint -> Z = 0.0;
        return FALSE;
    }

    // Lab will be used as the output space, but lab2 will avoid recursion
    hLab = cmsCreateLab2ProfileTHR(ContextID, NULL);
    if (hLab == NULL) {
        BlackPoint -> X = BlackPoint ->Y = BlackPoint -> Z = 0.0;
        return FALSE;
    }

    // Create the transform
    xform = cmsCreateTransformTHR(ContextID, hInput, dwFormat,
                                  hLab, TYPE_Lab_DBL, Intent, cmsFLAGS_NOOPTIMIZE|cmsFLAGS_NOCACHE);
    cmsCloseProfile(hLab);

    if (xform == NULL) {

        // Something went wrong. Get rid of open resources and return zero as black
        BlackPoint -> X = BlackPoint ->Y = BlackPoint -> Z = 0.0;
        return FALSE;
    }

    // Convert black to Lab
    cmsDoTransform(xform, Black, &Lab, 1);

    // Force it to be neutral, clip to max. L* of 50
    Lab.a = Lab.b = 0;
    if (Lab.L > 50) Lab.L = 50;

    // Free the resources
    cmsDeleteTransform(xform);

    // Convert from Lab (which is now clipped) to XYZ.
    cmsLab2XYZ(NULL, &BlackXYZ, &Lab);

    if (BlackPoint != NULL)
        *BlackPoint = BlackXYZ;

    return TRUE;

    // lcms idiom to silence the unused-parameter warning; never executed.
    cmsUNUSED_PARAMETER(dwFlags);
}
// Get a black point of output CMYK profile, discounting any ink-limiting embedded
// in the profile. For doing that, we use perceptual intent in input direction:
// Lab (0, 0, 0) -> [Perceptual] Profile -> CMYK -> [Rel. colorimetric] Profile -> Lab
// Black point of an output profile discounting embedded ink limiting:
// push Lab(0,0,0) through the profile's perceptual round trip, neutralize
// chroma, clip L* to 50 and convert the result to XYZ.
static
cmsBool BlackPointUsingPerceptualBlack(cmsCIEXYZ* BlackPoint, cmsHPROFILE hProfile)
{
    cmsHTRANSFORM hRoundTrip;
    cmsCIELab LabIn, LabOut;
    cmsCIEXYZ BlackXYZ;

    // Without perceptual support there is nothing to measure; report zero black.
    if (!cmsIsIntentSupported(hProfile, INTENT_PERCEPTUAL, LCMS_USED_AS_INPUT)) {
        BlackPoint->X = BlackPoint->Y = BlackPoint->Z = 0.0;
        return TRUE;
    }

    hRoundTrip = CreateRoundtripXForm(hProfile, INTENT_PERCEPTUAL);
    if (hRoundTrip == NULL) {
        BlackPoint->X = BlackPoint->Y = BlackPoint->Z = 0.0;
        return FALSE;
    }

    // Run pure black through the round trip.
    LabIn.L = LabIn.a = LabIn.b = 0;
    cmsDoTransform(hRoundTrip, &LabIn, &LabOut, 1);
    cmsDeleteTransform(hRoundTrip);

    // Clip Lab to reasonable limits and force neutrality.
    LabOut.a = LabOut.b = 0;
    if (LabOut.L > 50) LabOut.L = 50;

    // Convert it to XYZ
    cmsLab2XYZ(NULL, &BlackXYZ, &LabOut);

    if (BlackPoint != NULL)
        *BlackPoint = BlackXYZ;

    return TRUE;
}
// This function shouldn't exist at all -- there is such quantity of broken
// profiles on black point tag, that we must somehow fix chromaticity to
// avoid huge tint when doing Black point compensation. This function does
// just that. There is a special flag for using black point tag, but turned
// off by default because it is bogus on most profiles. The detection algorithm
// involves to turn BP to neutral and to use only L component.
// Detects the black point of a profile for the given intent. Strategy:
//   - reject device-link/abstract/named-color classes and unsupported intents;
//   - v4 profiles with perceptual/saturation intent use the fixed ICC
//     perceptual black (matrix-shapers fall back to the darker-colorant probe);
//   - optionally (compile-time flag) trust the media black point tag for
//     relative colorimetric;
//   - v2 output CMYK profiles discount ink limiting via the perceptual probe;
//   - everything else uses the darker-colorant probe with the current intent.
// Returns FALSE with *BlackPoint zeroed when detection is not possible.
cmsBool CMSEXPORT cmsDetectBlackPoint(cmsCIEXYZ* BlackPoint, cmsHPROFILE hProfile, cmsUInt32Number Intent, cmsUInt32Number dwFlags)
{
    cmsProfileClassSignature devClass;

    // Make sure the device class is adequate
    devClass = cmsGetDeviceClass(hProfile);
    if (devClass == cmsSigLinkClass ||
        devClass == cmsSigAbstractClass ||
        devClass == cmsSigNamedColorClass) {

        BlackPoint -> X = BlackPoint ->Y = BlackPoint -> Z = 0.0;
        return FALSE;
    }

    // Make sure intent is adequate
    if (Intent != INTENT_PERCEPTUAL &&
        Intent != INTENT_RELATIVE_COLORIMETRIC &&
        Intent != INTENT_SATURATION) {
        BlackPoint -> X = BlackPoint ->Y = BlackPoint -> Z = 0.0;
        return FALSE;
    }

    // v4 + perceptual & saturation intents does have its own black point, and it is
    // well specified enough to use it. Black point tag is deprecated in V4.
    if ((cmsGetEncodedICCversion(hProfile) >= 0x4000000) &&
        (Intent == INTENT_PERCEPTUAL || Intent == INTENT_SATURATION)) {

        // Matrix shaper share MRC & perceptual intents
        if (cmsIsMatrixShaper(hProfile))
            return BlackPointAsDarkerColorant(hProfile, INTENT_RELATIVE_COLORIMETRIC, BlackPoint, 0);

        // Get Perceptual black out of v4 profiles. That is fixed for perceptual & saturation intents
        BlackPoint -> X = cmsPERCEPTUAL_BLACK_X;
        BlackPoint -> Y = cmsPERCEPTUAL_BLACK_Y;
        BlackPoint -> Z = cmsPERCEPTUAL_BLACK_Z;
        return TRUE;
    }

#ifdef CMS_USE_PROFILE_BLACK_POINT_TAG
    // v2, v4 rel/abs colorimetric — off by default: the tag is bogus on many profiles.
    if (cmsIsTag(hProfile, cmsSigMediaBlackPointTag) &&
        Intent == INTENT_RELATIVE_COLORIMETRIC) {

        cmsCIEXYZ *BlackPtr, BlackXYZ, UntrustedBlackPoint, TrustedBlackPoint, MediaWhite;
        cmsCIELab Lab;

        // If black point is specified, then use it,
        BlackPtr = cmsReadTag(hProfile, cmsSigMediaBlackPointTag);
        if (BlackPtr != NULL) {

            BlackXYZ = *BlackPtr;
            _cmsReadMediaWhitePoint(&MediaWhite, hProfile);

            // Black point is absolute XYZ, so adapt to D50 to get PCS value
            cmsAdaptToIlluminant(&UntrustedBlackPoint, &MediaWhite, cmsD50_XYZ(), &BlackXYZ);

            // Force a=b=0 to get rid of any chroma
            cmsXYZ2Lab(NULL, &Lab, &UntrustedBlackPoint);
            Lab.a = Lab.b = 0;
            if (Lab.L > 50) Lab.L = 50; // Clip to L* <= 50
            cmsLab2XYZ(NULL, &TrustedBlackPoint, &Lab);

            if (BlackPoint != NULL)
                *BlackPoint = TrustedBlackPoint;

            return TRUE;
        }
    }
#endif

    // That is about v2 profiles.

    // If output profile, discount ink-limiting and that's all
    if (Intent == INTENT_RELATIVE_COLORIMETRIC &&
        (cmsGetDeviceClass(hProfile) == cmsSigOutputClass) &&
        (cmsGetColorSpace(hProfile) == cmsSigCmykData))
        return BlackPointUsingPerceptualBlack(BlackPoint, hProfile);

    // Nope, compute BP using current intent.
    return BlackPointAsDarkerColorant(hProfile, Intent, BlackPoint, dwFlags);
}
// ---------------------------------------------------------------------------------------------------------
// Least Squares Fit of a Quadratic Curve to Data
// http://www.personal.psu.edu/jhm/f90/lectures/lsq2.html
// Fits y = a*x^2 + b*x + c to the n samples (x[i], y[i]) by least squares
// (solving the 3x3 normal equations) and returns the root of the fitted
// curve, clamped to the usable L* range [0, 50].
//
// Returns 0 when there are too few samples (n < 4), when the normal
// equations are singular, when the quadratic has no real root, or when the
// fit is degenerate.
//
// Fix: in the nearly-linear branch the clamp was inverted
// (cmsmin(0, cmsmax(50, -c/b))), which always evaluated to 0; it now clamps
// -c/b into [0, 50] like the quadratic branch does. A division-by-zero guard
// for a flat fit (|b| ~ 0) is also added.
static
cmsFloat64Number RootOfLeastSquaresFitQuadraticCurve(int n, cmsFloat64Number x[], cmsFloat64Number y[])
{
    double sum_x = 0, sum_x2 = 0, sum_x3 = 0, sum_x4 = 0;
    double sum_y = 0, sum_yx = 0, sum_yx2 = 0;
    double d, a, b, c;
    int i;
    cmsMAT3 m;
    cmsVEC3 v, res;

    // A quadratic fit needs a minimum number of samples to be meaningful.
    if (n < 4) return 0;

    // Accumulate the moments needed by the normal equations.
    for (i = 0; i < n; i++) {

        double xn = x[i];
        double yn = y[i];

        sum_x   += xn;
        sum_x2  += xn*xn;
        sum_x3  += xn*xn*xn;
        sum_x4  += xn*xn*xn*xn;

        sum_y   += yn;
        sum_yx  += yn*xn;
        sum_yx2 += yn*xn*xn;
    }

    // Build and solve M * res = v; res holds (c, b, a) in that order.
    _cmsVEC3init(&m.v[0], n,      sum_x,  sum_x2);
    _cmsVEC3init(&m.v[1], sum_x,  sum_x2, sum_x3);
    _cmsVEC3init(&m.v[2], sum_x2, sum_x3, sum_x4);

    _cmsVEC3init(&v, sum_y, sum_yx, sum_yx2);

    if (!_cmsMAT3solve(&res, &m, &v)) return 0;

    a = res.n[2];
    b = res.n[1];
    c = res.n[0];

    if (fabs(a) < 1.0E-10) {

        // Nearly linear: root is -c/b. Guard against a flat fit, then clamp
        // to [0, 50] (the original inverted min/max here, always returning 0).
        if (fabs(b) < 1.0E-10) return 0;
        return cmsmax(0, cmsmin(50, -c/b));
    }
    else {

        // Quadratic: take the larger real root, clamped to [0, 50].
        d = b*b - 4.0 * a * c;
        if (d <= 0) {
            return 0;   // no real root
        }
        else {
            double rt = (-b + sqrt(d)) / (2.0 * a);
            return cmsmax(0, cmsmin(50, rt));
        }
    }
}
// Calculates the black point of a destination profile.
// This algorithm comes from the Adobe paper disclosing its black point compensation method.
// Detects the black point of a destination profile using the algorithm from
// Adobe's black point compensation paper: round-trip a neutral L* ramp
// through the profile, then either accept the initial guess (straight
// mid-range) or least-squares-fit the shadow region of the curve.
//
// BlackPoint : receives the detected black point in XYZ.
// hProfile   : destination profile.
// Intent     : rendering intent (perceptual, relative colorimetric, saturation).
// dwFlags    : flags forwarded to the fallback detectors.
//
// Returns TRUE on success, FALSE (usually with BlackPoint zeroed) on failure.
cmsBool CMSEXPORT cmsDetectDestinationBlackPoint(cmsCIEXYZ* BlackPoint, cmsHPROFILE hProfile, cmsUInt32Number Intent, cmsUInt32Number dwFlags)
{
    cmsColorSpaceSignature ColorSpace;
    cmsHTRANSFORM hRoundTrip = NULL;
    cmsCIELab InitialLab, destLab, Lab;

    cmsFloat64Number inRamp[256], outRamp[256];   // L* ramp in and after round trip
    cmsFloat64Number MinL, MaxL;
    cmsBool NearlyStraightMidrange = TRUE;
    cmsFloat64Number yRamp[256];                  // outRamp normalized to [0, 1]
    cmsFloat64Number x[256], y[256];              // shadow samples for the curve fit
    cmsFloat64Number lo, hi;
    int n, l;
    cmsProfileClassSignature devClass;

    // Make sure the device class is adequate: device-link, abstract and
    // named-color profiles have no meaningful black point.
    devClass = cmsGetDeviceClass(hProfile);
    if (devClass == cmsSigLinkClass ||
        devClass == cmsSigAbstractClass ||
        devClass == cmsSigNamedColorClass) {
        BlackPoint -> X = BlackPoint ->Y = BlackPoint -> Z = 0.0;
        return FALSE;
    }

    // Make sure intent is adequate
    if (Intent != INTENT_PERCEPTUAL &&
        Intent != INTENT_RELATIVE_COLORIMETRIC &&
        Intent != INTENT_SATURATION) {
        BlackPoint -> X = BlackPoint ->Y = BlackPoint -> Z = 0.0;
        return FALSE;
    }

    // v4 + perceptual & saturation intents does have its own black point, and it is
    // well specified enough to use it. Black point tag is deprecated in V4.
    if ((cmsGetEncodedICCversion(hProfile) >= 0x4000000) &&
        (Intent == INTENT_PERCEPTUAL || Intent == INTENT_SATURATION)) {

        // Matrix shaper share MRC & perceptual intents
        if (cmsIsMatrixShaper(hProfile))
            return BlackPointAsDarkerColorant(hProfile, INTENT_RELATIVE_COLORIMETRIC, BlackPoint, 0);

        // Get Perceptual black out of v4 profiles. That is fixed for perceptual & saturation intents
        BlackPoint -> X = cmsPERCEPTUAL_BLACK_X;
        BlackPoint -> Y = cmsPERCEPTUAL_BLACK_Y;
        BlackPoint -> Z = cmsPERCEPTUAL_BLACK_Z;
        return TRUE;
    }

    // Check if the profile is lut based and gray, rgb or cmyk (7.2 in Adobe's document)
    ColorSpace = cmsGetColorSpace(hProfile);
    if (!cmsIsCLUT(hProfile, Intent, LCMS_USED_AS_OUTPUT ) ||
        (ColorSpace != cmsSigGrayData &&
         ColorSpace != cmsSigRgbData  &&
         ColorSpace != cmsSigCmykData)) {

        // In this case, handle as input case
        return cmsDetectBlackPoint(BlackPoint, hProfile, Intent, dwFlags);
    }

    // It is one of the valid cases!, use Adobe algorithm

    // Set a first guess, that should work on good profiles.
    if (Intent == INTENT_RELATIVE_COLORIMETRIC) {

        cmsCIEXYZ IniXYZ;

        // calculate initial Lab as source black point
        if (!cmsDetectBlackPoint(&IniXYZ, hProfile, Intent, dwFlags)) {
            return FALSE;
        }

        // convert the XYZ to lab
        cmsXYZ2Lab(NULL, &InitialLab, &IniXYZ);

    } else {

        // set the initial Lab to zero, that should be the black point for perceptual and saturation
        InitialLab.L = 0;
        InitialLab.a = 0;
        InitialLab.b = 0;
    }

    // Step 2
    // ======
    // Create a roundtrip. Define a Transform BT for all x in L*a*b*
    hRoundTrip = CreateRoundtripXForm(hProfile, Intent);
    if (hRoundTrip == NULL)  return FALSE;

    // Compute ramps: sweep L* from 0 to 100 with a/b pinned near the initial
    // guess, and record the round-tripped L* for each step.
    for (l=0; l < 256; l++) {

        Lab.L = (cmsFloat64Number) (l * 100.0) / 255.0;
        Lab.a = cmsmin(50, cmsmax(-50, InitialLab.a));
        Lab.b = cmsmin(50, cmsmax(-50, InitialLab.b));

        cmsDoTransform(hRoundTrip, &Lab, &destLab, 1);

        inRamp[l]  = Lab.L;
        outRamp[l] = destLab.L;
    }

    // Make monotonic (enforce non-decreasing output, sweeping downwards)
    for (l = 254; l > 0; --l) {
        outRamp[l] = cmsmin(outRamp[l], outRamp[l+1]);
    }

    // Check: the ramp must actually rise, otherwise the profile is unusable here.
    if (! (outRamp[0] < outRamp[255])) {

        cmsDeleteTransform(hRoundTrip);
        BlackPoint -> X = BlackPoint ->Y = BlackPoint -> Z = 0.0;
        return FALSE;
    }

    // Test for mid range straight (only on relative colorimetric)
    NearlyStraightMidrange = TRUE;
    MinL = outRamp[0]; MaxL = outRamp[255];
    if (Intent == INTENT_RELATIVE_COLORIMETRIC) {

        for (l=0; l < 256; l++) {

            // A point may deviate only if it lies in the bottom 20% of the range.
            if (! ((inRamp[l] <= MinL + 0.2 * (MaxL - MinL) ) ||
                   (fabs(inRamp[l] - outRamp[l]) < 4.0 )))
                NearlyStraightMidrange = FALSE;
        }

        // If the mid range is straight (as determined above) then the
        // DestinationBlackPoint shall be the same as initialLab.
        // Otherwise, the DestinationBlackPoint shall be determined
        // using curve fitting.
        if (NearlyStraightMidrange) {

            cmsLab2XYZ(NULL, BlackPoint, &InitialLab);
            cmsDeleteTransform(hRoundTrip);
            return TRUE;
        }
    }

    // curve fitting: The round-trip curve normally looks like a nearly constant section at the black point,
    // with a corner and a nearly straight line to the white point.
    // (MaxL > MinL is guaranteed by the outRamp[0] < outRamp[255] check above.)
    for (l=0; l < 256; l++) {

        yRamp[l] = (outRamp[l] - MinL) / (MaxL - MinL);
    }

    // find the black point using the least squares error quadratic curve fitting
    if (Intent == INTENT_RELATIVE_COLORIMETRIC) {
        lo = 0.1;
        hi = 0.5;
    }
    else {

        // Perceptual and saturation
        lo = 0.03;
        hi = 0.25;
    }

    // Capture shadow points for the fitting.
    n = 0;
    for (l=0; l < 256; l++) {

        cmsFloat64Number ff = yRamp[l];

        if (ff >= lo && ff < hi) {
            x[n] = inRamp[l];
            y[n] = yRamp[l];
            n++;
        }
    }

    // No suitable points
    if (n < 3 ) {
        cmsDeleteTransform(hRoundTrip);
        BlackPoint -> X = BlackPoint ->Y = BlackPoint -> Z = 0.0;
        return FALSE;
    }

    // fit and get the vertex of quadratic curve
    Lab.L = RootOfLeastSquaresFitQuadraticCurve(n, x, y);

    if (Lab.L < 0.0) { // clip to zero L* if the vertex is negative
        Lab.L = 0;
    }

    Lab.a = InitialLab.a;
    Lab.b = InitialLab.b;

    cmsLab2XYZ(NULL, BlackPoint, &Lab);

    cmsDeleteTransform(hRoundTrip);
    return TRUE;
}
| FauxFaux/jdk9-jdk | src/java.desktop/share/native/liblcms/cmssamp.c | C | gpl-2.0 | 19,167 |
#!/bin/bash
# Build helper for taobao abs: stamps Version/Release into the RPM spec
# (choosing the .el4 or .el5 suffix from /etc/redhat-release) and builds
# the TFS rpms.
#
# Arguments:
#   $1  temporary build tree (must contain packages/ and build.sh)
#   $2  spec file base name (without .spec)
#   $3  version to stamp into the spec
#   $4  release number to stamp into the spec
#
# Improvements: abort on the first failed command (set -e) and quote all
# variable expansions so paths containing spaces cannot break the script.

set -e

temppath="$1"
specname="$2"
version="$3"
release="$4"

cd "$temppath/packages"

# RHEL major version decides the Release suffix (.el4 vs .el5).
if [ "$(cat /etc/redhat-release | cut -d " " -f 7 | cut -d "." -f 1)" = 4 ]
then
    sed -i "s/^Release:.*$/Release: ${release}.el4/" "${specname}.spec"
else
    sed -i "s/^Release:.*$/Release: ${release}.el5/" "${specname}.spec"
fi
sed -i "s/^Version:.*$/Version: ${version}/" "${specname}.spec"

cd "$temppath"
chmod +x build.sh
./build.sh init
export TBLIB_ROOT=/opt/csr/common
./configure
make PREFIX=/home/admin/tfs TMP_DIR="/home/ads/tmp/tfs-tmp.$$" rpms
mv *.rpm rpm/
| sfdazsdf/tfs | rpm/tfs-build.sh | Shell | gpl-2.0 | 470 |
/* Declarations shared by the `uname' and `arch' programs.
   Fix: added a multiple-inclusion guard, which this header lacked.  */

#ifndef UNAME_H
# define UNAME_H 1

/* This is for the `uname' program. */
# define UNAME_UNAME 1

/* This is for the `arch' program. */
# define UNAME_ARCH 2

/* Which program is running: one of the UNAME_* values above.  */
extern int uname_mode;

#endif /* UNAME_H */
| jwendt/coreutils-sort | src/uname.h | C | gpl-3.0 | 146 |
Proj4js.defs["EPSG:30166"] = "+proj=tmerc +lat_0=36 +lon_0=136 +k=0.9999 +x_0=0 +y_0=0 +ellps=bessel +units=m +no_defs"; | debard/georchestra-ird | mapfishapp/src/main/webapp/lib/proj4js/lib/defs/EPSG30166.js | JavaScript | gpl-3.0 | 120 |
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is built by ./mktables from e.g. UnicodeData.txt.
# Any changes made here will be lost!
#
# This file supports:
# \p{Punct}
#
# Meaning: [[:Punct:]]
#
return <<'END';
0021 0023
0025 002A
002C 002F
003A 003B
003F 0040
005B 005D
005F
007B
007D
00A1
00AB
00B7
00BB
00BF
037E
0387
055A 055F
0589 058A
05BE
05C0
05C3
05F3 05F4
060C 060D
061B
061F
066A 066D
06D4
0700 070D
0964 0965
0970
0DF4
0E4F
0E5A 0E5B
0F04 0F12
0F3A 0F3D
0F85
104A 104F
10FB
1361 1368
166D 166E
169B 169C
16EB 16ED
1735 1736
17D4 17D6
17D8 17DA
1800 180A
1944 1945
2010 2027
2030 2043
2045 2051
2053 2054
2057
207D 207E
208D 208E
2329 232A
23B4 23B6
2768 2775
27E6 27EB
2983 2998
29D8 29DB
29FC 29FD
3001 3003
3008 3011
3014 301F
3030
303D
30A0
30FB
FD3E FD3F
FE30 FE52
FE54 FE61
FE63
FE68
FE6A FE6B
FF01 FF03
FF05 FF0A
FF0C FF0F
FF1A FF1B
FF1F FF20
FF3B FF3D
FF3F
FF5B
FF5D
FF5F FF65
10100 10101
1039F
END
| vtselfa/Multi2Sim | src/benchmarks/spec2006/400.perlbench/lib/unicore/lib/Punct.pl | Perl | gpl-3.0 | 1,052 |
<?php
use Illuminate\Database\Migrations\Migration;
use Illuminate\Database\Schema\Blueprint;
/**
 * Migration that creates the `storage` table: one row per storage area
 * (disk / partition / filesystem) polled on a monitored device.
 */
class CreateStorageTable extends Migration
{
    /**
     * Run the migrations.
     *
     * Creates the table with size/usage counters and per-row warning
     * threshold. A row is unique per (device_id, storage_mib, storage_index).
     *
     * @return void
     */
    public function up()
    {
        Schema::create('storage', function (Blueprint $table) {
            $table->increments('storage_id');                       // primary key
            $table->unsignedInteger('device_id')->index();          // owning device
            $table->string('storage_mib', 16);                      // MIB the entry came from
            $table->string('storage_index', 64)->nullable();        // SNMP index within that MIB
            $table->string('storage_type', 32)->nullable();
            $table->text('storage_descr');                          // human-readable description
            $table->bigInteger('storage_size');                     // total size (in storage_units blocks)
            $table->integer('storage_units');                       // block/allocation unit size
            $table->bigInteger('storage_used')->default(0);
            $table->bigInteger('storage_free')->default(0);
            $table->integer('storage_perc')->default(0);            // percent used
            $table->integer('storage_perc_warn')->nullable()->default(60); // warn threshold (percent)
            $table->boolean('storage_deleted')->default(0);         // soft-delete flag
            $table->unique(['device_id', 'storage_mib', 'storage_index']);
        });
    }

    /**
     * Reverse the migrations.
     *
     * @return void
     */
    public function down()
    {
        Schema::drop('storage');
    }
}
| rasssta/librenms | database/migrations/2018_07_03_091314_create_storage_table.php | PHP | gpl-3.0 | 1,270 |
/*******************************************************
HIDAPI - Multi-Platform library for
communication with HID devices.
Alan Ott
Signal 11 Software
2010-07-03
Copyright 2010, All Rights Reserved.
At the discretion of the user of this library,
this software may be licensed under the terms of the
GNU Public License v3, a BSD-Style license, or the
original HIDAPI license as outlined in the LICENSE.txt,
LICENSE-gpl3.txt, LICENSE-bsd.txt, and LICENSE-orig.txt
files located at the root of the source distribution.
These files may also be found in the public source
code repository located at:
http://github.com/signal11/hidapi .
********************************************************/
#include "../../SDL_internal.h"
#ifdef SDL_JOYSTICK_HIDAPI
/* See Apple Technical Note TN2187 for details on IOHidManager. */
#include <IOKit/hid/IOHIDManager.h>
#include <IOKit/hid/IOHIDKeys.h>
#include <CoreFoundation/CoreFoundation.h>
#include <wchar.h>
#include <locale.h>
#include <pthread.h>
#include <sys/time.h>
#include <unistd.h>
#include "hidapi.h"
/* Barrier implementation because Mac OSX doesn't have pthread_barrier.
It also doesn't have clock_gettime(). So much for POSIX and SUSv2.
This implementation came from Brent Priddy and was posted on
StackOverflow. It is used with his permission. */
/* Barrier attributes are accepted for API compatibility but ignored. */
typedef int pthread_barrierattr_t;

/* Condition-variable based barrier state. */
typedef struct pthread_barrier {
    pthread_mutex_t mutex;      /* protects count */
    pthread_cond_t cond;        /* broadcast when the barrier trips */
    int count;                  /* threads currently waiting */
    int trip_count;             /* threads required before release */
} pthread_barrier_t;
/* Minimal pthread_barrier_init() replacement for Mac OS X.
   The attribute argument is ignored; `count` is the number of threads that
   must reach the barrier before any of them is released.
   Returns 0 on success, -1 on failure (errno is set for an invalid count). */
static int pthread_barrier_init(pthread_barrier_t *barrier, const pthread_barrierattr_t *attr, unsigned int count)
{
    if (count == 0) {
        errno = EINVAL;
        return -1;
    }

    if (pthread_mutex_init(&barrier->mutex, 0) < 0)
        return -1;

    if (pthread_cond_init(&barrier->cond, 0) < 0) {
        /* Roll the mutex back so nothing is leaked on failure. */
        pthread_mutex_destroy(&barrier->mutex);
        return -1;
    }

    barrier->trip_count = count;
    barrier->count = 0;

    return 0;
}
/* Destroy the condition variable and mutex backing the barrier.
   Always returns 0 (the underlying destroy results are ignored). */
static int pthread_barrier_destroy(pthread_barrier_t *barrier)
{
    pthread_cond_destroy(&barrier->cond);
    pthread_mutex_destroy(&barrier->mutex);
    return 0;
}

/* Block until trip_count threads have called this function.
   Returns 1 in exactly one of the released threads (the one that trips the
   barrier) and 0 in the others, mirroring PTHREAD_BARRIER_SERIAL_THREAD
   semantics. The barrier is reusable: count is reset on each trip. */
static int pthread_barrier_wait(pthread_barrier_t *barrier)
{
    pthread_mutex_lock(&barrier->mutex);
    ++(barrier->count);
    if(barrier->count >= barrier->trip_count)
    {
        /* Last thread in: reset for reuse and release everyone. */
        barrier->count = 0;
        pthread_cond_broadcast(&barrier->cond);
        pthread_mutex_unlock(&barrier->mutex);
        return 1;
    }
    else
    {
        /* NOTE(review): a single pthread_cond_wait() with no re-check of a
           generation/predicate can release early on a spurious wakeup —
           acceptable here only because callers use it as a one-shot
           rendezvous; confirm before reusing elsewhere. */
        pthread_cond_wait(&barrier->cond, &(barrier->mutex));
        pthread_mutex_unlock(&barrier->mutex);
        return 0;
    }
}
/* Pops the oldest queued input report into `data` (see definition below). */
static int return_data(hid_device *dev, unsigned char *data, size_t length);

/* Linked List of input reports received from the device. */
struct input_report {
    uint8_t *data;              /* report bytes (heap copy) */
    size_t len;                 /* number of bytes in data */
    struct input_report *next;  /* next (older-to-newer) report, or NULL */
};

/* Per-open-device state. One background read thread per device pumps the
   device's run loop and appends reports to input_reports. */
struct hid_device_ {
    IOHIDDeviceRef device_handle;       /* underlying IOKit device */
    int blocking;                       /* nonzero: hid_read blocks */
    int uses_numbered_reports;
    int disconnected;                   /* set by the removal callback */
    CFStringRef run_loop_mode;          /* private mode name ("HIDAPI_%p") */
    CFRunLoopRef run_loop;              /* read thread's run loop */
    CFRunLoopSourceRef source;          /* used to wake/stop the run loop */
    uint8_t *input_report_buf;          /* buffer IOKit fills with reports */
    CFIndex max_input_report_len;
    struct input_report *input_reports; /* FIFO of received reports */

    pthread_t thread;                   /* the read thread */
    pthread_mutex_t mutex; /* Protects input_reports */
    pthread_cond_t condition;
    pthread_barrier_t barrier; /* Ensures correct startup sequence */
    pthread_barrier_t shutdown_barrier; /* Ensures correct shutdown sequence */
    int shutdown_thread;
};

/* Node in the global list of open devices; lets the removal callback verify
   a device is still open before touching it. */
struct hid_device_list_node
{
    struct hid_device_ *dev;
    struct hid_device_list_node *next;
};

/* Process-wide IOHIDManager instance (lazily created by hid_init). */
static IOHIDManagerRef hid_mgr = 0x0;
/* Head of the open-device list, guarded only by the main thread's usage. */
static struct hid_device_list_node *device_list = 0x0;
/* Allocate and initialize a fresh hid_device with default state and all
   thread primitives (mutex, condition, startup and shutdown barriers)
   ready for use. NOTE(review): the calloc result is not NULL-checked. */
static hid_device *new_hid_device(void)
{
    hid_device *dev = (hid_device*)calloc(1, sizeof(hid_device));
    /* Explicit assignments (redundant after calloc for integers, but
       guarantees proper NULL pointers on all platforms). */
    dev->device_handle = NULL;
    dev->blocking = 1;      /* reads block by default */
    dev->uses_numbered_reports = 0;
    dev->disconnected = 0;
    dev->run_loop_mode = NULL;
    dev->run_loop = NULL;
    dev->source = NULL;
    dev->input_report_buf = NULL;
    dev->input_reports = NULL;
    dev->shutdown_thread = 0;

    /* Thread objects */
    pthread_mutex_init(&dev->mutex, NULL);
    pthread_cond_init(&dev->condition, NULL);
    pthread_barrier_init(&dev->barrier, NULL, 2);           /* main + read thread */
    pthread_barrier_init(&dev->shutdown_barrier, NULL, 2);  /* main + read thread */

    return dev;
}
/* Release everything owned by `dev`: queued input reports, CF objects, the
   input buffer, its entry in the global open-device list, the thread
   primitives, and finally the struct itself. Safe to call with NULL. */
static void free_hid_device(hid_device *dev)
{
    if (!dev)
        return;

    /* Delete any input reports still left over. */
    struct input_report *rpt = dev->input_reports;
    while (rpt) {
        struct input_report *next = rpt->next;
        free(rpt->data);
        free(rpt);
        rpt = next;
    }

    /* Free the string and the report buffer. The check for NULL
       is necessary here as CFRelease() doesn't handle NULL like
       free() and others do. */
    if (dev->run_loop_mode)
        CFRelease(dev->run_loop_mode);
    if (dev->source)
        CFRelease(dev->source);
    free(dev->input_report_buf);

    /* Unlink this device from the global open-device list so the removal
       callback can no longer find it. */
    if (device_list) {
        if (device_list->dev == dev) {
            /* Head of the list. */
            device_list = device_list->next;
        }
        else {
            struct hid_device_list_node *node = device_list;
            while (node) {
                if (node->next && node->next->dev == dev) {
                    struct hid_device_list_node *new_next = node->next->next;
                    free(node->next);
                    node->next = new_next;
                    break;
                }

                node = node->next;
            }
        }
    }
    /* NOTE(review): when the head node matches, the node itself is not
       freed (only re-linked) — looks like a small leak; confirm. */

    /* Clean up the thread objects */
    pthread_barrier_destroy(&dev->shutdown_barrier);
    pthread_barrier_destroy(&dev->barrier);
    pthread_cond_destroy(&dev->condition);
    pthread_mutex_destroy(&dev->mutex);

    /* Free the structure itself. */
    free(dev);
}
#if 0
/* Dead placeholder for error reporting; compiled out. */
static void register_error(hid_device *device, const char *op)
{

}
#endif

/* Read an integer IOKit property from `device`; returns 0 when the property
   is absent or is not a CFNumber. (0 is therefore ambiguous with a real
   zero value.) */
static int32_t get_int_property(IOHIDDeviceRef device, CFStringRef key)
{
    CFTypeRef ref;
    int32_t value;

    ref = IOHIDDeviceGetProperty(device, key);
    if (ref) {
        if (CFGetTypeID(ref) == CFNumberGetTypeID()) {
            CFNumberGetValue((CFNumberRef) ref, kCFNumberSInt32Type, &value);
            return value;
        }
    }
    return 0;
}

/* USB vendor ID of the device (0 if unavailable). */
static unsigned short get_vendor_id(IOHIDDeviceRef device)
{
    return get_int_property(device, CFSTR(kIOHIDVendorIDKey));
}

/* USB product ID of the device (0 if unavailable). */
static unsigned short get_product_id(IOHIDDeviceRef device)
{
    return get_int_property(device, CFSTR(kIOHIDProductIDKey));
}

/* Maximum input report size in bytes, as reported by IOKit. */
static int32_t get_max_report_length(IOHIDDeviceRef device)
{
    return get_int_property(device, CFSTR(kIOHIDMaxInputReportSizeKey));
}
/* Copy a string IOKit property into `buf` as UTF-32LE wide characters.
   `len` is the capacity of buf in wchar_t elements, including the
   terminator. Returns the number of characters copied (0 on absence).
   NOTE(review): `len` (an element count) is passed to CFStringGetBytes as
   maxBufLen, which expects *bytes* — this under-uses the buffer rather than
   overflowing it, but looks unintended; confirm against CFString docs. */
static int get_string_property(IOHIDDeviceRef device, CFStringRef prop, wchar_t *buf, size_t len)
{
    CFStringRef str;
    if (!len)
        return 0;

    str = (CFStringRef)IOHIDDeviceGetProperty(device, prop);

    buf[0] = 0;

    if (str) {
        len --;   /* reserve room for the terminator */

        CFIndex str_len = CFStringGetLength(str);
        CFRange range;
        range.location = 0;
        range.length = (str_len > len)? len: str_len;
        CFIndex used_buf_len;
        CFIndex chars_copied;
        chars_copied = CFStringGetBytes(str,
            range,
            kCFStringEncodingUTF32LE,
            (char)'?',   /* loss byte for unconvertible characters */
            FALSE,
            (UInt8*)buf,
            len,
            &used_buf_len);

        buf[chars_copied] = 0;
        return (int)chars_copied;
    }
    else
        return 0;

}

/* Copy a string IOKit property into `buf` as UTF-8. `len` is the buffer
   capacity in bytes, including the terminator. Returns the number of bytes
   used (0 on absence).
   NOTE(review): the terminator is written at buf[chars_copied], but
   CFStringGetBytes wrote used_buf_len bytes — for multi-byte UTF-8 output
   chars_copied < used_buf_len, so the string gets truncated mid-way;
   confirm and consider buf[used_buf_len] = 0 instead. */
static int get_string_property_utf8(IOHIDDeviceRef device, CFStringRef prop, char *buf, size_t len)
{
    CFStringRef str;
    if (!len)
        return 0;

    str = (CFStringRef)IOHIDDeviceGetProperty(device, prop);

    buf[0] = 0;

    if (str) {
        len--;   /* reserve room for the terminator */

        CFIndex str_len = CFStringGetLength(str);
        CFRange range;
        range.location = 0;
        range.length = (str_len > len)? len: str_len;
        CFIndex used_buf_len;
        CFIndex chars_copied;
        chars_copied = CFStringGetBytes(str,
            range,
            kCFStringEncodingUTF8,
            (char)'?',   /* loss byte for unconvertible characters */
            FALSE,
            (UInt8*)buf,
            len,
            &used_buf_len);

        buf[chars_copied] = 0;
        return (int)used_buf_len;
    }
    else
        return 0;
}
/* Wide-string serial number of the device; see get_string_property(). */
static int get_serial_number(IOHIDDeviceRef device, wchar_t *buf, size_t len)
{
    return get_string_property(device, CFSTR(kIOHIDSerialNumberKey), buf, len);
}

/* Wide-string manufacturer name of the device. */
static int get_manufacturer_string(IOHIDDeviceRef device, wchar_t *buf, size_t len)
{
    return get_string_property(device, CFSTR(kIOHIDManufacturerKey), buf, len);
}

/* Wide-string product name of the device. */
static int get_product_string(IOHIDDeviceRef device, wchar_t *buf, size_t len)
{
    return get_string_property(device, CFSTR(kIOHIDProductKey), buf, len);
}
/* Implementation of wcsdup() for Mac: return a heap-allocated copy of the
   wide string `s`, terminator included. Caller owns the result and must
   free() it. */
static wchar_t *dup_wcs(const wchar_t *s)
{
    size_t n = wcslen(s) + 1;   /* characters to copy, incl. L'\0' */
    wchar_t *copy = (wchar_t *)malloc(n * sizeof(wchar_t));

    memcpy(copy, s, n * sizeof(wchar_t));

    return copy;
}
/* Build a unique, human-readable path string for the device of the form
   "<transport>_<vid>_<pid>_<pointer>" (e.g. "USB_046d_c52b_0x7f...").
   The pointer makes the path unique among identical devices, but note it is
   only stable for the lifetime of this process.
   Returns the string length + 1, or -1 if the transport key is missing. */
static int make_path(IOHIDDeviceRef device, char *buf, size_t len)
{
    int res;
    unsigned short vid, pid;
    char transport[32];

    buf[0] = '\0';

    res = get_string_property_utf8(
        device, CFSTR(kIOHIDTransportKey),
        transport, sizeof(transport));

    if (!res)
        return -1;

    vid = get_vendor_id(device);
    pid = get_product_id(device);

    res = snprintf(buf, len, "%s_%04hx_%04hx_%p",
                   transport, vid, pid, device);

    /* snprintf already terminates, but be defensive. */
    buf[len-1] = '\0';
    return res+1;
}
/* Initialize the IOHIDManager. Return 0 for success and -1 for failure. */
static int init_hid_manager(void)
{
    /* Initialize all the HID Manager Objects: match every HID device and
       pump events on the calling thread's run loop. */
    hid_mgr = IOHIDManagerCreate(kCFAllocatorDefault, kIOHIDOptionsTypeNone);
    if (hid_mgr) {
        IOHIDManagerSetDeviceMatching(hid_mgr, NULL);
        IOHIDManagerScheduleWithRunLoop(hid_mgr, CFRunLoopGetCurrent(), kCFRunLoopDefaultMode);
        return 0;
    }

    return -1;
}

/* Initialize the IOHIDManager if necessary. This is the public function, and
   it is safe to call this function repeatedly. Return 0 for success and -1
   for failure. */
int HID_API_EXPORT hid_init(void)
{
    if (!hid_mgr) {
        return init_hid_manager();
    }

    /* Already initialized. */
    return 0;
}
/* Tear down the global IOHIDManager created by hid_init(). Safe to call
   when not initialized. Always returns 0. */
int HID_API_EXPORT hid_exit(void)
{
    if (hid_mgr) {
        /* Close the HID manager. */
        IOHIDManagerClose(hid_mgr, kIOHIDOptionsTypeNone);
        CFRelease(hid_mgr);
        hid_mgr = NULL;
    }

    return 0;
}

/* Drain the current thread's run loop so the IOHIDManager can deliver any
   pending device attach/detach events before we enumerate or open. */
static void process_pending_events() {
    SInt32 res;
    do {
        res = CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.001, FALSE);
    } while(res != kCFRunLoopRunFinished && res != kCFRunLoopRunTimedOut);
}
/* Enumerate attached HID devices, filtered by vendor_id/product_id
   (0/0 means "all devices"). Returns a newly allocated linked list of
   hid_device_info records that the caller must release with
   hid_free_enumeration(), or NULL when nothing matches or on error. */
struct hid_device_info  HID_API_EXPORT *hid_enumerate(unsigned short vendor_id, unsigned short product_id)
{
    struct hid_device_info *root = NULL; // return object
    struct hid_device_info *cur_dev = NULL;
    CFIndex num_devices;
    int i;

    /* Locale affects wide-character conversions below. */
    setlocale(LC_ALL,"");

    /* Set up the HID Manager if it hasn't been done */
    if (hid_init() < 0)
        return NULL;

    /* give the IOHIDManager a chance to update itself */
    process_pending_events();

    /* Get a list of the Devices */
    CFSetRef device_set = IOHIDManagerCopyDevices(hid_mgr);
    if (!device_set)
        return NULL;

    /* Convert the list into a C array so we can iterate easily. */
    num_devices = CFSetGetCount(device_set);
    if (!num_devices) {
        CFRelease(device_set);
        return NULL;
    }

    IOHIDDeviceRef *device_array = (IOHIDDeviceRef*)calloc(num_devices, sizeof(IOHIDDeviceRef));
    CFSetGetValues(device_set, (const void **) device_array);

    /* Iterate over each device, making an entry for it. */
    for (i = 0; i < num_devices; i++) {
        unsigned short dev_vid;
        unsigned short dev_pid;
        #define BUF_LEN 256
        wchar_t buf[BUF_LEN];
        char cbuf[BUF_LEN];

        IOHIDDeviceRef dev = device_array[i];

        if (!dev) {
            continue;
        }
        dev_vid = get_vendor_id(dev);
        dev_pid = get_product_id(dev);

        /* Check the VID/PID against the arguments */
        if ((vendor_id == 0x0 && product_id == 0x0) ||
            (vendor_id == dev_vid && product_id == dev_pid)) {
            struct hid_device_info *tmp;
            size_t len;   /* NOTE(review): assigned below but never read */

            /* VID/PID match. Create the record. */
            tmp = (struct hid_device_info *)calloc(1, sizeof(struct hid_device_info));
            if (cur_dev) {
                cur_dev->next = tmp;
            }
            else {
                root = tmp;
            }
            cur_dev = tmp;

            // Get the Usage Page and Usage for this device.
            cur_dev->usage_page = get_int_property(dev, CFSTR(kIOHIDPrimaryUsagePageKey));
            cur_dev->usage = get_int_property(dev, CFSTR(kIOHIDPrimaryUsageKey));

            /* Fill out the record */
            cur_dev->next = NULL;
            len = make_path(dev, cbuf, sizeof(cbuf));
            cur_dev->path = strdup(cbuf);

            /* Serial Number */
            get_serial_number(dev, buf, BUF_LEN);
            cur_dev->serial_number = dup_wcs(buf);

            /* Manufacturer and Product strings */
            get_manufacturer_string(dev, buf, BUF_LEN);
            cur_dev->manufacturer_string = dup_wcs(buf);
            get_product_string(dev, buf, BUF_LEN);
            cur_dev->product_string = dup_wcs(buf);

            /* VID/PID */
            cur_dev->vendor_id = dev_vid;
            cur_dev->product_id = dev_pid;

            /* Release Number */
            cur_dev->release_number = get_int_property(dev, CFSTR(kIOHIDVersionNumberKey));

            /* Interface Number (Unsupported on Mac)*/
            cur_dev->interface_number = -1;
        }
    }

    free(device_array);
    CFRelease(device_set);

    return root;
}
/* Free a linked list of hid_device_info records returned by
   hid_enumerate(), including every owned string. Platform independent
   (identical to the Linux implementation). Safe to call with NULL. */
void  HID_API_EXPORT hid_free_enumeration(struct hid_device_info *devs)
{
    struct hid_device_info *node = devs;

    while (node != NULL) {
        struct hid_device_info *doomed = node;
        node = node->next;

        free(doomed->path);
        free(doomed->serial_number);
        free(doomed->manufacturer_string);
        free(doomed->product_string);
        free(doomed);
    }
}
/* Open the first device matching vendor_id/product_id (and, when
   serial_number is non-NULL, that exact serial). Returns an open handle or
   NULL when no device matches or opening fails. */
hid_device * HID_API_EXPORT hid_open(unsigned short vendor_id, unsigned short product_id, const wchar_t *serial_number)
{
    /* This function is identical to the Linux version. Platform independent. */
    struct hid_device_info *devs, *cur_dev;
    const char *path_to_open = NULL;
    hid_device * handle = NULL;

    /* Enumerate, pick the first match, then reopen it by path. */
    devs = hid_enumerate(vendor_id, product_id);
    cur_dev = devs;
    while (cur_dev) {
        if (cur_dev->vendor_id == vendor_id &&
            cur_dev->product_id == product_id) {
            if (serial_number) {
                if (wcscmp(serial_number, cur_dev->serial_number) == 0) {
                    path_to_open = cur_dev->path;
                    break;
                }
            }
            else {
                path_to_open = cur_dev->path;
                break;
            }
        }
        cur_dev = cur_dev->next;
    }

    if (path_to_open) {
        /* Open the device */
        handle = hid_open_path(path_to_open, 0);
    }

    /* path_to_open points into devs, so free the list only after opening. */
    hid_free_enumeration(devs);

    return handle;
}
/* IOKit callback invoked when a device is unplugged: mark the device
   disconnected and stop its read thread's run loop. */
static void hid_device_removal_callback(void *context, IOReturn result,
                                        void *sender)
{
    /* Stop the Run Loop for this device. */
    hid_device *dev = (hid_device *)context;

    // The device removal callback is sometimes called even after being
    // unregistered, leading to a crash when trying to access fields in
    // the already freed hid_device. We keep a linked list of all created
    // hid_device's so that the one being removed can be checked against
    // the list to see if it really hasn't been closed yet and needs to
    // be dealt with here.
    struct hid_device_list_node *node = device_list;
    while (node) {
        if (node->dev == dev) {
            dev->disconnected = 1;
            CFRunLoopStop(dev->run_loop);
            break;
        }
        node = node->next;
    }
}
/* The Run Loop calls this function for each input report received.
   This function puts the data into a linked list to be picked up by
   hid_read(). Runs on the device's read thread. */
static void hid_report_callback(void *context, IOReturn result, void *sender,
                                IOHIDReportType report_type, uint32_t report_id,
                                uint8_t *report, CFIndex report_length)
{
    struct input_report *rpt;
    hid_device *dev = (hid_device *)context;

    /* Make a new Input Report object (heap copy of the report bytes). */
    rpt = (struct input_report *)calloc(1, sizeof(struct input_report));
    rpt->data = (uint8_t *)calloc(1, report_length);
    memcpy(rpt->data, report, report_length);
    rpt->len = report_length;
    rpt->next = NULL;

    /* Lock this section */
    pthread_mutex_lock(&dev->mutex);

    /* Attach the new report object to the end of the list. */
    if (dev->input_reports == NULL) {
        /* The list is empty. Put it at the root. */
        dev->input_reports = rpt;
    }
    else {
        /* Find the end of the list and attach. */
        struct input_report *cur = dev->input_reports;
        int num_queued = 0;
        while (cur->next != NULL) {
            cur = cur->next;
            num_queued++;
        }
        cur->next = rpt;

        /* Pop one off if we've reached 30 in the queue. This
           way we don't grow forever if the user never reads
           anything from the device.
           NOTE(review): this early return skips the cond_signal and,
           more importantly, the mutex unlock below — looks like a
           deadlock path once the queue cap is hit; confirm. */
        if (num_queued > 30) {
            return_data(dev, NULL, 0);
        }
    }

    /* Signal a waiting thread that there is data. */
    pthread_cond_signal(&dev->condition);

    /* Unlock */
    pthread_mutex_unlock(&dev->mutex);
}
/* This gets called when the read_thread's run loop gets signaled by
   hid_close(), and serves to stop the read_thread's run loop. */
static void perform_signal_callback(void *context)
{
    hid_device *dev = (hid_device *)context;
    CFRunLoopStop(dev->run_loop); //TODO: CFRunLoopGetCurrent()
}
/* Per-device background thread: owns the device's run loop, receives input
   reports via hid_report_callback(), and coordinates startup/shutdown with
   the main thread through the two barriers. `param` is the hid_device*. */
static void *read_thread(void *param)
{
    hid_device *dev = (hid_device *)param;

    /* Move the device's run loop to this thread. */
    IOHIDDeviceScheduleWithRunLoop(dev->device_handle, CFRunLoopGetCurrent(), dev->run_loop_mode);

    /* Create the RunLoopSource which is used to signal the
       event loop to stop when hid_close() is called. */
    CFRunLoopSourceContext ctx;
    memset(&ctx, 0, sizeof(ctx));
    ctx.version = 0;
    ctx.info = dev;
    ctx.perform = &perform_signal_callback;
    dev->source = CFRunLoopSourceCreate(kCFAllocatorDefault, 0/*order*/, &ctx);
    CFRunLoopAddSource(CFRunLoopGetCurrent(), dev->source, dev->run_loop_mode);

    /* Store off the Run Loop so it can be stopped from hid_close()
       and on device disconnection. */
    dev->run_loop = CFRunLoopGetCurrent();

    /* Notify the main thread that the read thread is up and running. */
    pthread_barrier_wait(&dev->barrier);

    /* Run the Event Loop. CFRunLoopRunInMode() will dispatch HID input
       reports into the hid_report_callback(). */
    SInt32 code;
    while (!dev->shutdown_thread && !dev->disconnected) {
        code = CFRunLoopRunInMode(dev->run_loop_mode, 1000/*sec*/, FALSE);
        /* Return if the device has been disconnected */
        if (code == kCFRunLoopRunFinished) {
            dev->disconnected = 1;
            break;
        }

        /* Break if The Run Loop returns Finished or Stopped. */
        if (code != kCFRunLoopRunTimedOut &&
            code != kCFRunLoopRunHandledSource) {
            /* There was some kind of error. Setting
               shutdown seems to make sense, but
               there may be something else more appropriate */
            dev->shutdown_thread = 1;
            break;
        }
    }

    /* Now that the read thread is stopping, Wake any threads which are
       waiting on data (in hid_read_timeout()). Do this under a mutex to
       make sure that a thread which is about to go to sleep waiting on
       the condition acutally will go to sleep before the condition is
       signaled. */
    pthread_mutex_lock(&dev->mutex);
    pthread_cond_broadcast(&dev->condition);
    pthread_mutex_unlock(&dev->mutex);

    /* Wait here until hid_close() is called and makes it past
       the call to CFRunLoopWakeUp(). This thread still needs to
       be valid when that function is called on the other thread. */
    pthread_barrier_wait(&dev->shutdown_barrier);

    return NULL;
}
/* Open the device identified by the path string produced by make_path()
   (and returned in hid_device_info.path). `bExclusive` is accepted for API
   compatibility but ignored on this platform. Returns an open handle with
   its read thread running, or NULL on failure.
   Fixes over the original: the hid_device allocated up front is no longer
   leaked when hid_init() fails, and a NULL result from
   IOHIDManagerCopyDevices() is handled (as hid_enumerate() already does). */
hid_device * HID_API_EXPORT hid_open_path(const char *path, int bExclusive)
{
    int i;
    hid_device *dev = NULL;
    CFIndex num_devices;

    /* Set up the HID Manager if it hasn't been done. Do this before any
       allocation so a failure leaks nothing. */
    if (hid_init() < 0)
        return NULL;

    dev = new_hid_device();

    /* give the IOHIDManager a chance to update itself */
    process_pending_events();

    CFSetRef device_set = IOHIDManagerCopyDevices(hid_mgr);
    if (!device_set) {
        free_hid_device(dev);
        return NULL;
    }

    num_devices = CFSetGetCount(device_set);
    IOHIDDeviceRef *device_array = (IOHIDDeviceRef *)calloc(num_devices, sizeof(IOHIDDeviceRef));
    CFSetGetValues(device_set, (const void **) device_array);

    /* Find the device whose generated path matches, then open it. */
    for (i = 0; i < num_devices; i++) {
        char cbuf[BUF_LEN];
        IOHIDDeviceRef os_dev = device_array[i];

        make_path(os_dev, cbuf, sizeof(cbuf));
        if (!strcmp(cbuf, path)) {
            // Matched Paths. Open this Device.
            IOReturn ret = IOHIDDeviceOpen(os_dev, kIOHIDOptionsTypeNone);
            if (ret == kIOReturnSuccess) {
                char str[32];

                free(device_array);
                CFRelease(device_set);
                dev->device_handle = os_dev;

                /* Create the buffers for receiving data */
                dev->max_input_report_len = (CFIndex) get_max_report_length(os_dev);
                dev->input_report_buf = (uint8_t *)calloc(dev->max_input_report_len, sizeof(uint8_t));

                /* Create the Run Loop Mode for this device.
                   printing the reference seems to work. */
                sprintf(str, "HIDAPI_%p", os_dev);
                dev->run_loop_mode =
                    CFStringCreateWithCString(NULL, str, kCFStringEncodingASCII);

                /* Attach the device to a Run Loop */
                IOHIDDeviceRegisterInputReportCallback(
                    os_dev, dev->input_report_buf, dev->max_input_report_len,
                    &hid_report_callback, dev);
                IOHIDDeviceRegisterRemovalCallback(dev->device_handle, hid_device_removal_callback, dev);

                /* Track the open device so the removal callback can verify it. */
                struct hid_device_list_node *node = (struct hid_device_list_node *)calloc(1, sizeof(struct hid_device_list_node));
                node->dev = dev;
                node->next = device_list;
                device_list = node;

                /* Start the read thread */
                pthread_create(&dev->thread, NULL, read_thread, dev);

                /* Wait here for the read thread to be initialized. */
                pthread_barrier_wait(&dev->barrier);

                return dev;
            }
            else {
                goto return_error;
            }
        }
    }

return_error:
    free(device_array);
    CFRelease(device_set);
    free_hid_device(dev);
    return NULL;
}
/* Send a report to the device. `data[0]` is the report number (0 means the
   device does not use numbered reports and the byte is stripped); the
   special "MAGIC0" prefix passes a raw report id through unchanged.
   Returns the number of bytes accepted, or -1 on error.
   Fix: validate `data`/`length` before reading data[0] — the original
   indexed the buffer unconditionally, an out-of-bounds read for length 0. */
static int set_report(hid_device *dev, IOHIDReportType type, const unsigned char *data, size_t length)
{
    const char *pass_through_magic = "MAGIC0";
    size_t pass_through_magic_length = strlen(pass_through_magic);
    unsigned char report_id;
    const unsigned char *data_to_send;
    size_t length_to_send;
    IOReturn res;

    /* A report must carry at least the report-number byte. */
    if (data == NULL || length == 0)
        return -1;

    /* Return if the device has been disconnected. */
    if (dev->disconnected)
        return -1;

    report_id = data[0];

    if (report_id == 0x0) {
        /* Not using numbered Reports.
           Don't send the report number. */
        data_to_send = data+1;
        length_to_send = length-1;
    }
    else if (length > 6 && memcmp(data, pass_through_magic, pass_through_magic_length) == 0) {
        /* Pass-through escape: byte after the magic is the real report id. */
        report_id = data[pass_through_magic_length];
        data_to_send = data+pass_through_magic_length;
        length_to_send = length-pass_through_magic_length;
    }
    else {
        /* Using numbered Reports.
           Send the Report Number */
        data_to_send = data;
        length_to_send = length;
    }

    if (!dev->disconnected) {
        res = IOHIDDeviceSetReport(dev->device_handle,
                                   type,
                                   report_id, /* Report ID*/
                                   data_to_send, length_to_send);

        if (res == kIOReturnSuccess) {
            return (int)length;
        }
        else if (res == kIOReturnUnsupported) {
            /*printf("kIOReturnUnsupported\n");*/
            return -1;
        }
        else {
            /*printf("0x%x\n", res);*/
            return -1;
        }
    }

    return -1;
}
/* Write an output report to the device; see set_report() for the report
   number convention. Returns bytes written or -1 on error. */
int HID_API_EXPORT hid_write(hid_device *dev, const unsigned char *data, size_t length)
{
    return set_report(dev, kIOHIDReportTypeOutput, data, length);
}

/* Helper function, so that this isn't duplicated in hid_read().
   Pops the oldest queued report into `data` (up to `length` bytes) and
   frees it; called with (dev, NULL, 0) just to discard the head.
   Caller must hold dev->mutex. Returns the number of bytes copied. */
static int return_data(hid_device *dev, unsigned char *data, size_t length)
{
    /* Copy the data out of the linked list item (rpt) into the
       return buffer (data), and delete the liked list item. */
    struct input_report *rpt = dev->input_reports;
    size_t len = (length < rpt->len)? length: rpt->len;
    memcpy(data, rpt->data, len);
    dev->input_reports = rpt->next;
    free(rpt->data);
    free(rpt);
    return (int)len;
}
/* Block on `cond` until an input report is queued, the read thread is
   shutting down, or the device disconnects. `mutex` must be held on
   entry and is held again on return. Returns 0 when a report is
   available, -1 on shutdown/disconnect, or a pthread error code. */
static int cond_wait(const hid_device *dev, pthread_cond_t *cond, pthread_mutex_t *mutex)
{
	while (!dev->input_reports) {
		int res = pthread_cond_wait(cond, mutex);
		if (res != 0)
			return res;

		/* A res of 0 means we may have been signaled or it may
		   be a spurious wakeup. Check to see that there's actually
		   data in the queue before returning, and if not, go back
		   to sleep. See the pthread_cond_timedwait() man page for
		   details. */

		if (dev->shutdown_thread || dev->disconnected)
			return -1;
	}

	return 0;
}
/* Like cond_wait(), but gives up at the absolute time `abstime`.
   Returns 0 when a report is available, ETIMEDOUT on timeout, -1 on
   shutdown/disconnect, or another pthread error code. */
static int cond_timedwait(const hid_device *dev, pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime)
{
	while (!dev->input_reports) {
		int res = pthread_cond_timedwait(cond, mutex, abstime);
		if (res != 0)
			return res;

		/* A res of 0 means we may have been signaled or it may
		   be a spurious wakeup. Check to see that there's actually
		   data in the queue before returning, and if not, go back
		   to sleep. See the pthread_cond_timedwait() man page for
		   details. */

		if (dev->shutdown_thread || dev->disconnected)
			return -1;
	}

	return 0;
}
/* Read an input report with a timeout.
   milliseconds == -1 blocks until data, shutdown, or disconnect;
   milliseconds > 0 waits up to that long; milliseconds == 0 polls.
   Returns bytes read, 0 on timeout/no data, or -1 on error.
   All queue access happens under dev->mutex; every exit path funnels
   through the `ret` label so the mutex is always released. */
int HID_API_EXPORT hid_read_timeout(hid_device *dev, unsigned char *data, size_t length, int milliseconds)
{
	int bytes_read = -1;

	/* Lock the access to the report list. */
	pthread_mutex_lock(&dev->mutex);

	/* There's an input report queued up. Return it. */
	if (dev->input_reports) {
		/* Return the first one */
		bytes_read = return_data(dev, data, length);
		goto ret;
	}

	/* Return if the device has been disconnected. */
	if (dev->disconnected) {
		bytes_read = -1;
		goto ret;
	}

	if (dev->shutdown_thread) {
		/* This means the device has been closed (or there
		   has been an error. An error code of -1 should
		   be returned. */
		bytes_read = -1;
		goto ret;
	}

	/* There is no data. Go to sleep and wait for data. */

	if (milliseconds == -1) {
		/* Blocking */
		int res;
		res = cond_wait(dev, &dev->condition, &dev->mutex);
		if (res == 0)
			bytes_read = return_data(dev, data, length);
		else {
			/* There was an error, or a device disconnection. */
			bytes_read = -1;
		}
	}
	else if (milliseconds > 0) {
		/* Non-blocking, but called with timeout. */
		int res;
		struct timespec ts;
		struct timeval tv;
		gettimeofday(&tv, NULL);
		TIMEVAL_TO_TIMESPEC(&tv, &ts);
		/* Build the absolute deadline, normalizing tv_nsec so it
		   stays below one second (required by pthreads). */
		ts.tv_sec += milliseconds / 1000;
		ts.tv_nsec += (milliseconds % 1000) * 1000000;
		if (ts.tv_nsec >= 1000000000L) {
			ts.tv_sec++;
			ts.tv_nsec -= 1000000000L;
		}

		res = cond_timedwait(dev, &dev->condition, &dev->mutex, &ts);
		if (res == 0)
			bytes_read = return_data(dev, data, length);
		else if (res == ETIMEDOUT)
			bytes_read = 0;
		else
			bytes_read = -1;
	}
	else {
		/* Purely non-blocking */
		bytes_read = 0;
	}

ret:
	/* Unlock */
	pthread_mutex_unlock(&dev->mutex);

	return bytes_read;
}
/* Read an input report, honoring the blocking mode selected via
   hid_set_nonblocking() (blocking by default). Returns bytes read,
   0 if non-blocking with no data available, or -1 on error. */
int HID_API_EXPORT hid_read(hid_device *dev, unsigned char *data, size_t length)
{
	return hid_read_timeout(dev, data, length, (dev->blocking)? -1: 0);
}
/* Select blocking (nonblock == 0) or non-blocking (nonblock != 0) mode
   for hid_read(). Always succeeds and returns 0; no OS call is needed
   because the behavior is implemented entirely in this library. */
int HID_API_EXPORT hid_set_nonblocking(hid_device *dev, int nonblock)
{
	/* All Nonblocking operation is handled by the library. */
	dev->blocking = !nonblock;
	return 0;
}
/* Send a Feature report. data[0] must be the report ID (0x0 for
   unnumbered reports). Returns bytes written or -1 on error. */
int HID_API_EXPORT hid_send_feature_report(hid_device *dev, const unsigned char *data, size_t length)
{
	return set_report(dev, kIOHIDReportTypeFeature, data, length);
}
/* Read a Feature report. On entry data[0] must hold the report ID to
   request (0x0 if the device does not use numbered reports); for
   unnumbered devices the payload is written starting at data[1] so the
   ID stays in byte 0. Returns the number of bytes read (including the
   report-ID byte), or -1 on error. */
int HID_API_EXPORT hid_get_feature_report(hid_device *dev, unsigned char *data, size_t length)
{
	CFIndex len;
	IOReturn res;
	int skipped_report_id = 0;
	int report_number;

	/* The caller must supply at least the report-ID byte; reading
	   data[0] from an empty buffer would be out of bounds. */
	if (!data || length == 0)
		return -1;

	/* Return if the device has been unplugged. */
	if (dev->disconnected)
		return -1;

	len = (CFIndex)length;
	report_number = data[0];
	if (report_number == 0x0) {
		/* Offset the return buffer by 1, so that the report ID
		   will remain in byte 0. */
		data++;
		len--;
		skipped_report_id = 1;
	}

	res = IOHIDDeviceGetReport(dev->device_handle,
	                           kIOHIDReportTypeFeature,
	                           report_number, /* Report ID */
	                           data, &len);
	if (res != kIOReturnSuccess)
		return -1;

	/* Account for the report-ID byte that was not passed to the kernel. */
	if (skipped_report_id)
		len++;

	return (int)len;
}
/* Close the device and free all associated resources. Safe to call with
   a NULL dev. The teardown order matters: callbacks are detached first,
   then the read thread is signaled, woken, and joined, and only then is
   the OS handle closed and the report queue drained. */
void HID_API_EXPORT hid_close(hid_device *dev)
{
	if (!dev)
		return;

	/* Disconnect the report callback before close. */
	if (!dev->disconnected) {
		IOHIDDeviceRegisterInputReportCallback(
			dev->device_handle, dev->input_report_buf, dev->max_input_report_len,
			NULL, dev);
		IOHIDDeviceRegisterRemovalCallback(dev->device_handle, NULL, dev);
		/* Hand the device back to the main run loop so our private
		   run loop has no remaining event sources tied to it. */
		IOHIDDeviceUnscheduleFromRunLoop(dev->device_handle, dev->run_loop, dev->run_loop_mode);
		IOHIDDeviceScheduleWithRunLoop(dev->device_handle, CFRunLoopGetMain(), kCFRunLoopDefaultMode);
	}

	/* Cause read_thread() to stop. */
	dev->shutdown_thread = 1;

	/* Wake up the run thread's event loop so that the thread can exit. */
	CFRunLoopSourceSignal(dev->source);
	CFRunLoopWakeUp(dev->run_loop);

	/* Notify the read thread that it can shut down now. */
	pthread_barrier_wait(&dev->shutdown_barrier);

	/* Wait for read_thread() to end. */
	pthread_join(dev->thread, NULL);

	/* Close the OS handle to the device, but only if it's not
	   been unplugged. If it's been unplugged, then calling
	   IOHIDDeviceClose() will crash. */
	if (!dev->disconnected) {
		IOHIDDeviceClose(dev->device_handle, kIOHIDOptionsTypeNone);
	}

	/* Clear out the queue of received reports. */
	pthread_mutex_lock(&dev->mutex);
	while (dev->input_reports) {
		return_data(dev, NULL, 0);
	}
	pthread_mutex_unlock(&dev->mutex);

	free_hid_device(dev);
}
/* Retrieve the device's manufacturer string into `string` (wide chars,
   up to maxlen). Returns what the underlying helper returns. */
int HID_API_EXPORT_CALL hid_get_manufacturer_string(hid_device *dev, wchar_t *string, size_t maxlen)
{
	return get_manufacturer_string(dev->device_handle, string, maxlen);
}

/* Retrieve the device's product string. */
int HID_API_EXPORT_CALL hid_get_product_string(hid_device *dev, wchar_t *string, size_t maxlen)
{
	return get_product_string(dev->device_handle, string, maxlen);
}

/* Retrieve the device's serial number string. */
int HID_API_EXPORT_CALL hid_get_serial_number_string(hid_device *dev, wchar_t *string, size_t maxlen)
{
	return get_serial_number(dev->device_handle, string, maxlen);
}

/* Not implemented on this platform: returns 0 without writing to
   `string`. */
int HID_API_EXPORT_CALL hid_get_indexed_string(hid_device *dev, int string_index, wchar_t *string, size_t maxlen)
{
	// TODO:

	return 0;
}

/* Not implemented on this platform: always returns NULL. */
HID_API_EXPORT const wchar_t * HID_API_CALL hid_error(hid_device *dev)
{
	// TODO:

	return NULL;
}
#if 0
static int32_t get_location_id(IOHIDDeviceRef device)
{
return get_int_property(device, CFSTR(kIOHIDLocationIDKey));
}
static int32_t get_usage(IOHIDDeviceRef device)
{
int32_t res;
res = get_int_property(device, CFSTR(kIOHIDDeviceUsageKey));
if (!res)
res = get_int_property(device, CFSTR(kIOHIDPrimaryUsageKey));
return res;
}
static int32_t get_usage_page(IOHIDDeviceRef device)
{
int32_t res;
res = get_int_property(device, CFSTR(kIOHIDDeviceUsagePageKey));
if (!res)
res = get_int_property(device, CFSTR(kIOHIDPrimaryUsagePageKey));
return res;
}
static int get_transport(IOHIDDeviceRef device, wchar_t *buf, size_t len)
{
return get_string_property(device, CFSTR(kIOHIDTransportKey), buf, len);
}
int main(void)
{
IOHIDManagerRef mgr;
int i;
mgr = IOHIDManagerCreate(kCFAllocatorDefault, kIOHIDOptionsTypeNone);
IOHIDManagerSetDeviceMatching(mgr, NULL);
IOHIDManagerOpen(mgr, kIOHIDOptionsTypeNone);
CFSetRef device_set = IOHIDManagerCopyDevices(mgr);
CFIndex num_devices = CFSetGetCount(device_set);
IOHIDDeviceRef *device_array = calloc(num_devices, sizeof(IOHIDDeviceRef));
CFSetGetValues(device_set, (const void **) device_array);
setlocale(LC_ALL, "");
for (i = 0; i < num_devices; i++) {
IOHIDDeviceRef dev = device_array[i];
printf("Device: %p\n", dev);
printf(" %04hx %04hx\n", get_vendor_id(dev), get_product_id(dev));
wchar_t serial[256], buf[256];
char cbuf[256];
get_serial_number(dev, serial, 256);
printf(" Serial: %ls\n", serial);
printf(" Loc: %ld\n", get_location_id(dev));
get_transport(dev, buf, 256);
printf(" Trans: %ls\n", buf);
make_path(dev, cbuf, 256);
printf(" Path: %s\n", cbuf);
}
return 0;
}
#endif
#endif /* SDL_JOYSTICK_HIDAPI */
| libcrosswind/libcrosswind | external/sdl2/SDL2-2/src/hidapi/mac/hid.c | C | gpl-3.0 | 30,863 |
/* NAME Text **/
/* VERSION 1.0.4 **/
/* DESCRIPTION Slightly modified Default theme **/
/* DEVELOPER STUDIOXENIX **/
.identity {
background: #3c5a76 url('https://new-xkit.github.io/XKit/Themes/text/body-background.png') no-repeat center top !important;
}
.selection_nipple,
.permalink {
visibility: hidden !important;
}
.recessed {
background: rgba(0, 0, 0, 0.25) !important;
opacity: 0.8 !important;
-webkit-transition: 0.3s ease-in;
-moz-transition: 0.3s ease-in;
-o-transition: 0.3s ease-in;
transition: 0.3s ease-in;
}
.controls_section:not(.controls_section.follow_list.recommended_tumblelogs) {
box-shadow: 0px -1px 0px 0px rgba(0, 0, 0, 0.09) !important;
border: 1px solid rgba(0, 0, 0, 0.25) !important;
background: rgba(20, 40, 70, 0.25) !important;
opacity: 1 !important;
}
.l-header {
opacity: 0.8 !important;
-webkit-transition: 0.4s ease-in;
-moz-transition: 0.4s ease-in;
-o-transition: 0.4s ease-in;
transition: 0.4s ease-in;
}
.svg-logo {
opacity: 0.5 !important;
-webkit-transition: 0.4s ease-in;
-moz-transition: 0.4s ease-in;
-o-transition: 0.4s ease-in;
transition: 0.4s ease-in;
}
.l-header:hover,
.svg-logo:hover {
opacity: 1 !important;
-webkit-transition: 0.4s ease-in;
-moz-transition: 0.4s ease-in;
-o-transition: 0.4s ease-in;
transition: 0.4s ease-in;
}
a {
-webkit-transition: 0.4s ease-in;
-moz-transition: 0.4s ease-in;
-o-transition: 0.4s ease-in;
transition: 0.4s ease-in;
}
ul.controls_section li {
	/* Fixed: text-shadow accepts at most three lengths (x-offset,
	   y-offset, blur); the original fourth value (a box-shadow-style
	   spread) made the whole declaration invalid, so browsers
	   silently dropped it. */
	text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.12) !important;
	-webkit-transition: 0.3s ease-in;
	-moz-transition: 0.3s ease-in;
	-o-transition: 0.3s ease-in;
	transition: 0.3s ease-in;
}
ul.controls_section li:hover {
	/* Fixed typo: "backgrjound" was an unknown property, so the hover
	   highlight never applied. */
	background: rgba(20, 40, 70, 0.35) !important;
	-webkit-transition: 0.3s ease-in;
	-moz-transition: 0.3s ease-in;
	-o-transition: 0.3s ease-in;
	transition: 0.3s ease-in;
}
li.post {
box-shadow: 0 -1px 0 0 rgba(0, 0, 0, 0.22) !important;
opacity: 0.99 !important;
-webkit-transition: 0.3s ease-in;
-moz-transition: 0.3s ease-in;
-o-transition: 0.3s ease-in;
transition: 0.3s ease-in;
}
li.post:hover {
opacity: 1 !important;
-webkit-transition: 0.5s ease-in;
-moz-transition: 0.5s ease-in;
-o-transition: 0.5s ease-in;
transition: 0.5s ease-in;
}
.l-content {
background: transparent !important;
margin-top: 8px !important;
padding-top: 0 !important;
}
.l-container.l-container--two-column-dashboard .left_column {
background: transparent;
}
#right_column:after {
display: none !important;
}
.post_avatar,
.post_avatar .post_avatar_link {
background-color: transparent;
}
| estufar/XKit | Themes/text/text.css | CSS | gpl-3.0 | 2,586 |
var namespaceCqrs_1_1Mongo_1_1Repositories =
[
[ "Authentication", "namespaceCqrs_1_1Mongo_1_1Repositories_1_1Authentication.html", "namespaceCqrs_1_1Mongo_1_1Repositories_1_1Authentication" ]
]; | cdmdotnet/CQRS | wiki/docs/4.2/html/namespaceCqrs_1_1Mongo_1_1Repositories.js | JavaScript | lgpl-2.1 | 199 |
// @(#)root/graf:$Id$
// Author: Rene Brun 16/10/95
/*************************************************************************
* Copyright (C) 1995-2000, Rene Brun and Fons Rademakers. *
* All rights reserved. *
* *
* For the licensing terms see $ROOTSYS/LICENSE. *
* For the list of contributors see $ROOTSYS/README/CREDITS. *
*************************************************************************/
#ifndef ROOT_TEllipse
#define ROOT_TEllipse
#include "TObject.h"
#include "TAttLine.h"
#include "TAttFill.h"
#include "TAttBBox2D.h"
class TPoint;
/// An ellipse (optionally a partial arc) defined by its centre, two
/// radii, a start/stop angle, and a rotation angle. Inherits line and
/// fill attributes; TAttBBox2D provides the graphical bounding box
/// used for interactive editing.
class TEllipse : public TObject, public TAttLine, public TAttFill, public TAttBBox2D {

protected:
   Double_t fX1; ///< X coordinate of centre
   Double_t fY1; ///< Y coordinate of centre
   Double_t fR1; ///< first radius
   Double_t fR2; ///< second radius
   Double_t fPhimin; ///< Minimum angle (degrees)
   Double_t fPhimax; ///< Maximum angle (degrees)
   Double_t fTheta; ///< Rotation angle (degrees)

public:
   // TEllipse status bits
   enum {
      kNoEdges = BIT(9) // don't draw lines connecting center to edges
   };

   TEllipse();
   TEllipse(Double_t x1, Double_t y1,Double_t r1,Double_t r2=0,Double_t phimin=0, Double_t phimax=360,Double_t theta=0);
   TEllipse(const TEllipse &ellipse);
   virtual ~TEllipse();

   /// Copy this ellipse's data members into `ellipse`.
   void Copy(TObject &ellipse) const;
   virtual Int_t DistancetoPrimitive(Int_t px, Int_t py);
   virtual void Draw(Option_t *option="");
   /// Draw an ellipse with the given geometry (does not modify this object).
   virtual void DrawEllipse(Double_t x1, Double_t y1, Double_t r1,Double_t r2,Double_t phimin, Double_t phimax,Double_t theta,Option_t *option="");
   virtual void ExecuteEvent(Int_t event, Int_t px, Int_t py);

   // Geometry accessors.
   Double_t GetX1() const {return fX1;}
   Double_t GetY1() const {return fY1;}
   Double_t GetR1() const {return fR1;}
   Double_t GetR2() const {return fR2;}
   Double_t GetPhimin() const {return fPhimin;}
   Double_t GetPhimax() const {return fPhimax;}
   Double_t GetTheta() const {return fTheta;}
   Bool_t GetNoEdges() const;
   virtual void ls(Option_t *option="") const;
   virtual void Paint(Option_t *option="");
   virtual void PaintEllipse(Double_t x1, Double_t y1, Double_t r1,Double_t r2,Double_t phimin, Double_t phimax,Double_t theta,Option_t *option="");
   virtual void Print(Option_t *option="") const;
   virtual void SavePrimitive(std::ostream &out, Option_t *option = "");
   virtual void SetNoEdges(Bool_t noEdges=kTRUE); // *TOGGLE* *GETTER=GetNoEdges
   virtual void SetPhimin(Double_t phi=0) {fPhimin=phi;} // *MENU*
   virtual void SetPhimax(Double_t phi=360) {fPhimax=phi;} // *MENU*
   virtual void SetR1(Double_t r1) {fR1=r1;} // *MENU*
   virtual void SetR2(Double_t r2) {fR2=r2;} // *MENU*
   virtual void SetTheta(Double_t theta=0) {fTheta=theta;} // *MENU*
   virtual void SetX1(Double_t x1) {fX1=x1;} // *MENU*
   virtual void SetY1(Double_t y1) {fY1=y1;} // *MENU*

   // TAttBBox2D interface: bounding box in pixel coordinates.
   virtual Rectangle_t GetBBox();
   virtual TPoint GetBBoxCenter();
   virtual void SetBBoxCenter(const TPoint &p);
   virtual void SetBBoxCenterX(const Int_t x);
   virtual void SetBBoxCenterY(const Int_t y);
   virtual void SetBBoxX1(const Int_t x);
   virtual void SetBBoxX2(const Int_t x);
   virtual void SetBBoxY1(const Int_t y);
   virtual void SetBBoxY2(const Int_t y);

   ClassDef(TEllipse,3) //An ellipse
};
#endif
| karies/root | graf2d/graf/inc/TEllipse.h | C | lgpl-2.1 | 3,811 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html lang="ja">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta http-equiv="Content-Style-Type" content="text/css">
<link rel="up" title="FatFs" href="../00index_j.html">
<link rel="alternate" hreflang="en" title="English" href="../en/close.html">
<link rel="stylesheet" href="../css_j.css" type="text/css" media="screen" title="ELM Default">
<title>FatFs - f_close</title>
</head>
<body>
<div class="para func">
<h2>f_close</h2>
<p>ファイルを閉じます。</p>
<pre>
FRESULT f_close (
FIL* <span class="arg">fp</span> <span class="c">/* [IN] ファイル オブジェクトへのポインタ */</span>
);
</pre>
</div>
<div class="para arg">
<h4>引数</h4>
<dl class="par">
<dt>fp</dt>
<dd>閉じようとするファイルのファイル オブジェクト構造体へのポインタを指定します。</dd>
</dl>
</div>
<div class="para ret">
<h4>戻り値</h4>
<p>
<a href="rc.html#ok">FR_OK</a>,
<a href="rc.html#de">FR_DISK_ERR</a>,
<a href="rc.html#ie">FR_INT_ERR</a>,
<a href="rc.html#io">FR_INVALID_OBJECT</a>,
<a href="rc.html#tm">FR_TIMEOUT</a>
</p>
</div>
<div class="para desc">
<h4>解説</h4>
<p>ファイルを閉じます。何らかの書き込みの行われたファイルの場合、キャッシュされた状態(リード/ライト バッファ上のデータ、変更されたFATやディレクトリ項目)はディスクに書き戻されます。関数が正常終了すると、そのファイル オブジェクトは無効になり、そのメモリも解放できます。</p>
<p>ファイル オブジェクトが読み出し専用モードで、<tt>_FS_LOCK</tt>オプションが選択されていない場合は、ファイルを閉じずにファイル オブジェクトを破棄することもできます。しかし、これは将来の互換性の点で推奨はされません。</p>
</div>
<div class="para comp">
<h4>対応情報</h4>
<p>全ての構成で使用可能です。</p>
</div>
<div class="para ref">
<h4>参照</h4>
<tt><a href="open.html">f_open</a>, <a href="read.html">f_read</a>, <a href="write.html">f_write</a>, <a href="sync.html">f_sync</a>, <a href="sfile.html">FIL</a>, <a href="sfatfs.html">FATFS</a></tt>
</div>
<p class="foot"><a href="../00index_j.html">戻る</a></p>
</body>
</html>
| hyller/CodeLibrary | stm32cubef1/STM32Cube_FW_F1_V1.3.0/Middlewares/Third_Party/FatFs/doc/ja/close.html | HTML | unlicense | 2,473 |
"""
Base class for Pipeline API data loaders.
"""
from abc import (
ABCMeta,
abstractmethod,
)
from six import with_metaclass
class PipelineLoader(with_metaclass(ABCMeta)):
    """
    ABC for classes that can load data for use with zipline.pipeline APIs.

    Concrete loaders implement :meth:`load_adjusted_array`; the pipeline
    engine invokes it to materialize raw data for a set of columns over a
    date/asset window before running pipeline computations.
    """
    @abstractmethod
    def load_adjusted_array(self, columns, dates, assets, mask):
        """
        Load data for the requested ``columns``.

        Parameters
        ----------
        columns
            Pipeline columns for which data is requested.
        dates
            Date labels of the rows to load.
        assets
            Asset labels of the columns to load.
        mask
            Boolean array marking which (date, asset) pairs are actually
            needed -- presumably shaped (len(dates), len(assets));
            confirm against the calling engine.

        Returns
        -------
        A mapping from column to loaded array data; see concrete
        subclasses for the exact return contract.
        """
        pass
| wilsonkichoi/zipline | zipline/pipeline/loaders/base.py | Python | apache-2.0 | 405 |
/*
* Copyright 2013 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrBicubicTextureEffect_DEFINED
#define GrBicubicTextureEffect_DEFINED
#include "GrSingleTextureEffect.h"
#include "GrTextureDomain.h"
#include "glsl/GrGLSLFragmentProcessor.h"
class GrGLBicubicEffect;
class GrInvariantOutput;
/**
 * Fragment processor that samples a texture with bicubic filtering.
 * The 4x4 coefficient matrix defaults to the Mitchell filter; custom
 * coefficients, tile modes, or a texture domain may be supplied via
 * the Create() overloads below.
 */
class GrBicubicEffect : public GrSingleTextureEffect {
public:
    enum {
        kFilterTexelPad = 2, // Given a src rect in texels to be filtered, this number of
                             // surrounding texels are needed by the kernel in x and y.
    };
    virtual ~GrBicubicEffect();

    /// The 16 bicubic filter coefficients in use.
    const float* coefficients() const { return fCoefficients; }

    const char* name() const override { return "Bicubic"; }

    /// The texture domain restriction (may be the ignored/default domain).
    const GrTextureDomain& domain() const { return fDomain; }

    /**
     * Create a simple filter effect with custom bicubic coefficients and optional domain.
     */
    static const GrFragmentProcessor* Create(GrTexture* tex, const SkScalar coefficients[16],
                                             const SkRect* domain = nullptr) {
        if (nullptr == domain) {
            // No domain: clamp in both dimensions instead.
            static const SkShader::TileMode kTileModes[] = { SkShader::kClamp_TileMode,
                                                             SkShader::kClamp_TileMode };
            return Create(tex, coefficients, GrCoordTransform::MakeDivByTextureWHMatrix(tex),
                          kTileModes);
        } else {
            return new GrBicubicEffect(tex, coefficients,
                                       GrCoordTransform::MakeDivByTextureWHMatrix(tex), *domain);
        }
    }

    /**
     * Create a Mitchell filter effect with specified texture matrix and x/y tile modes.
     */
    static const GrFragmentProcessor* Create(GrTexture* tex, const SkMatrix& matrix,
                                             const SkShader::TileMode tileModes[2]) {
        return Create(tex, gMitchellCoefficients, matrix, tileModes);
    }

    /**
     * Create a filter effect with custom bicubic coefficients, the texture matrix, and the x/y
     * tilemodes.
     */
    static const GrFragmentProcessor* Create(GrTexture* tex, const SkScalar coefficients[16],
                                             const SkMatrix& matrix,
                                             const SkShader::TileMode tileModes[2]) {
        return new GrBicubicEffect(tex, coefficients, matrix, tileModes);
    }

    /**
     * Create a Mitchell filter effect with a texture matrix and a domain.
     */
    static const GrFragmentProcessor* Create(GrTexture* tex, const SkMatrix& matrix,
                                             const SkRect& domain) {
        return new GrBicubicEffect(tex, gMitchellCoefficients, matrix, domain);
    }

    /**
     * Determines whether the bicubic effect should be used based on the transformation from the
     * local coords to the device. Returns true if the bicubic effect should be used. filterMode
     * is set to appropriate filtering mode to use regardless of the return result (e.g. when this
     * returns false it may indicate that the best fallback is to use kMipMap, kBilerp, or
     * kNearest).
     */
    static bool ShouldUseBicubic(const SkMatrix& localCoordsToDevice,
                                 GrTextureParams::FilterMode* filterMode);

private:
    GrBicubicEffect(GrTexture*, const SkScalar coefficients[16], const SkMatrix &matrix,
                    const SkShader::TileMode tileModes[2]);
    GrBicubicEffect(GrTexture*, const SkScalar coefficients[16], const SkMatrix &matrix,
                    const SkRect& domain);

    GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;

    void onGetGLSLProcessorKey(const GrGLSLCaps&, GrProcessorKeyBuilder*) const override;

    bool onIsEqual(const GrFragmentProcessor&) const override;

    void onComputeInvariantOutput(GrInvariantOutput* inout) const override;

    float fCoefficients[16];        // 4x4 filter kernel coefficients
    GrTextureDomain fDomain;        // optional sampling restriction

    GR_DECLARE_FRAGMENT_PROCESSOR_TEST;

    // Coefficients for the default (Mitchell) filter.
    static const SkScalar gMitchellCoefficients[16];

    typedef GrSingleTextureEffect INHERITED;
};
#endif
| tmpvar/skia.cc | src/gpu/effects/GrBicubicEffect.h | C | apache-2.0 | 4,198 |
# wkhtmltopdf configuration for the wicked_pdf plugin.
#
# NOTE(review): :exe_path and the commented-out :wkhtmltopdf key point at
# the same binary; which key is honored depends on the wicked_pdf
# version -- confirm against the bundled plugin before changing.
WickedPdf.config = {
  #:wkhtmltopdf => '/usr/local/bin/wkhtmltopdf',
  #:layout => "pdf.html",
  # Absolute path to the wkhtmltopdf executable.
  :exe_path => '/usr/local/bin/wkhtmltopdf'
}
| prafula/FedenaT | vendor/plugins/wicked_pdf/generators/wicked_pdf/templates/wicked_pdf.rb | Ruby | apache-2.0 | 142 |
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ClientConnectionConfiguration contains details for constructing a client.
type ClientConnectionConfiguration struct {
	// kubeConfigFile is the path to a kubeconfig file.
	KubeConfigFile string `json:"kubeconfig"`
	// acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the
	// default value of 'application/json'. This field will control all connections to the server used by a particular
	// client.
	AcceptContentTypes string `json:"acceptContentTypes"`
	// contentType is the content type used when sending data to the server from this client.
	ContentType string `json:"contentType"`
	// qps controls the number of queries per second allowed for this connection.
	QPS float32 `json:"qps"`
	// burst allows extra queries to accumulate when a client is exceeding its rate.
	Burst int `json:"burst"`
}
// KubeProxyIPTablesConfiguration contains iptables-related configuration
// details for the Kubernetes proxy server.
type KubeProxyIPTablesConfiguration struct {
// masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using
// the pure iptables proxy mode. Values must be within the range [0, 31].
MasqueradeBit *int32 `json:"masqueradeBit"`
// masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.
MasqueradeAll bool `json:"masqueradeAll"`
// syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m',
// '2h22m'). Must be greater than 0.
SyncPeriod metav1.Duration `json:"syncPeriod"`
// minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m',
// '2h22m').
MinSyncPeriod metav1.Duration `json:"minSyncPeriod"`
}
// KubeProxyConntrackConfiguration contains conntrack settings for
// the Kubernetes proxy server.
type KubeProxyConntrackConfiguration struct {
// max is the maximum number of NAT connections to track (0 to
// leave as-is). This takes precedence over conntrackMaxPerCore and conntrackMin.
Max int32 `json:"max"`
// maxPerCore is the maximum number of NAT connections to track
// per CPU core (0 to leave the limit as-is and ignore conntrackMin).
MaxPerCore int32 `json:"maxPerCore"`
// min is the minimum value of connect-tracking records to allocate,
// regardless of conntrackMaxPerCore (set conntrackMaxPerCore=0 to leave the limit as-is).
Min int32 `json:"min"`
// tcpEstablishedTimeout is how long an idle TCP connection will be kept open
// (e.g. '2s'). Must be greater than 0.
TCPEstablishedTimeout metav1.Duration `json:"tcpEstablishedTimeout"`
// tcpCloseWaitTimeout is how long an idle conntrack entry
// in CLOSE_WAIT state will remain in the conntrack
// table. (e.g. '60s'). Must be greater than 0 to set.
TCPCloseWaitTimeout metav1.Duration `json:"tcpCloseWaitTimeout"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// KubeProxyConfiguration contains everything necessary to configure the
// Kubernetes proxy server.
type KubeProxyConfiguration struct {
	metav1.TypeMeta `json:",inline"`

	// featureGates is a comma-separated list of key=value pairs that control
	// which alpha/beta features are enabled.
	//
	// TODO this really should be a map but that requires refactoring all
	// components to use config files because local-up-cluster.sh only supports
	// the --feature-gates flag right now, which is comma-separated key=value
	// pairs.
	FeatureGates string `json:"featureGates"`
	// bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0
	// for all interfaces)
	BindAddress string `json:"bindAddress"`
	// healthzBindAddress is the IP address and port for the health check server to serve on,
	// defaulting to 0.0.0.0:10256
	HealthzBindAddress string `json:"healthzBindAddress"`
	// metricsBindAddress is the IP address and port for the metrics server to serve on,
	// defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces)
	MetricsBindAddress string `json:"metricsBindAddress"`
	// enableProfiling enables profiling via web interface on /debug/pprof handler.
	// Profiling handlers will be handled by metrics server.
	EnableProfiling bool `json:"enableProfiling"`
	// clusterCIDR is the CIDR range of the pods in the cluster. It is used to
	// bridge traffic coming from outside of the cluster. If not provided,
	// no off-cluster bridging will be performed.
	ClusterCIDR string `json:"clusterCIDR"`
	// hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.
	HostnameOverride string `json:"hostnameOverride"`
	// clientConnection specifies the kubeconfig file and client connection settings for the proxy
	// server to use when communicating with the apiserver.
	ClientConnection ClientConnectionConfiguration `json:"clientConnection"`
	// iptables contains iptables-related configuration options.
	IPTables KubeProxyIPTablesConfiguration `json:"iptables"`
	// oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within
	// the range [-1000, 1000]
	OOMScoreAdj *int32 `json:"oomScoreAdj"`
	// mode specifies which proxy mode to use.
	Mode ProxyMode `json:"mode"`
	// portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
	// in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
	PortRange string `json:"portRange"`
	// resourceContainer is the absolute name of the resource-only container to create and run
	// the Kube-proxy in (Default: /kube-proxy).
	ResourceContainer string `json:"resourceContainer"`
	// udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
	// Must be greater than 0. Only applicable for proxyMode=userspace.
	UDPIdleTimeout metav1.Duration `json:"udpTimeoutMilliseconds"`
	// conntrack contains conntrack-related configuration options.
	Conntrack KubeProxyConntrackConfiguration `json:"conntrack"`
	// configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater
	// than 0.
	ConfigSyncPeriod metav1.Duration `json:"configSyncPeriod"`
}
// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables'
// (newer, faster). If blank, use the best-available proxy (currently iptables, but may
// change in future versions). If the iptables proxy is selected, regardless of how, but
// the system's kernel or iptables versions are insufficient, this always falls back to the
// userspace proxy.
type ProxyMode string
const (
ProxyModeUserspace ProxyMode = "userspace"
ProxyModeIPTables ProxyMode = "iptables"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type KubeSchedulerConfiguration struct {
metav1.TypeMeta `json:",inline"`
// port is the port that the scheduler's http service runs on.
Port int `json:"port"`
// address is the IP address to serve on.
Address string `json:"address"`
// algorithmProvider is the scheduling algorithm provider to use.
AlgorithmProvider string `json:"algorithmProvider"`
// policyConfigFile is the filepath to the scheduler policy configuration.
PolicyConfigFile string `json:"policyConfigFile"`
// enableProfiling enables profiling via web interface.
EnableProfiling *bool `json:"enableProfiling"`
// enableContentionProfiling enables lock contention profiling, if enableProfiling is true.
EnableContentionProfiling bool `json:"enableContentionProfiling"`
// contentType is contentType of requests sent to apiserver.
ContentType string `json:"contentType"`
// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.
KubeAPIQPS float32 `json:"kubeAPIQPS"`
// kubeAPIBurst is the QPS burst to use while talking with kubernetes apiserver.
KubeAPIBurst int `json:"kubeAPIBurst"`
// schedulerName is name of the scheduler, used to select which pods
// will be processed by this scheduler, based on pod's "spec.SchedulerName".
SchedulerName string `json:"schedulerName"`
// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
// corresponding to every RequiredDuringScheduling affinity rule.
// HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100.
HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"`
// Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.
FailureDomains string `json:"failureDomains"`
// leaderElection defines the configuration of leader election client.
LeaderElection LeaderElectionConfiguration `json:"leaderElection"`
// LockObjectNamespace defines the namespace of the lock object
LockObjectNamespace string `json:"lockObjectNamespace"`
// LockObjectName defines the lock object name
LockObjectName string `json:"lockObjectName"`
// PolicyConfigMapName is the name of the ConfigMap object that specifies
// the scheduler's policy config. If UseLegacyPolicyConfig is true, scheduler
// uses PolicyConfigFile. If UseLegacyPolicyConfig is false and
// PolicyConfigMapName is not empty, the ConfigMap object with this name must
// exist in PolicyConfigMapNamespace before scheduler initialization.
PolicyConfigMapName string `json:"policyConfigMapName"`
// PolicyConfigMapNamespace is the namespace where the above policy config map
// is located. If none is provided default system namespace ("kube-system")
// will be used.
PolicyConfigMapNamespace string `json:"policyConfigMapNamespace"`
// UseLegacyPolicyConfig tells the scheduler to ignore Policy ConfigMap and
// to use PolicyConfigFile if available.
UseLegacyPolicyConfig bool `json:"useLegacyPolicyConfig"`
}
// HairpinMode denotes how the kubelet should configure networking to handle
// hairpin packets.
type HairpinMode string
// Enum settings for different ways to handle hairpin packets.
const (
// Set the hairpin flag on the veth of containers in the respective
// container runtime.
HairpinVeth = "hairpin-veth"
// Make the container bridge promiscuous. This will force it to accept
// hairpin packets, even if the flag isn't set on ports of the bridge.
PromiscuousBridge = "promiscuous-bridge"
// Neither of the above. If the kubelet is started in this hairpin mode
// and kube-proxy is running in iptables mode, hairpin packets will be
// dropped by the container bridge.
HairpinNone = "none"
)
// LeaderElectionConfiguration defines the configuration of leader election
// clients for components that can run with leader election enabled.
type LeaderElectionConfiguration struct {
	// leaderElect enables a leader election client to gain leadership
	// before executing the main loop. Enable this when running replicated
	// components for high availability.
	// NOTE(review): pointer-typed, presumably so an unset value can be
	// distinguished from an explicit false during defaulting — confirm.
	LeaderElect *bool `json:"leaderElect"`
	// leaseDuration is the duration that non-leader candidates will wait
	// after observing a leadership renewal until attempting to acquire
	// leadership of a led but unrenewed leader slot. This is effectively the
	// maximum duration that a leader can be stopped before it is replaced
	// by another candidate. This is only applicable if leader election is
	// enabled.
	LeaseDuration metav1.Duration `json:"leaseDuration"`
	// renewDeadline is the interval between attempts by the acting master to
	// renew a leadership slot before it stops leading. This must be less
	// than or equal to the lease duration. This is only applicable if leader
	// election is enabled.
	RenewDeadline metav1.Duration `json:"renewDeadline"`
	// retryPeriod is the duration the clients should wait between attempting
	// acquisition and renewal of a leadership. This is only applicable if
	// leader election is enabled.
	RetryPeriod metav1.Duration `json:"retryPeriod"`
	// resourceLock indicates the resource object type that will be used to lock
	// during leader election cycles.
	ResourceLock string `json:"resourceLock"`
}
// Default identifiers for the scheduler's leader-election lock object.
const (
	// "kube-system" is the default scheduler lock object namespace
	SchedulerDefaultLockObjectNamespace string = "kube-system"
	// "kube-scheduler" is the default scheduler lock object name
	// NOTE(review): unlike the namespace above, this constant is untyped;
	// adding an explicit string type would be more consistent, but could
	// affect assignability to named string types — confirm before changing.
	SchedulerDefaultLockObjectName = "kube-scheduler"
)
| wlan0/kubernetes | pkg/apis/componentconfig/v1alpha1/types.go | GO | apache-2.0 | 12,824 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package
///////////////
package org.apache.jena.ontology.impl;
// Imports
///////////////
import java.io.ByteArrayInputStream;
import java.util.*;
import org.apache.jena.enhanced.EnhGraph ;
import org.apache.jena.ontology.* ;
import org.apache.jena.ontology.impl.OntClassImpl ;
import org.apache.jena.rdf.model.* ;
import org.apache.jena.reasoner.Reasoner ;
import org.apache.jena.reasoner.ReasonerRegistry ;
import org.apache.jena.reasoner.test.TestUtil ;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import junit.framework.TestCase;
/**
 * <p>
 * Unit tests on ont models with reasoning
 * </p>
 * <p>
 * Most tests come in pairs: one run on a model backed by a reasoner (where
 * transitive closure is inferred) and one on a plain model (where only the
 * asserted triples are visible), asserting the expected differences in
 * direct vs. indirect sub-class / sub-property / declared-property listings.
 * </p>
 */
public class TestOntReasoning
    extends TestCase
{
    // Constants
    //////////////////////////////////
    /** Common base URI for resources created by these tests. */
    public static final String BASE = "http://jena.hpl.hp.com/testing/ontology";
    /** Namespace used when minting test class/property/individual URIs. */
    public static final String NS = BASE + "#";
    // Static variables
    //////////////////////////////////
    // Instance variables
    //////////////////////////////////
    // Constructors
    //////////////////////////////////
    /** JUnit 3 style constructor: delegates the test name to {@link TestCase}. */
    public TestOntReasoning( String name ) {
        super( name );
    }
    // External signature methods
    //////////////////////////////////
    @Override
    public void setUp() {
        // ensure the ont doc manager is in a consistent state
        OntDocumentManager.getInstance().reset( true );
    }
    /**
     * With a reasoner: the plain listing includes the transitively inferred
     * sub-class D, while the direct ({@code true}) listing collapses it away.
     */
    public void testSubClassDirectTransInf1a() {
        OntModel m = ModelFactory.createOntologyModel( ProfileRegistry.OWL_LITE_LANG );
        OntClass A = m.createClass( NS + "A" );
        OntClass B = m.createClass( NS + "B" );
        OntClass C = m.createClass( NS + "C" );
        OntClass D = m.createClass( NS + "D" );
        A.addSubClass( B );
        A.addSubClass( C );
        C.addSubClass( D );
        iteratorTest( A.listSubClasses(), new Object[] {B, C, D} );
        iteratorTest( A.listSubClasses( true ), new Object[] {B, C} );
    }
    /**
     * As 1a, but the inferable A-D link is also asserted directly; the
     * direct listing must still exclude D.
     */
    public void testSubClassDirectTransInf1b() {
        OntModel m = ModelFactory.createOntologyModel( ProfileRegistry.OWL_LITE_LANG );
        OntClass A = m.createClass( NS + "A" );
        OntClass B = m.createClass( NS + "B" );
        OntClass C = m.createClass( NS + "C" );
        OntClass D = m.createClass( NS + "D" );
        A.addSubClass( B );
        A.addSubClass( C );
        C.addSubClass( D );
        A.addSubClass( D ); // directly asserts a link that could be inferred
        iteratorTest( A.listSubClasses(), new Object[] {B, C, D} );
        iteratorTest( A.listSubClasses( true ), new Object[] {B, C} );
    }
    /**
     * Without a reasoner only asserted links are visible, so D (reachable
     * only transitively) is absent even from the non-direct listing.
     */
    public void testSubClassDirectTransInf2a() {
        // test the code path for generating direct sc with no reasoner
        OntModelSpec spec = new OntModelSpec( OntModelSpec.OWL_LITE_MEM );
        spec.setReasonerFactory( null );
        OntModel m = ModelFactory.createOntologyModel( spec, null );
        OntClass A = m.createClass( NS + "A" );
        OntClass B = m.createClass( NS + "B" );
        OntClass C = m.createClass( NS + "C" );
        OntClass D = m.createClass( NS + "D" );
        A.addSubClass( B );
        A.addSubClass( C );
        C.addSubClass( D );
        iteratorTest( A.listSubClasses(), new Object[] {B, C} );
        iteratorTest( A.listSubClasses( true ), new Object[] {B, C} );
    }
    /**
     * As 2a, but with A-D asserted explicitly: it now shows up in the plain
     * listing, yet the direct listing must still filter it out.
     */
    public void testSubClassDirectTransInf2b() {
        // test the code path for generating direct sc with no reasoner
        OntModelSpec spec = new OntModelSpec( OntModelSpec.OWL_LITE_MEM );
        spec.setReasonerFactory( null );
        OntModel m = ModelFactory.createOntologyModel( spec, null );
        OntClass A = m.createClass( NS + "A" );
        OntClass B = m.createClass( NS + "B" );
        OntClass C = m.createClass( NS + "C" );
        OntClass D = m.createClass( NS + "D" );
        A.addSubClass( B );
        A.addSubClass( C );
        C.addSubClass( D );
        A.addSubClass( D ); // directly asserts a link that could be inferred
        iteratorTest( A.listSubClasses(), new Object[] {B, C, D} );
        iteratorTest( A.listSubClasses( true ), new Object[] {B, C} );
    }
    /**
     * hasSuperClass(x, true) must pick out only the direct super-class, both
     * without (pass 1) and with (pass 2) inference, and must not be confused
     * by a class equivalent to the subject (c3 equivalent to c2).
     */
    public void testListSuperClassesDirect() {
        String ns = "http://example.org/test#";
        OntModel m0 = ModelFactory.createOntologyModel( OntModelSpec.OWL_MEM );
        OntClass c0 = m0.createClass( ns + "C0" );
        OntClass c1 = m0.createClass( ns + "C1" );
        OntClass c2 = m0.createClass( ns + "C2" );
        OntClass c3 = m0.createClass( ns + "C3" );
        c0.addSubClass( c1 );
        c1.addSubClass( c2 );
        c2.addEquivalentClass( c3 );
        // now c1 is the direct super-class of c2, even allowing for the equiv with c3
        assertFalse( "pass 1: c0 should not be a direct super of c2", c2.hasSuperClass( c0, true ) );
        assertFalse( "pass 1: c3 should not be a direct super of c2", c2.hasSuperClass( c3, true ) );
        assertFalse( "pass 1: c2 should not be a direct super of c2", c2.hasSuperClass( c2, true ) );
        assertTrue( "pass 1: c1 should be a direct super of c2", c2.hasSuperClass( c1, true ) );
        // second pass - with inference
        m0 = ModelFactory.createOntologyModel( OntModelSpec.OWL_MEM_RULE_INF );
        c0 = m0.createClass( ns + "C0" );
        c1 = m0.createClass( ns + "C1" );
        c2 = m0.createClass( ns + "C2" );
        c3 = m0.createClass( ns + "C3" );
        c0.addSubClass( c1 );
        c1.addSubClass( c2 );
        c2.addEquivalentClass( c3 );
        // now c1 is the direct super-class of c2, even allowing for the equiv with c3
        assertFalse( "pass 2: c0 should not be a direct super of c2", c2.hasSuperClass( c0, true ) );
        assertFalse( "pass 2: c3 should not be a direct super of c2", c2.hasSuperClass( c3, true ) );
        assertFalse( "pass 2: c2 should not be a direct super of c2", c2.hasSuperClass( c2, true ) );
        assertTrue( "pass 2: c1 should be a direct super of c2", c2.hasSuperClass( c1, true ) );
    }
    /**
     * Sub-property analogue of testSubClassDirectTransInf1a. Note the
     * expected non-direct listing includes p itself (reflexive closure
     * under the reasoner).
     */
    public void testSubPropertyDirectTransInf1a() {
        OntModel m = ModelFactory.createOntologyModel( ProfileRegistry.OWL_LITE_LANG );
        OntProperty p = m.createObjectProperty( NS + "p" );
        OntProperty q = m.createObjectProperty( NS + "q" );
        OntProperty r = m.createObjectProperty( NS + "r" );
        OntProperty s = m.createObjectProperty( NS + "s" );
        p.addSubProperty( q );
        p.addSubProperty( r );
        r.addSubProperty( s );
        iteratorTest( p.listSubProperties(), new Object[] {p,q,r,s} );
        iteratorTest( p.listSubProperties( true ), new Object[] {q,r} );
    }
    /**
     * As 1a for properties, with the inferable p-s link also asserted;
     * the direct listing must still exclude s.
     */
    public void testSubPropertyDirectTransInf1b() {
        OntModel m = ModelFactory.createOntologyModel( ProfileRegistry.OWL_LITE_LANG );
        OntProperty p = m.createObjectProperty( NS + "p" );
        OntProperty q = m.createObjectProperty( NS + "q" );
        OntProperty r = m.createObjectProperty( NS + "r" );
        OntProperty s = m.createObjectProperty( NS + "s" );
        p.addSubProperty( q );
        p.addSubProperty( r );
        r.addSubProperty( s );
        p.addSubProperty( s ); // directly asserts a link that could be inferred
        iteratorTest( p.listSubProperties(), new Object[] {p,q,r,s} );
        iteratorTest( p.listSubProperties( true ), new Object[] {q,r} );
    }
    /**
     * Sub-property analogue of testSubClassDirectTransInf2a (no reasoner:
     * only asserted links visible, no reflexive p in the results).
     */
    public void testSubPropertyDirectTransInf2a() {
        // test the code path for generating direct sc with no reasoner
        OntModelSpec spec = new OntModelSpec( OntModelSpec.OWL_LITE_MEM );
        spec.setReasonerFactory( null );
        OntModel m = ModelFactory.createOntologyModel( spec, null );
        OntProperty p = m.createObjectProperty( NS + "p" );
        OntProperty q = m.createObjectProperty( NS + "q" );
        OntProperty r = m.createObjectProperty( NS + "r" );
        OntProperty s = m.createObjectProperty( NS + "s" );
        p.addSubProperty( q );
        p.addSubProperty( r );
        r.addSubProperty( s );
        iteratorTest( p.listSubProperties(), new Object[] {q,r} );
        iteratorTest( p.listSubProperties( true ), new Object[] {q,r} );
    }
    /**
     * Sub-property analogue of testSubClassDirectTransInf2b.
     */
    public void testSubPropertyDirectTransInf2b() {
        // test the code path for generating direct sc with no reasoner
        OntModelSpec spec = new OntModelSpec( OntModelSpec.OWL_LITE_MEM );
        spec.setReasonerFactory( null );
        OntModel m = ModelFactory.createOntologyModel( spec, null );
        OntProperty p = m.createObjectProperty( NS + "p" );
        OntProperty q = m.createObjectProperty( NS + "q" );
        OntProperty r = m.createObjectProperty( NS + "r" );
        OntProperty s = m.createObjectProperty( NS + "s" );
        p.addSubProperty( q );
        p.addSubProperty( r );
        r.addSubProperty( s );
        p.addSubProperty( s ); // directly asserts a link that could be inferred
        iteratorTest( p.listSubProperties(), new Object[] {q,r,s} );
        iteratorTest( p.listSubProperties( true ), new Object[] {q,r} );
    }
    /**
     * listDeclaredProperties over a class hierarchy with domains and a
     * has-value restriction: checks default, direct-only (true) and
     * inherited (false) variants at each level of the hierarchy.
     */
    public void testListDeclaredProperties0() {
        OntModel m = ModelFactory.createOntologyModel( OntModelSpec.OWL_MEM_RULE_INF, null );
        // a simple class hierarchy organism -> vertebrate -> mammal -> dog
        OntClass organism = m.createClass( NS + "Organism" );
        OntClass vertebrate = m.createClass( NS + "Vertebrate" );
        OntClass mammal = m.createClass( NS + "Mammal" );
        OntClass dog = m.createClass( NS + "Dog" );
        organism.addSubClass( vertebrate );
        vertebrate.addSubClass( mammal );
        mammal.addSubClass( dog );
        // hair as a covering
        OntClass covering = m.createClass( NS + "Covering" );
        Individual hair = m.createIndividual( NS+"hair", covering );
        // various properties
        DatatypeProperty limbsCount = m.createDatatypeProperty( NS + "limbsCount" );
        DatatypeProperty hasCovering = m.createDatatypeProperty( NS + "hasCovering" );
        DatatypeProperty numYoung = m.createDatatypeProperty( NS + "numYoung" );
        // vertebrates have limbs, mammals have live young
        limbsCount.addDomain( vertebrate );
        numYoung.addDomain( mammal );
        // mammals have-covering = hair
        Restriction r = m.createRestriction( hasCovering );
        r.convertToHasValueRestriction( hair );
        mammal.addSuperClass( r );
        iteratorTest( organism.listDeclaredProperties(), new Object[] {hasCovering} );
        iteratorTest( vertebrate.listDeclaredProperties(), new Object[] {limbsCount, hasCovering} );
        iteratorTest( mammal.listDeclaredProperties(), new Object[] {limbsCount, hasCovering, numYoung} );
        iteratorTest( dog.listDeclaredProperties(), new Object[] {limbsCount, hasCovering, numYoung} );
        iteratorTest( r.listDeclaredProperties(), new Object[] {hasCovering} );
        iteratorTest( organism.listDeclaredProperties(true), new Object[] {hasCovering} );
        iteratorTest( vertebrate.listDeclaredProperties(true), new Object[] {limbsCount} );
        iteratorTest( mammal.listDeclaredProperties(true), new Object[] {numYoung} );
        iteratorTest( dog.listDeclaredProperties(true), new Object[] {} );
        iteratorTest( r.listDeclaredProperties(true), new Object[] {hasCovering} );
        iteratorTest( organism.listDeclaredProperties(false), new Object[] {hasCovering} );
        iteratorTest( vertebrate.listDeclaredProperties(false), new Object[] {hasCovering,limbsCount} );
        iteratorTest( mammal.listDeclaredProperties(false), new Object[] {hasCovering,numYoung,limbsCount} );
        iteratorTest( dog.listDeclaredProperties(false), new Object[] {hasCovering,numYoung,limbsCount} );
        iteratorTest( r.listDeclaredProperties(false), new Object[] {hasCovering} );
    }
    /**
     * Test LDP with anonymous classes
     */
    public void testListDeclaredProperties1() {
        OntModel m = ModelFactory.createOntologyModel( OntModelSpec.OWL_MEM );
        OntProperty p = m.createOntProperty( NS + "p" );
        OntClass a = m.createClass( NS + "a" );
        Restriction r = m.createMinCardinalityRestriction( null, p, 1 );
        r.addSubClass( a );
        // NOTE(review): local 'i' is unused — the assertion below re-invokes
        // listDeclaredProperties(); consider removing the local.
        Iterator<OntProperty> i = a.listDeclaredProperties();
        TestUtil.assertIteratorLength( a.listDeclaredProperties(), 1 );
    }
    /** Test LDP with resources in different sub-models */
    public void testListDeclaredProperties2() {
        OntModel m0 = ModelFactory.createOntologyModel( OntModelSpec.OWL_MEM );
        // in model M0, p0 has class c0 in the domain
        OntClass c0 = m0.createClass( NS + "c0" );
        ObjectProperty p0 = m0.createObjectProperty( NS + "p0" );
        p0.setDomain( c0 );
        // in model M1, class c1 is a subClass of c0
        OntModel m1 = ModelFactory.createOntologyModel( OntModelSpec.OWL_MEM );
        OntClass c1 = m1.createClass( NS + "c1" );
        c1.addSuperClass( c0 );
        // simulate imports
        m1.addSubModel( m0 );
        // get a c0 reference from m1
        OntClass cc0 = m1.getOntClass( NS + "c0" );
        assertNotNull( cc0 );
        TestUtil.assertIteratorValues( this, c1.listDeclaredProperties(), new Object[] {p0} );
        TestUtil.assertIteratorValues( this, c0.listDeclaredProperties(false), new Object[] {p0} );
        TestUtil.assertIteratorValues( this, cc0.listDeclaredProperties(false), new Object[] {p0} );
    }
    /**
     * Problem reported by Andy Seaborne - combine abox and tbox in RDFS with
     * ontmodel
     */
    public void testRDFSAbox() {
        String sourceT =
            "<rdf:RDF "
            + " xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#'"
            + " xmlns:rdfs='http://www.w3.org/2000/01/rdf-schema#'"
            + " xmlns:owl=\"http://www.w3.org/2002/07/owl#\">"
            + " <owl:Class rdf:about='http://example.org/foo#A'>"
            + " </owl:Class>"
            + "</rdf:RDF>";
        String sourceA =
            "<rdf:RDF "
            + " xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#'"
            + " xmlns:rdfs='http://www.w3.org/2000/01/rdf-schema#' "
            + " xmlns:owl=\"http://www.w3.org/2002/07/owl#\">"
            + " <rdf:Description rdf:about='http://example.org/foo#x'>"
            + " <rdf:type rdf:resource='http://example.org/foo#A' />"
            + " </rdf:Description>"
            + "</rdf:RDF>";
        Model tBox = ModelFactory.createDefaultModel();
        tBox.read(new ByteArrayInputStream(sourceT.getBytes()), "http://example.org/foo");
        Model aBox = ModelFactory.createDefaultModel();
        aBox.read(new ByteArrayInputStream(sourceA.getBytes()), "http://example.org/foo");
        // bind the schema (tbox) to the reasoner, then wrap the instance data (abox)
        Reasoner reasoner = ReasonerRegistry.getOWLReasoner();
        reasoner = reasoner.bindSchema(tBox);
        OntModelSpec spec = new OntModelSpec(OntModelSpec.OWL_MEM_RULE_INF);
        spec.setReasoner(reasoner);
        OntModel m = ModelFactory.createOntologyModel(spec, aBox);
        List<Individual> inds = new ArrayList<>();
        for (Iterator<Individual> i = m.listIndividuals(); i.hasNext();) {
            inds.add(i.next());
        }
        assertTrue("x should be an individual", inds.contains(m.getResource("http://example.org/foo#x")));
    }
    /**
     * With a transitive-inference spec, hasSuperClass(_, true) must not fall
     * back to the non-reasoner hasSuperClassDirect code path; the anonymous
     * subclass throws if it does.
     */
    public void testInvokeDirectClassReasoning() {
        OntModel m = ModelFactory.createOntologyModel(OntModelSpec.RDFS_MEM_TRANS_INF, null);
        Resource a = m.createResource("http://example.org#A");
        Resource b = m.createResource("http://example.org#B");
        OntClass A = new OntClassImpl(a.asNode(), (EnhGraph) m) {
            @Override
            protected boolean hasSuperClassDirect(Resource cls) {
                throw new RuntimeException("did not find direct reasoner");
            }
        };
        // will throw an exception if the wrong code path is taken
        A.hasSuperClass(b, true);
    }
    /** listIndividuals must report a typed resource when RDFS inference is on. */
    public void testListIndividualsWithReasoner() {
        OntModel m = ModelFactory.createOntologyModel( OntModelSpec.OWL_MEM_RDFS_INF );
        OntClass C = m.createClass( NS + "C" );
        Resource a = m.createResource( NS + "a", C );
        TestUtil.assertIteratorValues( this, m.listIndividuals(), new Object[] {a} );
    }
    /**
     * Bug report by kers - maximal lower elements calculation not correct in models
     * with no reasoner. Manifests as direct sub-class bug.
     */
    public void testListSubClassesDirectNoReasoner() {
        OntModel m = ModelFactory.createOntologyModel( OntModelSpec.RDFS_MEM );
        OntClass r = m.createClass( NS + "r" );
        OntClass a = m.createClass( NS + "a" );
        OntClass b = m.createClass( NS + "b" );
        OntClass c = m.createClass( NS + "c" );
        OntClass d = m.createClass( NS + "d" );
        OntClass e = m.createClass( NS + "e" );
        OntClass f = m.createClass( NS + "f" );
        OntClass g = m.createClass( NS + "g" );
        g.addSuperClass( c );
        f.addSuperClass( c );
        e.addSuperClass( b );
        d.addSuperClass( b );
        c.addSuperClass( a );
        b.addSuperClass( a );
        // simulated closure
        r.addSubClass( a );
        r.addSubClass( b );
        r.addSubClass( c );
        r.addSubClass( d );
        r.addSubClass( e );
        r.addSubClass( f );
        r.addSubClass( g );
        // only 'a' is a *direct* sub-class of r once the closure is filtered
        TestUtil.assertIteratorValues( this, r.listSubClasses( true ), new Object[] {a} );
    }
    /**
     * In strict mode, classes and properties must not be viewable as
     * individuals (OWL Lite separation of vocabularies).
     */
    public void testOwlLiteClasses() {
        OntModel model = ModelFactory.createOntologyModel( OntModelSpec.OWL_LITE_MEM_TRANS_INF );
        OntClass b = model.createClass( NS + "B" );
        OntProperty p0 = model.createOntProperty( NS + "p0" );
        ObjectProperty p1 = model.createObjectProperty( NS + "p1" );
        DatatypeProperty p2 = model.createDatatypeProperty( NS + "p2" );
        Individual i0 = model.createIndividual( NS + "i0", b );
        model.setStrictMode( true );
        for (OntResource r: new OntResource[] {b,p0,p1,p2}) {
            assertFalse( r + " should not be an individual", r.canAs( Individual.class ));
        }
    }
    /** Bugrep from Benson Margulies: see
     * <a href="https://issues.apache.org/jira/browse/JENA-21">JENA-21</a>
     */
    public void testBM0() {
        OntModel m = ModelFactory.createOntologyModel( OntModelSpec.OWL_MEM_RDFS_INF );
        // should not throw NPE:
        m.listStatements( null, null, (RDFNode) null, null );
    }
    // Internal implementation methods
    //////////////////////////////////
    /** Test that an iterator delivers the expected values */
    protected void iteratorTest( Iterator<?> i, Object[] expected ) {
        Logger logger = LoggerFactory.getLogger( getClass() );
        List<Object> expList = new ArrayList<>();
        for ( Object anExpected : expected )
        {
            expList.add( anExpected );
        }
        // each delivered value must be expected, and is consumed exactly once
        while (i.hasNext()) {
            Object next = i.next();
            // debugging
            if (!expList.contains( next )) {
                logger.debug( getName() + " - Unexpected iterator result: " + next );
            }
            assertTrue( "Value " + next + " was not expected as a result from this iterator ", expList.contains( next ) );
            assertTrue( "Value " + next + " was not removed from the list ", expList.remove( next ) );
        }
        // anything left over was expected but never delivered
        if (!(expList.size() == 0)) {
            logger.debug( getName() + " Expected iterator results not found" );
            for ( Object anExpList : expList )
            {
                logger.debug( getName() + " - missing: " + anExpList );
            }
        }
        assertEquals( "There were expected elements from the iterator that were not found", 0, expList.size() );
    }
    //==============================================================================
    // Inner class definitions
    //==============================================================================
}
| CesarPantoja/jena | jena-core/src/test/java/org/apache/jena/ontology/impl/TestOntReasoning.java | Java | apache-2.0 | 20,778 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.6.0_65) on Wed Nov 20 18:46:17 PST 2013 -->
<TITLE>
FragmentPackageName
</TITLE>
<META NAME="date" CONTENT="2013-11-20">
<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../../../stylesheet.css" TITLE="Style">
<SCRIPT type="text/javascript">
function windowTitle()
{
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="FragmentPackageName";
}
}
</SCRIPT>
<NOSCRIPT>
</NOSCRIPT>
</HEAD>
<BODY BGCOLOR="white" onload="windowTitle();">
<HR>
<!-- ========= START OF TOP NAVBAR ======= -->
<A NAME="navbar_top"><!-- --></A>
<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_top_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Class</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV CLASS
<A HREF="../../../../../../com/google/gwt/inject/rebind/output/FragmentPackageName.Factory.html" title="interface in com.google.gwt.inject.rebind.output"><B>NEXT CLASS</B></A></FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../../index.html?com/google/gwt/inject/rebind/output/FragmentPackageName.html" target="_top"><B>FRAMES</B></A>
<A HREF="FragmentPackageName.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
<TR>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
SUMMARY: <A HREF="#nested_class_summary">NESTED</A> | FIELD | CONSTR | <A HREF="#method_summary">METHOD</A></FONT></TD>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
DETAIL: FIELD | CONSTR | <A HREF="#method_detail">METHOD</A></FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_top"></A>
<!-- ========= END OF TOP NAVBAR ========= -->
<HR>
<!-- ======== START OF CLASS DATA ======== -->
<H2>
<FONT SIZE="-1">
com.google.gwt.inject.rebind.output</FONT>
<BR>
Class FragmentPackageName</H2>
<PRE>
java.lang.Object
<IMG SRC="../../../../../../resources/inherit.gif" ALT="extended by "><B>com.google.gwt.inject.rebind.output.FragmentPackageName</B>
</PRE>
<HR>
<DL>
<DT><PRE>public class <B>FragmentPackageName</B><DT>extends java.lang.Object</DL>
</PRE>
<P>
Wrapping a String in <A HREF="../../../../../../com/google/gwt/inject/rebind/output/FragmentPackageName.html" title="class in com.google.gwt.inject.rebind.output"><CODE>FragmentPackageName</CODE></A> converts it to a legal name
for a fragment package. Any code that manipulates the package name of a
fragment should store and/or pass it around using this class, to ensure that
the name is legal.
<p>Normally the requested name is used as the package name, but the JVM
forbids us from placing generated code in certain packages. Luckily, we
never actually need to place code in those packages anyway, even if our rules
would normally cause us to do so (because users of Gin can only access public
parts of those packages). Since it doesn't matter where those methods go, we
arbitrarily put them in the fragment corresponding to the ginjector
interface.
<P>
<P>
<HR>
<P>
<!-- ======== NESTED CLASS SUMMARY ======== -->
<A NAME="nested_class_summary"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
<B>Nested Class Summary</B></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE>static interface</CODE></FONT></TD>
<TD><CODE><B><A HREF="../../../../../../com/google/gwt/inject/rebind/output/FragmentPackageName.Factory.html" title="interface in com.google.gwt.inject.rebind.output">FragmentPackageName.Factory</A></B></CODE>
<BR>
</TD>
</TR>
</TABLE>
<!-- ========== METHOD SUMMARY =========== -->
<A NAME="method_summary"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
<B>Method Summary</B></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> boolean</CODE></FONT></TD>
<TD><CODE><B><A HREF="../../../../../../com/google/gwt/inject/rebind/output/FragmentPackageName.html#equals(java.lang.Object)">equals</A></B>(java.lang.Object obj)</CODE>
<BR>
</TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> int</CODE></FONT></TD>
<TD><CODE><B><A HREF="../../../../../../com/google/gwt/inject/rebind/output/FragmentPackageName.html#hashCode()">hashCode</A></B>()</CODE>
<BR>
</TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> java.lang.String</CODE></FONT></TD>
<TD><CODE><B><A HREF="../../../../../../com/google/gwt/inject/rebind/output/FragmentPackageName.html#toString()">toString</A></B>()</CODE>
<BR>
</TD>
</TR>
</TABLE>
<A NAME="methods_inherited_from_class_java.lang.Object"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#EEEEFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left"><B>Methods inherited from class java.lang.Object</B></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><CODE>clone, finalize, getClass, notify, notifyAll, wait, wait, wait</CODE></TD>
</TR>
</TABLE>
<P>
<!-- ============ METHOD DETAIL ========== -->
<A NAME="method_detail"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="1"><FONT SIZE="+2">
<B>Method Detail</B></FONT></TH>
</TR>
</TABLE>
<A NAME="equals(java.lang.Object)"><!-- --></A><H3>
equals</H3>
<PRE>
public boolean <B>equals</B>(java.lang.Object obj)</PRE>
<DL>
<DD><DL>
<DT><B>Overrides:</B><DD><CODE>equals</CODE> in class <CODE>java.lang.Object</CODE></DL>
</DD>
<DD><DL>
</DL>
</DD>
</DL>
<HR>
<A NAME="hashCode()"><!-- --></A><H3>
hashCode</H3>
<PRE>
public int <B>hashCode</B>()</PRE>
<DL>
<DD><DL>
<DT><B>Overrides:</B><DD><CODE>hashCode</CODE> in class <CODE>java.lang.Object</CODE></DL>
</DD>
<DD><DL>
</DL>
</DD>
</DL>
<HR>
<A NAME="toString()"><!-- --></A><H3>
toString</H3>
<PRE>
public java.lang.String <B>toString</B>()</PRE>
<DL>
<DD><DL>
<DT><B>Overrides:</B><DD><CODE>toString</CODE> in class <CODE>java.lang.Object</CODE></DL>
</DD>
<DD><DL>
</DL>
</DD>
</DL>
<!-- ========= END OF CLASS DATA ========= -->
<HR>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<A NAME="navbar_bottom"><!-- --></A>
<A HREF="#skip-navbar_bottom" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_bottom_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Class</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV CLASS
<A HREF="../../../../../../com/google/gwt/inject/rebind/output/FragmentPackageName.Factory.html" title="interface in com.google.gwt.inject.rebind.output"><B>NEXT CLASS</B></A></FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../../index.html?com/google/gwt/inject/rebind/output/FragmentPackageName.html" target="_top"><B>FRAMES</B></A>
<A HREF="FragmentPackageName.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
<TR>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
SUMMARY: <A HREF="#nested_class_summary">NESTED</A> | FIELD | CONSTR | <A HREF="#method_summary">METHOD</A></FONT></TD>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
DETAIL: FIELD | CONSTR | <A HREF="#method_detail">METHOD</A></FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_bottom"></A>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<HR>
</BODY>
</HTML>
| mehdikwa/google-gin | javadoc/com/google/gwt/inject/rebind/output/FragmentPackageName.html | HTML | apache-2.0 | 11,469 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.zookeeper;
import java.util.concurrent.CountDownLatch;
/**
 * Placeholder of an instance which will be accessed by other threads
 * but is not yet created. Thread safe.
 */
class InstancePending<T> {
  // Based on a subtle part of the Java Language Specification,
  // in order to avoid a slight overhead of synchronization for each access.
  // Specifically: prepare() writes instanceHolder *before* calling
  // pendingLatch.countDown(), and get() re-reads the field only after
  // pendingLatch.await() has returned, so the latch supplies the
  // happens-before edge that makes the write visible without volatile.
  private final CountDownLatch pendingLatch = new CountDownLatch(1);
  /** Piggybacking on {@code pendingLatch}. */
  private InstanceHolder<T> instanceHolder;
  /** Immutable wrapper whose final field safely publishes the instance. */
  private static class InstanceHolder<T> {
    // The JLS ensures the visibility of a final field and its contents
    // unless they are exposed to another thread while the construction.
    final T instance;
    InstanceHolder(T instance) {
      this.instance = instance;
    }
  }
  /**
   * Returns the instance given by the method {@link #prepare}.
   * This is an uninterruptible blocking method
   * and the interruption flag will be set just before returning if any.
   *
   * @return the instance passed to {@link #prepare}; never null given the
   *         non-null contract of {@code prepare}
   */
  T get() {
    InstanceHolder<T> instanceHolder;
    boolean interrupted = false;
    // Re-check after every await() return; keeps waiting until the holder
    // has actually been published by prepare().
    while ((instanceHolder = this.instanceHolder) == null) {
      try {
        pendingLatch.await();
      } catch (InterruptedException e) {
        // Swallowed here so the wait is uninterruptible; the flag is
        // re-asserted on the thread just before returning.
        interrupted = true;
      }
    }
    if (interrupted) {
      Thread.currentThread().interrupt();
    }
    return instanceHolder.instance;
  }
  /**
   * Associates the given instance for the method {@link #get}.
   * This method should be called once, and {@code instance} should be non-null.
   * This method is expected to call as soon as possible
   * because the method {@code get} is uninterruptibly blocked until this method is called.
   *
   * @param instance the instance to publish; must not be null
   */
  void prepare(T instance) {
    assert instance != null;
    // Order matters: publish the holder first, then open the latch.
    instanceHolder = new InstanceHolder<>(instance);
    pendingLatch.countDown();
  }
}
| ultratendency/hbase | hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/InstancePending.java | Java | apache-2.0 | 2,665 |
package containerservice
import "github.com/Azure/azure-sdk-for-go/version"
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// UserAgent returns the UserAgent string to use when sending http.Requests.
// The string embeds the shared SDK version (from the version package) plus
// this package's API version suffix.
func UserAgent() string {
	return "Azure-SDK-For-Go/" + version.Number + " containerservice/2018-03-31"
}
// Version returns the semantic version (see http://semver.org) of the client.
// It is sourced from the SDK-wide version package, not this package.
func Version() string {
	return version.Number
}
| linzhaoming/origin | vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/version.go | GO | apache-2.0 | 1,160 |
// stdafx.h : include file for standard system include files,
// or project-specific include files that are used frequently,
// but are changed infrequently
// (original comments were GBK-encoded Visual Studio template text; translated)

#pragma once

// TODO: reference additional headers your program requires here
| hankwing/Squirrel | library/acl/test/url_coder/stdafx.h | C | apache-2.0 | 150 |
/*******************************************************************************
* Copyright (c) 2009 IBM Corporation and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* IBM Corporation - initial API and implementation
* Zend Technologies
*******************************************************************************/
package org.eclipse.php.internal.debug.core.preferences.stepFilters;
/**
 * Represents a listener to Debug Step Filter Preferences changes.
 *
 * @author yaronm
 */
public interface IDebugStepFilterPrefListener {

	/**
	 * Notifies this listener that the debug step filter preferences have been
	 * modified.
	 *
	 * @param event the event describing the step filter modification
	 */
	public void debugStepFilterModified(DebugStepFilterEvent event);
}
| vovagrechka/fucking-everything | phizdets/phizdets-idea/eclipse-src/org.eclipse.php.debug.core/src/org/eclipse/php/internal/debug/core/preferences/stepFilters/IDebugStepFilterPrefListener.java | Java | apache-2.0 | 844 |
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gcm
import (
"fmt"
"net/url"
kube_api "k8s.io/kubernetes/pkg/api"
"github.com/golang/glog"
"k8s.io/heapster/extpoints"
sink_api "k8s.io/heapster/sinks/api"
)
// gcmSink pushes Heapster metrics and timeseries to Google Cloud Monitoring,
// delegating all API interaction to the wrapped GcmCore.
type gcmSink struct {
	core *GcmCore
}
// Adds the specified metrics or updates them if they already exist.
func (self gcmSink) Register(metrics []sink_api.MetricDescriptor) error {
	for _, metric := range metrics {
		if err := self.core.Register(metric.Name, metric.Description, metric.Type.String(), metric.ValueType.String(), metric.Labels); err != nil {
			return err
		}
		// For metrics that have a derived rate counterpart (see gcmRateMetrics),
		// also register the rate metric as a gauge of doubles.
		if rateMetric, exists := gcmRateMetrics[metric.Name]; exists {
			if err := self.core.Register(rateMetric.name, rateMetric.description, sink_api.MetricGauge.String(), sink_api.ValueDouble.String(), metric.Labels); err != nil {
				return err
			}
		}
	}
	return nil
}
// Unregister removes the given metrics, including any derived rate metrics
// that were registered alongside them (mirrors Register).
func (self gcmSink) Unregister(metrics []sink_api.MetricDescriptor) error {
	for _, metric := range metrics {
		if err := self.core.Unregister(metric.Name); err != nil {
			return err
		}
		if rateMetric, exists := gcmRateMetrics[metric.Name]; exists {
			if err := self.core.Unregister(rateMetric.name); err != nil {
				return err
			}
		}
	}
	return nil
}
// Stores events into the backend.
func (self gcmSink) StoreEvents([]kube_api.Event) error {
	// No-op, Google Cloud Monitoring doesn't store events
	return nil
}
// Pushes the specified metric values in input. The metrics must already exist.
func (self gcmSink) StoreTimeseries(input []sink_api.Timeseries) error {
	// Build a map of metrics by name.
	metrics := make(map[string][]Timeseries)
	for _, entry := range input {
		metric := entry.Point

		metricTimeseries, err := self.core.GetMetric(metric)
		if err != nil {
			return err
		}
		metrics[metric.Name] = append(metrics[metric.Name], *metricTimeseries)

		// TODO(vmarmol): Stop doing this when GCM supports graphing cumulative metrics.
		// Translate cumulative to gauge by taking the delta over the time period.
		// GetEquivalentRateMetric returns nil (no error) for metrics without a
		// rate counterpart; those are simply skipped.
		rateMetricTimeseries, err := self.core.GetEquivalentRateMetric(metric)
		if err != nil {
			return err
		}
		if rateMetricTimeseries == nil {
			continue
		}
		rateMetricName := rateMetricTimeseries.TimeseriesDescriptor.Metric
		metrics[rateMetricName] = append(metrics[rateMetricName], *rateMetricTimeseries)
	}

	return self.core.StoreTimeseries(metrics)
}
// DebugInfo returns a short human-readable description of this sink.
func (self gcmSink) DebugInfo() string {
	return "Sink Type: GCM"
}
// Name returns the display name of this sink.
func (self gcmSink) Name() string {
	return "Google Cloud Monitoring Sink"
}
// Registers the GCM sink factory under the "gcm" scheme at package load time.
func init() {
	extpoints.SinkFactories.Register(CreateGCMSink, "gcm")
}
// CreateGCMSink builds the GCM external sink. The "gcm" sink URI takes no
// arguments; a non-empty URI is rejected.
//
// Returns a single-element sink slice on success, or a nil slice and the
// error when core initialization fails.
func CreateGCMSink(uri *url.URL, _ extpoints.HeapsterConf) ([]sink_api.ExternalSink, error) {
	if *uri != (url.URL{}) {
		return nil, fmt.Errorf("gcm sinks don't take arguments")
	}
	core, err := NewCore()
	if err != nil {
		// Previously the error was returned together with a sink wrapping a
		// possibly-invalid core, and success was logged unconditionally.
		// Fail fast instead of handing back a half-initialized sink.
		return nil, err
	}
	glog.Infof("created GCM sink")
	return []sink_api.ExternalSink{gcmSink{core: core}}, nil
}
| rhuss/gofabric8 | vendor/k8s.io/heapster/sinks/gcm/driver.go | GO | apache-2.0 | 3,463 |
// Copyright 2012 Cloudera Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.cloudera.impala.analysis;
import java.util.ArrayList;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.cloudera.impala.catalog.ColumnStats;
import com.cloudera.impala.common.AnalysisException;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
/**
* Representation of a union with its list of operands, and optional order by and limit.
* A union materializes its results, and its resultExprs are slotrefs into the
* materialized tuple.
* During analysis, the operands are normalized (separated into a single sequence of
* DISTINCT followed by a single sequence of ALL operands) and unnested to the extent
* possible. This also creates the AggregationInfo for DISTINCT operands.
*/
public class UnionStmt extends QueryStmt {
private final static Logger LOG = LoggerFactory.getLogger(UnionStmt.class);
public static enum Qualifier {
ALL,
DISTINCT
}
/**
* Represents an operand to a union, created by the parser.
* Contains a query statement and the all/distinct qualifier
* of the union operator (null for the first queryStmt).
*/
public static class UnionOperand {
// Qualifier as seen by the parser. Null for the first operand.
private final Qualifier originalQualifier_;
/////////////////////////////////////////
// BEGIN: Members that need to be reset()
private final QueryStmt queryStmt_;
// Effective qualifier. Possibly different from parsedQualifier_ due
// to DISTINCT propagation.
private Qualifier qualifier_;
// Analyzer used for this operand. Set in analyze().
// We must preserve the conjuncts registered in the analyzer for partition pruning.
private Analyzer analyzer_;
// Map from UnionStmt's result slots to our resultExprs. Used during plan generation.
private final ExprSubstitutionMap smap_;
// END: Members that need to be reset()
/////////////////////////////////////////
public UnionOperand(QueryStmt queryStmt, Qualifier qualifier) {
queryStmt_ = queryStmt;
originalQualifier_ = qualifier;
qualifier_ = qualifier;
smap_ = new ExprSubstitutionMap();
}
public void analyze(Analyzer parent) throws AnalysisException {
if (isAnalyzed()) return;
analyzer_ = new Analyzer(parent);
queryStmt_.analyze(analyzer_);
}
public boolean isAnalyzed() { return analyzer_ != null; }
public QueryStmt getQueryStmt() { return queryStmt_; }
public Qualifier getQualifier() { return qualifier_; }
// Used for propagating DISTINCT.
public void setQualifier(Qualifier qualifier) { qualifier_ = qualifier; }
public Analyzer getAnalyzer() { return analyzer_; }
public ExprSubstitutionMap getSmap() { return smap_; }
public boolean hasAnalyticExprs() {
if (queryStmt_ instanceof SelectStmt) {
return ((SelectStmt) queryStmt_).hasAnalyticInfo();
} else {
Preconditions.checkState(queryStmt_ instanceof UnionStmt);
return ((UnionStmt) queryStmt_).hasAnalyticExprs();
}
}
/**
* C'tor for cloning.
*/
private UnionOperand(UnionOperand other) {
queryStmt_ = other.queryStmt_.clone();
originalQualifier_ = other.originalQualifier_;
qualifier_ = other.qualifier_;
analyzer_ = other.analyzer_;
smap_ = other.smap_.clone();
}
public void reset() {
queryStmt_.reset();
qualifier_ = originalQualifier_;
analyzer_ = null;
smap_.clear();
}
@Override
public UnionOperand clone() { return new UnionOperand(this); }
}
/////////////////////////////////////////
// BEGIN: Members that need to be reset()
// before analysis, this contains the list of union operands derived verbatim
// from the query;
// after analysis, this contains all of distinctOperands followed by allOperands
protected final List<UnionOperand> operands_;
// filled during analyze(); contains all operands that need to go through
// distinct aggregation
protected final List<UnionOperand> distinctOperands_ = Lists.newArrayList();
// filled during analyze(); contains all operands that can be aggregated with
// a simple merge without duplicate elimination (also needs to merge the output
// of the DISTINCT operands)
protected final List<UnionOperand> allOperands_ = Lists.newArrayList();
protected AggregateInfo distinctAggInfo_; // only set if we have DISTINCT ops
// Single tuple materialized by the union. Set in analyze().
protected TupleId tupleId_;
// set prior to unnesting
protected String toSqlString_ = null;
// true if any of the operands_ references an AnalyticExpr
private boolean hasAnalyticExprs_ = false;
// END: Members that need to be reset()
/////////////////////////////////////////
public UnionStmt(List<UnionOperand> operands,
ArrayList<OrderByElement> orderByElements, LimitElement limitElement) {
super(orderByElements, limitElement);
operands_ = operands;
}
/**
* C'tor for cloning.
*/
protected UnionStmt(UnionStmt other) {
super(other.cloneOrderByElements(),
(other.limitElement_ == null) ? null : other.limitElement_.clone());
operands_ = Lists.newArrayList();
if (analyzer_ != null) {
for (UnionOperand o: other.distinctOperands_) distinctOperands_.add(o.clone());
for (UnionOperand o: other.allOperands_) allOperands_.add(o.clone());
operands_.addAll(distinctOperands_);
operands_.addAll(allOperands_);
} else {
for (UnionOperand operand: other.operands_) operands_.add(operand.clone());
}
analyzer_ = other.analyzer_;
distinctAggInfo_ =
(other.distinctAggInfo_ != null) ? other.distinctAggInfo_.clone() : null;
tupleId_ = other.tupleId_;
toSqlString_ = (other.toSqlString_ != null) ? new String(other.toSqlString_) : null;
hasAnalyticExprs_ = other.hasAnalyticExprs_;
withClause_ = (other.withClause_ != null) ? other.withClause_.clone() : null;
}
public List<UnionOperand> getOperands() { return operands_; }
public List<UnionOperand> getDistinctOperands() { return distinctOperands_; }
public boolean hasDistinctOps() { return !distinctOperands_.isEmpty(); }
public List<UnionOperand> getAllOperands() { return allOperands_; }
public boolean hasAllOps() { return !allOperands_.isEmpty(); }
public AggregateInfo getDistinctAggInfo() { return distinctAggInfo_; }
public boolean hasAnalyticExprs() { return hasAnalyticExprs_; }
public void removeAllOperands() {
operands_.removeAll(allOperands_);
allOperands_.clear();
}
/**
* Propagates DISTINCT from left to right, and checks that all
* union operands are union compatible, adding implicit casts if necessary.
*/
@Override
public void analyze(Analyzer analyzer) throws AnalysisException {
if (isAnalyzed()) return;
try {
super.analyze(analyzer);
} catch (AnalysisException e) {
if (analyzer.getMissingTbls().isEmpty()) throw e;
}
Preconditions.checkState(operands_.size() > 0);
// Propagates DISTINCT from right to left
propagateDistinct();
// Make sure all operands return an equal number of exprs.
QueryStmt firstQuery = operands_.get(0).getQueryStmt();
try {
operands_.get(0).analyze(analyzer);
} catch (AnalysisException e) {
if (analyzer.getMissingTbls().isEmpty()) throw e;
}
List<List<Expr>> resultExprLists = Lists.newArrayList();
List<Expr> firstQueryExprs = firstQuery.getBaseTblResultExprs();
resultExprLists.add(firstQueryExprs);
for (int i = 1; i < operands_.size(); ++i) {
QueryStmt query = operands_.get(i).getQueryStmt();
try {
operands_.get(i).analyze(analyzer);
List<Expr> exprs = query.getBaseTblResultExprs();
if (firstQueryExprs.size() != exprs.size()) {
throw new AnalysisException("Operands have unequal number of columns:\n" +
"'" + queryStmtToSql(firstQuery) + "' has " +
firstQueryExprs.size() + " column(s)\n" +
"'" + queryStmtToSql(query) + "' has " + exprs.size() + " column(s)");
}
resultExprLists.add(exprs);
} catch (AnalysisException e) {
if (analyzer.getMissingTbls().isEmpty()) throw e;
}
}
if (!analyzer.getMissingTbls().isEmpty()) {
throw new AnalysisException("Found missing tables. Aborting analysis.");
}
// compute hasAnalyticExprs_
hasAnalyticExprs_ = false;
for (UnionOperand op: operands_) {
if (op.hasAnalyticExprs()) {
hasAnalyticExprs_ = true;
break;
}
}
analyzer.castToUnionCompatibleTypes(resultExprLists);
// Create tuple descriptor materialized by this UnionStmt,
// its resultExprs, and its sortInfo if necessary.
createMetadata(analyzer);
createSortInfo(analyzer);
toSqlString_ = toSql();
unnestOperands(analyzer);
if (evaluateOrderBy_) createSortTupleInfo(analyzer);
baseTblResultExprs_ = resultExprs_;
}
/**
* Marks the baseTblResultExprs of its operands as materialized, based on
* which of the output slots have been marked.
* Calls materializeRequiredSlots() on the operands themselves.
*/
@Override
public void materializeRequiredSlots(Analyzer analyzer) {
TupleDescriptor tupleDesc = analyzer.getDescTbl().getTupleDesc(tupleId_);
if (!distinctOperands_.isEmpty()) {
// to keep things simple we materialize all grouping exprs = output slots,
// regardless of what's being referenced externally
for (SlotDescriptor slotDesc: tupleDesc.getSlots()) {
slotDesc.setIsMaterialized(true);
}
}
if (evaluateOrderBy_) {
sortInfo_.materializeRequiredSlots(analyzer, null);
}
// collect operands' result exprs
List<SlotDescriptor> outputSlots = tupleDesc.getSlots();
List<Expr> exprs = Lists.newArrayList();
for (int i = 0; i < outputSlots.size(); ++i) {
SlotDescriptor slotDesc = outputSlots.get(i);
if (!slotDesc.isMaterialized()) continue;
for (UnionOperand op: operands_) {
exprs.add(op.getQueryStmt().getBaseTblResultExprs().get(i));
}
if (distinctAggInfo_ != null) {
// also mark the corresponding slot in the distinct agg tuple as being
// materialized
distinctAggInfo_.getOutputTupleDesc().getSlots().get(i).setIsMaterialized(true);
}
}
materializeSlots(analyzer, exprs);
for (UnionOperand op: operands_) {
op.getQueryStmt().materializeRequiredSlots(analyzer);
}
}
/**
* Fill distinct-/allOperands and performs possible unnesting of UnionStmt
* operands in the process.
*/
private void unnestOperands(Analyzer analyzer) throws AnalysisException {
if (operands_.size() == 1) {
// ValuesStmt for a single row.
allOperands_.add(operands_.get(0));
setOperandSmap(operands_.get(0), analyzer);
return;
}
// find index of first ALL operand
int firstUnionAllIdx = operands_.size();
for (int i = 1; i < operands_.size(); ++i) {
UnionOperand operand = operands_.get(i);
if (operand.getQualifier() == Qualifier.ALL) {
firstUnionAllIdx = (i == 1 ? 0 : i);
break;
}
}
// operands[0] is always implicitly ALL, so operands[1] can't be the
// first one
Preconditions.checkState(firstUnionAllIdx != 1);
// unnest DISTINCT operands
Preconditions.checkState(distinctOperands_.isEmpty());
for (int i = 0; i < firstUnionAllIdx; ++i) {
unnestOperand(distinctOperands_, Qualifier.DISTINCT, operands_.get(i));
}
// unnest ALL operands
Preconditions.checkState(allOperands_.isEmpty());
for (int i = firstUnionAllIdx; i < operands_.size(); ++i) {
unnestOperand(allOperands_, Qualifier.ALL, operands_.get(i));
}
operands_.clear();
operands_.addAll(distinctOperands_);
operands_.addAll(allOperands_);
// create unnested operands' smaps
for (UnionOperand operand: operands_) {
setOperandSmap(operand, analyzer);
}
// create distinctAggInfo, if necessary
if (!distinctOperands_.isEmpty()) {
// Aggregate produces exactly the same tuple as the original union stmt.
ArrayList<Expr> groupingExprs = Expr.cloneList(resultExprs_);
try {
distinctAggInfo_ =
AggregateInfo.create(groupingExprs, null,
analyzer.getDescTbl().getTupleDesc(tupleId_), analyzer);
} catch (AnalysisException e) {
// this should never happen
throw new AnalysisException("error creating agg info in UnionStmt.analyze()");
}
}
}
/**
* Sets the smap for the given operand. It maps from the output slots this union's
* tuple to the corresponding base table exprs of the operand.
*/
private void setOperandSmap(UnionOperand operand, Analyzer analyzer) {
TupleDescriptor tupleDesc = analyzer.getDescTbl().getTupleDesc(tupleId_);
// operands' smaps were already set in the operands' analyze()
operand.getSmap().clear();
for (int i = 0; i < tupleDesc.getSlots().size(); ++i) {
SlotDescriptor outputSlot = tupleDesc.getSlots().get(i);
operand.getSmap().put(
new SlotRef(outputSlot),
// TODO: baseTblResultExprs?
operand.getQueryStmt().getResultExprs().get(i).clone());
}
}
/**
* Add a single operand to the target list; if the operand itself is a UnionStmt,
* apply unnesting to the extent possible (possibly modifying 'operand' in the process).
*/
private void unnestOperand(
List<UnionOperand> target, Qualifier targetQualifier, UnionOperand operand) {
QueryStmt queryStmt = operand.getQueryStmt();
if (queryStmt instanceof SelectStmt) {
target.add(operand);
return;
}
Preconditions.checkState(queryStmt instanceof UnionStmt);
UnionStmt unionStmt = (UnionStmt) queryStmt;
if (unionStmt.hasLimit() || unionStmt.hasOffset()) {
// we must preserve the nested Union
target.add(operand);
} else if (targetQualifier == Qualifier.DISTINCT || !unionStmt.hasDistinctOps()) {
// there is no limit in the nested Union and we can absorb all of its
// operands as-is
target.addAll(unionStmt.getDistinctOperands());
target.addAll(unionStmt.getAllOperands());
} else {
// the nested Union contains some Distinct ops and we're accumulating
// into our All ops; unnest only the All ops and leave the rest in place
target.addAll(unionStmt.getAllOperands());
unionStmt.removeAllOperands();
target.add(operand);
}
}
  /**
   * String representation of queryStmt used in reporting errors.
   * Allow subclasses to override this.
   *
   * @param queryStmt the statement to render
   * @return the SQL text of the statement
   */
  protected String queryStmtToSql(QueryStmt queryStmt) {
    return queryStmt.toSql();
  }
/**
* Propagates DISTINCT (if present) from right to left.
* Implied associativity:
* A UNION ALL B UNION DISTINCT C = (A UNION ALL B) UNION DISTINCT C
* = A UNION DISTINCT B UNION DISTINCT C
*/
private void propagateDistinct() {
int lastDistinctPos = -1;
for (int i = operands_.size() - 1; i > 0; --i) {
UnionOperand operand = operands_.get(i);
if (lastDistinctPos != -1) {
// There is a DISTINCT somewhere to the right.
operand.setQualifier(Qualifier.DISTINCT);
} else if (operand.getQualifier() == Qualifier.DISTINCT) {
lastDistinctPos = i;
}
}
}
/**
* Create a descriptor for the tuple materialized by the union.
* Set resultExprs to be slot refs into that tuple.
* Also fills the substitution map, such that "order by" can properly resolve
* column references from the result of the union.
*/
private void createMetadata(Analyzer analyzer) throws AnalysisException {
// Create tuple descriptor for materialized tuple created by the union.
TupleDescriptor tupleDesc = analyzer.getDescTbl().createTupleDescriptor("union");
tupleDesc.setIsMaterialized(true);
tupleId_ = tupleDesc.getId();
LOG.trace("UnionStmt.createMetadata: tupleId=" + tupleId_.toString());
// One slot per expr in the select blocks. Use first select block as representative.
List<Expr> firstSelectExprs = operands_.get(0).getQueryStmt().getBaseTblResultExprs();
// Compute column stats for the materialized slots from the source exprs.
List<ColumnStats> columnStats = Lists.newArrayList();
for (int i = 0; i < operands_.size(); ++i) {
List<Expr> selectExprs = operands_.get(i).getQueryStmt().getBaseTblResultExprs();
for (int j = 0; j < selectExprs.size(); ++j) {
ColumnStats statsToAdd = ColumnStats.fromExpr(selectExprs.get(j));
if (i == 0) {
columnStats.add(statsToAdd);
} else {
columnStats.get(j).add(statsToAdd);
}
}
}
// Create tuple descriptor and slots.
for (int i = 0; i < firstSelectExprs.size(); ++i) {
Expr expr = firstSelectExprs.get(i);
SlotDescriptor slotDesc = analyzer.addSlotDescriptor(tupleDesc);
slotDesc.setLabel(getColLabels().get(i));
slotDesc.setType(expr.getType());
slotDesc.setStats(columnStats.get(i));
SlotRef outputSlotRef = new SlotRef(slotDesc);
resultExprs_.add(outputSlotRef);
// Add to aliasSMap so that column refs in "order by" can be resolved.
if (orderByElements_ != null) {
SlotRef aliasRef = new SlotRef(getColLabels().get(i));
if (aliasSmap_.containsMappingFor(aliasRef)) {
ambiguousAliasList_.add(aliasRef);
} else {
aliasSmap_.put(aliasRef, outputSlotRef);
}
}
// register single-directional value transfers from output slot
// to operands' result exprs (if those happen to be slotrefs);
// don't do that if the operand computes analytic exprs
// (see Planner.createInlineViewPlan() for the reasoning)
for (UnionOperand op: operands_) {
Expr resultExpr = op.getQueryStmt().getBaseTblResultExprs().get(i);
slotDesc.addSourceExpr(resultExpr);
if (op.hasAnalyticExprs()) continue;
SlotRef slotRef = resultExpr.unwrapSlotRef(true);
if (slotRef == null) continue;
analyzer.registerValueTransfer(outputSlotRef.getSlotId(), slotRef.getSlotId());
}
}
baseTblResultExprs_ = resultExprs_;
}
public TupleId getTupleId() { return tupleId_; }
@Override
public void getMaterializedTupleIds(ArrayList<TupleId> tupleIdList) {
// Return the sort tuple if there is an evaluated order by.
if (evaluateOrderBy_) {
tupleIdList.add(sortInfo_.getSortTupleDescriptor().getId());
} else {
tupleIdList.add(tupleId_);
}
}
@Override
public void collectTableRefs(List<TableRef> tblRefs) {
for (UnionOperand op: operands_) {
op.getQueryStmt().collectTableRefs(tblRefs);
}
}
@Override
public String toSql() {
if (toSqlString_ != null) return toSqlString_;
StringBuilder strBuilder = new StringBuilder();
Preconditions.checkState(operands_.size() > 0);
if (withClause_ != null) {
strBuilder.append(withClause_.toSql());
strBuilder.append(" ");
}
strBuilder.append(operands_.get(0).getQueryStmt().toSql());
for (int i = 1; i < operands_.size() - 1; ++i) {
strBuilder.append(" UNION " +
((operands_.get(i).getQualifier() == Qualifier.ALL) ? "ALL " : ""));
if (operands_.get(i).getQueryStmt() instanceof UnionStmt) {
strBuilder.append("(");
}
strBuilder.append(operands_.get(i).getQueryStmt().toSql());
if (operands_.get(i).getQueryStmt() instanceof UnionStmt) {
strBuilder.append(")");
}
}
// Determine whether we need parenthesis around the last union operand.
UnionOperand lastOperand = operands_.get(operands_.size() - 1);
QueryStmt lastQueryStmt = lastOperand.getQueryStmt();
strBuilder.append(" UNION " +
((lastOperand.getQualifier() == Qualifier.ALL) ? "ALL " : ""));
if (lastQueryStmt instanceof UnionStmt ||
((hasOrderByClause() || hasLimit() || hasOffset()) &&
!lastQueryStmt.hasLimit() && !lastQueryStmt.hasOffset() &&
!lastQueryStmt.hasOrderByClause())) {
strBuilder.append("(");
strBuilder.append(lastQueryStmt.toSql());
strBuilder.append(")");
} else {
strBuilder.append(lastQueryStmt.toSql());
}
// Order By clause
if (hasOrderByClause()) {
strBuilder.append(" ORDER BY ");
for (int i = 0; i < orderByElements_.size(); ++i) {
strBuilder.append(orderByElements_.get(i).toSql());
strBuilder.append((i+1 != orderByElements_.size()) ? ", " : "");
}
}
// Limit clause.
strBuilder.append(limitElement_.toSql());
return strBuilder.toString();
}
@Override
public ArrayList<String> getColLabels() {
Preconditions.checkState(operands_.size() > 0);
return operands_.get(0).getQueryStmt().getColLabels();
}
@Override
public UnionStmt clone() { return new UnionStmt(this); }
@Override
public void reset() {
super.reset();
for (UnionOperand op: operands_) op.reset();
distinctOperands_.clear();
allOperands_.clear();
distinctAggInfo_ = null;
tupleId_ = null;
toSqlString_ = null;
hasAnalyticExprs_ = false;
}
}
| brightchen/Impala | fe/src/main/java/com/cloudera/impala/analysis/UnionStmt.java | Java | apache-2.0 | 21,985 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.servicenow.auth;
import java.util.concurrent.TimeUnit;
import org.apache.camel.component.servicenow.ServiceNowConfiguration;
import org.apache.cxf.jaxrs.client.WebClient;
import org.apache.cxf.rs.security.oauth2.client.Consumer;
import org.apache.cxf.rs.security.oauth2.client.OAuthClientUtils;
import org.apache.cxf.rs.security.oauth2.common.ClientAccessToken;
import org.apache.cxf.rs.security.oauth2.grants.owner.ResourceOwnerGrant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class OAuthToken {
private static final Logger LOGGER = LoggerFactory.getLogger(OAuthToken.class);
private final String url;
private final ServiceNowConfiguration configuration;
private ClientAccessToken token;
private String authString;
private long expireAt;
public OAuthToken(String url, ServiceNowConfiguration configuration) {
this.url = url;
this.configuration = configuration;
this.token = null;
this.authString = null;
this.expireAt = 0;
}
private synchronized void getOrRefreshAccessToken() {
if (token == null) {
LOGGER.debug("Generate OAuth token");
token = OAuthClientUtils.getAccessToken(
WebClient.create(url),
new Consumer(
configuration.getOauthClientId(),
configuration.getOauthClientSecret()),
new ResourceOwnerGrant(
configuration.getUserName(),
configuration.getPassword()),
true
);
LOGGER.debug("OAuth token expires in {}s", token.getExpiresIn());
// Set expiration time related info in milliseconds
token.setIssuedAt(System.currentTimeMillis());
token.setExpiresIn(TimeUnit.MILLISECONDS.convert(token.getExpiresIn(), TimeUnit.SECONDS));
authString = token.toString();
if (token.getExpiresIn() > 0) {
expireAt = token.getIssuedAt() + token.getExpiresIn();
}
} else if (expireAt > 0 && System.currentTimeMillis() >= expireAt) {
LOGGER.debug("OAuth token is expired, refresh it");
token = OAuthClientUtils.refreshAccessToken(
WebClient.create(url),
new Consumer(
configuration.getOauthClientId(),
configuration.getOauthClientSecret()),
token,
null,
false
);
LOGGER.debug("Refreshed OAuth token expires in {}s", token.getExpiresIn());
// Set expiration time related info in milliseconds
token.setIssuedAt(System.currentTimeMillis());
token.setExpiresIn(TimeUnit.MILLISECONDS.convert(token.getExpiresIn(), TimeUnit.SECONDS));
authString = token.toString();
if (token.getExpiresIn() > 0) {
expireAt = token.getIssuedAt() + token.getExpiresIn();
}
}
}
public ClientAccessToken getClientAccess() {
getOrRefreshAccessToken();
return token;
}
public String getAuthString() {
getOrRefreshAccessToken();
return authString;
}
}
| jlpedrosa/camel | components/camel-servicenow/src/main/java/org/apache/camel/component/servicenow/auth/OAuthToken.java | Java | apache-2.0 | 4,082 |
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.Diagnostics;
using Microsoft.CodeAnalysis.CSharp.Symbols;
using Microsoft.CodeAnalysis.CSharp.Syntax;
using Microsoft.CodeAnalysis.Text;
namespace Microsoft.CodeAnalysis.CSharp.Syntax.InternalSyntax
{
    /// <summary>
    /// A simple LIFO pool of <see cref="SyntaxListBuilder"/> instances used by the
    /// parser to avoid allocating a fresh builder for every list. In DEBUG builds
    /// the pool tracks outstanding builders to catch double-free and double-allocate.
    /// </summary>
    internal class SyntaxListPool
    {
        // Free builders, used as a stack: _freeList[0.._freeIndex) are available.
        private ArrayElement<SyntaxListBuilder>[] _freeList = new ArrayElement<SyntaxListBuilder>[10];
        private int _freeIndex = 0;

#if DEBUG
        // Builders currently handed out; used only for assertions below.
        private readonly List<SyntaxListBuilder> _allocated = new List<SyntaxListBuilder>();
#endif

        internal SyntaxListPool()
        {
        }

        /// <summary>Takes a builder from the pool, or creates one if the pool is empty.</summary>
        internal SyntaxListBuilder Allocate()
        {
            SyntaxListBuilder item;
            if (_freeIndex > 0)
            {
                _freeIndex--;
                item = _freeList[_freeIndex].Value;
                // Clear the slot so the pool holds no stray reference.
                _freeList[_freeIndex].Value = null;
            }
            else
            {
                item = new SyntaxListBuilder(10);
            }

#if DEBUG
            Debug.Assert(!_allocated.Contains(item));
            _allocated.Add(item);
#endif
            return item;
        }

        /// <summary>Typed wrapper over <see cref="Allocate()"/>.</summary>
        internal SyntaxListBuilder<TNode> Allocate<TNode>() where TNode : CSharpSyntaxNode
        {
            return new SyntaxListBuilder<TNode>(this.Allocate());
        }

        /// <summary>Allocates a builder wrapped for building separated lists.</summary>
        internal SeparatedSyntaxListBuilder<TNode> AllocateSeparated<TNode>() where TNode : CSharpSyntaxNode
        {
            return new SeparatedSyntaxListBuilder<TNode>(this.Allocate());
        }

        /// <summary>Returns a separated-list builder's underlying builder to the pool.</summary>
        internal void Free<TNode>(SeparatedSyntaxListBuilder<TNode> item) where TNode : CSharpSyntaxNode
        {
            Free(item.UnderlyingBuilder);
        }

        /// <summary>Clears a builder and returns it to the pool for reuse.</summary>
        internal void Free(SyntaxListBuilder item)
        {
            item.Clear();
            if (_freeIndex >= _freeList.Length)
            {
                this.Grow();
            }
#if DEBUG
            Debug.Assert(_allocated.Contains(item));
            _allocated.Remove(item);
#endif
            _freeList[_freeIndex].Value = item;
            _freeIndex++;
        }

        // Doubles the capacity of the free list.
        private void Grow()
        {
            var tmp = new ArrayElement<SyntaxListBuilder>[_freeList.Length * 2];
            Array.Copy(_freeList, tmp, _freeList.Length);
            _freeList = tmp;
        }
    }
}
| jgglg/roslyn | src/Compilers/CSharp/Portable/Parser/SyntaxListPool.cs | C# | apache-2.0 | 2,457 |
/*
* Copyright Terracotta, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.ehcache.impl.serialization;
import org.ehcache.spi.serialization.StatefulSerializer;
import org.junit.Test;
import java.io.Serializable;
import java.nio.ByteBuffer;
import static org.ehcache.impl.serialization.SerializerTestUtilities.createClassNameRewritingLoader;
import static org.ehcache.impl.serialization.SerializerTestUtilities.newClassName;
import static org.ehcache.impl.serialization.SerializerTestUtilities.popTccl;
import static org.ehcache.impl.serialization.SerializerTestUtilities.pushTccl;
/**
*
* @author cdennis
*/
public class AddedSuperClassTest {
@Test
public void testAddedSuperClass() throws Exception {
StatefulSerializer<Serializable> serializer = new CompactJavaSerializer<>(null);
serializer.init(new TransientStateRepository());
ClassLoader loaderA = createClassNameRewritingLoader(A_2.class, AddedSuperClass_Hidden.class);
Serializable a = (Serializable) loaderA.loadClass(newClassName(A_2.class)).newInstance();
ByteBuffer encodedA = serializer.serialize(a);
pushTccl(createClassNameRewritingLoader(A_1.class));
try {
serializer.read(encodedA);
} finally {
popTccl();
}
}
@Test
public void testAddedSuperClassNotHidden() throws Exception {
StatefulSerializer<Serializable> serializer = new CompactJavaSerializer<>(null);
serializer.init(new TransientStateRepository());
ClassLoader loaderA = createClassNameRewritingLoader(A_2.class, AddedSuperClass_Hidden.class);
Serializable a = (Serializable) loaderA.loadClass(newClassName(A_2.class)).newInstance();
ByteBuffer encodedA = serializer.serialize(a);
pushTccl(createClassNameRewritingLoader(A_1.class, AddedSuperClass_Hidden.class));
try {
serializer.read(encodedA);
} finally {
popTccl();
}
}
public static class AddedSuperClass_Hidden implements Serializable {
private static final long serialVersionUID = 1L;
int field;
}
public static class A_2 extends AddedSuperClass_Hidden {
private static final long serialVersionUID = 1L;
}
public static class A_1 implements Serializable {
private static final long serialVersionUID = 1L;
}
}
| GaryWKeim/ehcache3 | impl/src/test/java/org/ehcache/impl/serialization/AddedSuperClassTest.java | Java | apache-2.0 | 2,781 |
// Copyright Louis Dionne 2013-2016
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
#include <boost/hana/assert.hpp>
#include <boost/hana/equal.hpp>
#include <boost/hana/experimental/types.hpp>
#include <boost/hana/transform.hpp>
#include <boost/hana/type.hpp>
namespace hana = boost::hana;
template <typename ...>
struct mf { struct type; };
template <int> struct x;
struct undefined { };
int main() {
BOOST_HANA_CONSTANT_CHECK(hana::equal(
hana::transform(hana::experimental::types<>{}, undefined{}),
hana::experimental::types<>{}
));
// with a Metafunction
{
BOOST_HANA_CONSTANT_CHECK(hana::equal(
hana::transform(hana::experimental::types<x<0>>{}, hana::metafunction<mf>),
hana::experimental::types<mf<x<0>>::type>{}
));
BOOST_HANA_CONSTANT_CHECK(hana::equal(
hana::transform(hana::experimental::types<x<0>, x<1>>{}, hana::metafunction<mf>),
hana::experimental::types<mf<x<0>>::type, mf<x<1>>::type>{}
));
BOOST_HANA_CONSTANT_CHECK(hana::equal(
hana::transform(hana::experimental::types<x<0>, x<1>, x<2>>{}, hana::metafunction<mf>),
hana::experimental::types<mf<x<0>>::type, mf<x<1>>::type, mf<x<2>>::type>{}
));
BOOST_HANA_CONSTANT_CHECK(hana::equal(
hana::transform(hana::experimental::types<x<0>, x<1>, x<2>, x<3>>{}, hana::metafunction<mf>),
hana::experimental::types<mf<x<0>>::type, mf<x<1>>::type, mf<x<2>>::type, mf<x<3>>::type>{}
));
}
// with a non-Metafunction
{
auto f = [](auto t) {
return hana::metafunction<mf>(t);
};
BOOST_HANA_CONSTANT_CHECK(hana::equal(
hana::transform(hana::experimental::types<x<0>>{}, f),
hana::experimental::types<mf<x<0>>::type>{}
));
BOOST_HANA_CONSTANT_CHECK(hana::equal(
hana::transform(hana::experimental::types<x<0>, x<1>>{}, f),
hana::experimental::types<mf<x<0>>::type, mf<x<1>>::type>{}
));
BOOST_HANA_CONSTANT_CHECK(hana::equal(
hana::transform(hana::experimental::types<x<0>, x<1>, x<2>>{}, f),
hana::experimental::types<mf<x<0>>::type, mf<x<1>>::type, mf<x<2>>::type>{}
));
BOOST_HANA_CONSTANT_CHECK(hana::equal(
hana::transform(hana::experimental::types<x<0>, x<1>, x<2>, x<3>>{}, f),
hana::experimental::types<mf<x<0>>::type, mf<x<1>>::type, mf<x<2>>::type, mf<x<3>>::type>{}
));
}
}
| bureau14/qdb-benchmark | thirdparty/boost/libs/hana/test/experimental/types/transform.cpp | C++ | bsd-2-clause | 2,646 |
class Hashcat < Cask
version '0.47'
sha256 '239acb25b88d529314f2f98af0d6a66772e886c9efbb4ed2b94b7587c9a68455'
url 'https://hashcat.net/files/hashcat-0.47.7z'
homepage 'https://hashcat.net/hashcat/'
depends_on_formula 'unar'
binary 'hashcat-0.47/hashcat-cli64.app', :target => 'hashcat'
end
| NorthIsUp/homebrew-cask | Casks/hashcat.rb | Ruby | bsd-2-clause | 305 |
class PrivateEye < Cask
version 'latest'
sha256 :no_check
url 'http://radiosilenceapp.com/downloads/Private%20Eye.pkg'
homepage 'http://radiosilenceapp.com/private-eye'
install 'Private Eye.pkg'
# We intentionally unload the kext twice as a workaround
# See https://github.com/caskroom/homebrew-cask/pull/1802#issuecomment-34171151
uninstall :early_script => {
:executable => '/sbin/kextunload',
:args => ['-b', 'com.radiosilenceapp.nke.PrivateEye'],
:must_succeed => false,
},
:quit => 'com.radiosilenceapp.PrivateEye',
:kext => 'com.radiosilenceapp.nke.PrivateEye',
:pkgutil => 'com.radiosilenceapp.privateEye.*'
end
| NorthIsUp/homebrew-cask | Casks/private-eye.rb | Ruby | bsd-2-clause | 727 |
class XamarinIos < Cask
version '7.2.4.4'
sha256 'd6056e2ab4e529d7a04e9bcca915d780e3d9a76593c14791040b29651f6f2e5d'
url 'http://download.xamarin.com/MonoTouch/Mac/monotouch-7.2.4.4.pkg'
# non-Sparkle appcast
appcast 'http://xamarin.com/installer_assets/v3/Mac/Universal/InstallationManifest.xml'
homepage 'http://xamarin.com/ios'
install 'monotouch-7.2.4.4.pkg'
uninstall :pkgutil => 'com.xamarin.monotouch.pkg'
end
| NorthIsUp/homebrew-cask | Casks/xamarin-ios.rb | Ruby | bsd-2-clause | 434 |
class Flowdock < Cask
version 'latest'
sha256 :no_check
url 'https://flowdock-resources.s3.amazonaws.com/mac/Flowdock.zip'
appcast 'https://s3.amazonaws.com/flowdock-resources/mac/appcast.xml'
homepage 'https://www.flowdock.com/'
link 'Flowdock.app'
end
| thegcat/homebrew-cask | Casks/flowdock.rb | Ruby | bsd-2-clause | 268 |
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef NET_HTTP_HTTP_STREAM_FACTORY_IMPL_REQUEST_H_
#define NET_HTTP_HTTP_STREAM_FACTORY_IMPL_REQUEST_H_
#include <set>
#include "base/memory/scoped_ptr.h"
#include "net/base/net_log.h"
#include "net/http/http_stream_factory_impl.h"
#include "net/socket/ssl_client_socket.h"
#include "net/spdy/spdy_session_key.h"
#include "url/gurl.h"
namespace net {
class ClientSocketHandle;
class HttpStream;
class SpdySession;
class HttpStreamFactoryImpl::Request : public HttpStreamRequest {
public:
Request(const GURL& url,
HttpStreamFactoryImpl* factory,
HttpStreamRequest::Delegate* delegate,
WebSocketHandshakeStreamBase::CreateHelper*
websocket_handshake_stream_create_helper,
const BoundNetLog& net_log);
~Request() override;
// The GURL from the HttpRequestInfo the started the Request.
const GURL& url() const { return url_; }
// Called when the Job determines the appropriate |spdy_session_key| for the
// Request. Note that this does not mean that SPDY is necessarily supported
// for this SpdySessionKey, since we may need to wait for NPN to complete
// before knowing if SPDY is available.
void SetSpdySessionKey(const SpdySessionKey& spdy_session_key);
bool HasSpdySessionKey() const;
// Attaches |job| to this request. Does not mean that Request will use |job|,
// but Request will own |job|.
void AttachJob(HttpStreamFactoryImpl::Job* job);
// Marks completion of the request. Must be called before OnStreamReady().
// |job_net_log| is the BoundNetLog of the Job that fulfilled this request.
void Complete(bool was_npn_negotiated,
NextProto protocol_negotiated,
bool using_spdy,
const BoundNetLog& job_net_log);
// If this Request has a |spdy_session_key_|, remove this session from the
// SpdySessionRequestMap.
void RemoveRequestFromSpdySessionRequestMap();
// Called by an attached Job if it sets up a SpdySession.
void OnNewSpdySessionReady(Job* job,
scoped_ptr<HttpStream> stream,
const base::WeakPtr<SpdySession>& spdy_session,
bool direct);
WebSocketHandshakeStreamBase::CreateHelper*
websocket_handshake_stream_create_helper() {
return websocket_handshake_stream_create_helper_;
}
// HttpStreamRequest::Delegate methods which we implement. Note we don't
// actually subclass HttpStreamRequest::Delegate.
void OnStreamReady(Job* job,
const SSLConfig& used_ssl_config,
const ProxyInfo& used_proxy_info,
HttpStream* stream);
void OnWebSocketHandshakeStreamReady(Job* job,
const SSLConfig& used_ssl_config,
const ProxyInfo& used_proxy_info,
WebSocketHandshakeStreamBase* stream);
void OnStreamFailed(Job* job, int status, const SSLConfig& used_ssl_config);
void OnCertificateError(Job* job,
int status,
const SSLConfig& used_ssl_config,
const SSLInfo& ssl_info);
void OnNeedsProxyAuth(Job* job,
const HttpResponseInfo& proxy_response,
const SSLConfig& used_ssl_config,
const ProxyInfo& used_proxy_info,
HttpAuthController* auth_controller);
void OnNeedsClientAuth(Job* job,
const SSLConfig& used_ssl_config,
SSLCertRequestInfo* cert_info);
void OnHttpsProxyTunnelResponse(
Job *job,
const HttpResponseInfo& response_info,
const SSLConfig& used_ssl_config,
const ProxyInfo& used_proxy_info,
HttpStream* stream);
// HttpStreamRequest methods.
int RestartTunnelWithProxyAuth(const AuthCredentials& credentials) override;
void SetPriority(RequestPriority priority) override;
LoadState GetLoadState() const override;
bool was_npn_negotiated() const override;
NextProto protocol_negotiated() const override;
bool using_spdy() const override;
private:
// Used to orphan all jobs in |jobs_| other than |job| which becomes "bound"
// to the request.
void OrphanJobsExcept(Job* job);
// Used to orphan all jobs in |jobs_|.
void OrphanJobs();
// Called when a Job succeeds.
void OnJobSucceeded(Job* job);
const GURL url_;
HttpStreamFactoryImpl* const factory_;
WebSocketHandshakeStreamBase::CreateHelper* const
websocket_handshake_stream_create_helper_;
HttpStreamRequest::Delegate* const delegate_;
const BoundNetLog net_log_;
// At the point where Job is irrevocably tied to the Request, we set this.
scoped_ptr<Job> bound_job_;
std::set<HttpStreamFactoryImpl::Job*> jobs_;
scoped_ptr<const SpdySessionKey> spdy_session_key_;
bool completed_;
bool was_npn_negotiated_;
// Protocol negotiated with the server.
NextProto protocol_negotiated_;
bool using_spdy_;
DISALLOW_COPY_AND_ASSIGN(Request);
};
} // namespace net
#endif // NET_HTTP_HTTP_STREAM_FACTORY_IMPL_REQUEST_H_
| mohamed--abdel-maksoud/chromium.src | net/http/http_stream_factory_impl_request.h | C | bsd-3-clause | 5,326 |
/************************************************************************************
* Copyright (C) 2018, Copyright Holders of the ALICE Collaboration *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following conditions are met: *
* * Redistributions of source code must retain the above copyright *
* notice, this list of conditions and the following disclaimer. *
* * Redistributions in binary form must reproduce the above copyright *
* notice, this list of conditions and the following disclaimer in the *
* documentation and/or other materials provided with the distribution. *
* * Neither the name of the <organization> nor the *
* names of its contributors may be used to endorse or promote products *
* derived from this software without specific prior written permission. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND *
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED *
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *
* DISCLAIMED. IN NO EVENT SHALL ALICE COLLABORATION BE LIABLE FOR ANY *
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES *
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; *
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND *
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT *
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *
************************************************************************************/
#ifndef ALIANALYSISTASKEMCALCLUSTERSINJETS_H
#define ALIANALYSISTASKEMCALCLUSTERSINJETS_H
#include "AliAnalysisTaskEmcalJet.h"
#include <TObjArray.h>
#include <TString.h>
class THistManager;
namespace PWGJE {
namespace EMCALJetTasks {
class AliAnalysisTaskEmcalClustersInJets : public AliAnalysisTaskEmcalJet {
public:
AliAnalysisTaskEmcalClustersInJets();
AliAnalysisTaskEmcalClustersInJets(const char *name);
virtual ~AliAnalysisTaskEmcalClustersInJets();
void AddNameJetContainer(const char *name);
void SetNameClusterContainer(const char *name) { fNameClusterContainer = name; }
void SetNameTriggerClass(const char *name) { fNameTriggerClass = name; }
static AliAnalysisTaskEmcalClustersInJets *AddTaskEmcalClustersInJets(AliJetContainer::EJetType_t jettype, const char *trigger);
protected:
virtual void UserCreateOutputObjects();
virtual bool Run();
private:
THistManager *fHistos; //!<! Histogram manager
TObjArray fNamesJetContainers; ///< Names of the jet containers
TString fNameClusterContainer; ///< Name of the cluster container
TString fNameTriggerClass; ///< Name of the trigger class to be selected
ClassDef(AliAnalysisTaskEmcalClustersInJets, 1)
};
}
}
#endif
| fcolamar/AliPhysics | PWGJE/EMCALJetTasks/Tracks/AliAnalysisTaskEmcalClustersInJets.h | C | bsd-3-clause | 3,603 |
<?php
/**
* Zend Framework
*
* LICENSE
*
* This source file is subject to the new BSD license that is bundled
* with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://framework.zend.com/license/new-bsd
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to [email protected] so we can send you a copy immediately.
*
* @category Zend
* @package Zend_Server
* @subpackage Method
* @copyright Copyright (c) 2005-2015 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
* @version $Id$
*/
/**
* Method callback metadata
*
* @category Zend
* @package Zend_Server
* @subpackage Method
* @copyright Copyright (c) 2005-2015 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
*/
class Zend_Server_Method_Callback
{
/**
* @var string Class name for class method callback
*/
protected $_class;
/**
* @var string Function name for function callback
*/
protected $_function;
/**
* @var string Method name for class method callback
*/
protected $_method;
/**
* @var string Callback type
*/
protected $_type;
/**
* @var array Valid callback types
*/
protected $_types = array('function', 'static', 'instance');
/**
* Constructor
*
* @param null|array $options
* @return void
*/
public function __construct($options = null)
{
if ((null !== $options) && is_array($options)) {
$this->setOptions($options);
}
}
/**
* Set object state from array of options
*
* @param array $options
* @return Zend_Server_Method_Callback
*/
public function setOptions(array $options)
{
foreach ($options as $key => $value) {
$method = 'set' . ucfirst($key);
if (method_exists($this, $method)) {
$this->$method($value);
}
}
return $this;
}
/**
* Set callback class
*
* @param string $class
* @return Zend_Server_Method_Callback
*/
public function setClass($class)
{
if (is_object($class)) {
$class = get_class($class);
}
$this->_class = $class;
return $this;
}
/**
* Get callback class
*
* @return string|null
*/
public function getClass()
{
return $this->_class;
}
/**
* Set callback function
*
* @param string $function
* @return Zend_Server_Method_Callback
*/
public function setFunction($function)
{
$this->_function = (string) $function;
$this->setType('function');
return $this;
}
/**
* Get callback function
*
* @return null|string
*/
public function getFunction()
{
return $this->_function;
}
/**
* Set callback class method
*
* @param string $method
* @return Zend_Server_Method_Callback
*/
public function setMethod($method)
{
$this->_method = $method;
return $this;
}
/**
* Get callback class method
*
* @return null|string
*/
public function getMethod()
{
return $this->_method;
}
/**
* Set callback type
*
* @param string $type
* @return Zend_Server_Method_Callback
* @throws Zend_Server_Exception
*/
public function setType($type)
{
if (!in_array($type, $this->_types)) {
// require_once 'Zend/Server/Exception.php';
throw new Zend_Server_Exception('Invalid method callback type passed to ' . __CLASS__ . '::' . __METHOD__);
}
$this->_type = $type;
return $this;
}
/**
* Get callback type
*
* @return string
*/
public function getType()
{
return $this->_type;
}
/**
* Cast callback to array
*
* @return array
*/
public function toArray()
{
$type = $this->getType();
$array = array(
'type' => $type,
);
if ('function' == $type) {
$array['function'] = $this->getFunction();
} else {
$array['class'] = $this->getClass();
$array['method'] = $this->getMethod();
}
return $array;
}
}
| yasarkunduz/pimcore | pimcore/lib/Zend/Server/Method/Callback.php | PHP | bsd-3-clause | 4,605 |
<?php
/**
* @link http://github.com/zendframework/zend-validator for the canonical source repository
* @copyright Copyright (c) 2005-2016 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
*/
namespace Zend\Validator;
use Interop\Container\ContainerInterface;
use Zend\ServiceManager\Config;
use Zend\ServiceManager\FactoryInterface;
use Zend\ServiceManager\ServiceLocatorInterface;
class ValidatorPluginManagerFactory implements FactoryInterface
{
/**
* zend-servicemanager v2 support for invocation options.
*
* @param array
*/
protected $creationOptions;
/**
* {@inheritDoc}
*
* @return ValidatorPluginManager
*/
public function __invoke(ContainerInterface $container, $name, array $options = null)
{
$pluginManager = new ValidatorPluginManager($container, $options ?: []);
// If this is in a zend-mvc application, the ServiceListener will inject
// merged configuration during bootstrap.
if ($container->has('ServiceListener')) {
return $pluginManager;
}
// If we do not have a config service, nothing more to do
if (! $container->has('config')) {
return $pluginManager;
}
$config = $container->get('config');
// If we do not have validators configuration, nothing more to do
if (! isset($config['validators']) || ! is_array($config['validators'])) {
return $pluginManager;
}
// Wire service configuration for validators
(new Config($config['validators']))->configureServiceManager($pluginManager);
return $pluginManager;
}
/**
* {@inheritDoc}
*
* @return ValidatorPluginManager
*/
public function createService(ServiceLocatorInterface $container, $name = null, $requestedName = null)
{
return $this($container, $requestedName ?: ValidatorPluginManager::class, $this->creationOptions);
}
/**
* zend-servicemanager v2 support for invocation options.
*
* @param array $options
* @return void
*/
public function setCreationOptions(array $options)
{
$this->creationOptions = $options;
}
}
| benoitduval/zf3-album | vendor/zendframework/zend-validator/src/ValidatorPluginManagerFactory.php | PHP | bsd-3-clause | 2,292 |
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/files/file_util.h"
#include "chrome/browser/extensions/api/image_writer_private/error_messages.h"
#include "chrome/browser/extensions/api/image_writer_private/operation_manager.h"
#include "chrome/browser/extensions/api/image_writer_private/write_from_url_operation.h"
#include "content/public/browser/browser_thread.h"
#include "net/url_request/url_fetcher.h"
namespace extensions {
namespace image_writer {
using content::BrowserThread;
WriteFromUrlOperation::WriteFromUrlOperation(
base::WeakPtr<OperationManager> manager,
const ExtensionId& extension_id,
net::URLRequestContextGetter* request_context,
GURL url,
const std::string& hash,
const std::string& device_path)
: Operation(manager, extension_id, device_path),
request_context_(request_context),
url_(url),
hash_(hash),
download_continuation_() {}
WriteFromUrlOperation::~WriteFromUrlOperation() {
}
void WriteFromUrlOperation::StartImpl() {
DCHECK_CURRENTLY_ON(BrowserThread::FILE);
GetDownloadTarget(base::Bind(
&WriteFromUrlOperation::Download,
this,
base::Bind(
&WriteFromUrlOperation::VerifyDownload,
this,
base::Bind(
&WriteFromUrlOperation::Unzip,
this,
base::Bind(&WriteFromUrlOperation::Write,
this,
base::Bind(&WriteFromUrlOperation::VerifyWrite,
this,
base::Bind(&WriteFromUrlOperation::Finish,
this)))))));
}
void WriteFromUrlOperation::GetDownloadTarget(
const base::Closure& continuation) {
DCHECK_CURRENTLY_ON(BrowserThread::FILE);
if (IsCancelled()) {
return;
}
if (url_.ExtractFileName().empty()) {
if (!base::CreateTemporaryFileInDir(temp_dir_.path(), &image_path_)) {
Error(error::kTempFileError);
return;
}
} else {
base::FilePath file_name =
base::FilePath::FromUTF8Unsafe(url_.ExtractFileName());
image_path_ = temp_dir_.path().Append(file_name);
}
BrowserThread::PostTask(BrowserThread::FILE, FROM_HERE, continuation);
}
void WriteFromUrlOperation::Download(const base::Closure& continuation) {
DCHECK_CURRENTLY_ON(BrowserThread::FILE);
if (IsCancelled()) {
return;
}
download_continuation_ = continuation;
SetStage(image_writer_api::STAGE_DOWNLOAD);
// Store the URL fetcher on this object so that it is destroyed before this
// object is.
url_fetcher_ = net::URLFetcher::Create(url_, net::URLFetcher::GET, this);
url_fetcher_->SetRequestContext(request_context_);
url_fetcher_->SaveResponseToFileAtPath(
image_path_,
BrowserThread::GetMessageLoopProxyForThread(BrowserThread::FILE));
AddCleanUpFunction(
base::Bind(&WriteFromUrlOperation::DestroyUrlFetcher, this));
url_fetcher_->Start();
}
void WriteFromUrlOperation::DestroyUrlFetcher() { url_fetcher_.reset(); }
void WriteFromUrlOperation::OnURLFetchUploadProgress(
const net::URLFetcher* source,
int64_t current,
int64_t total) {
// No-op
}
void WriteFromUrlOperation::OnURLFetchDownloadProgress(
const net::URLFetcher* source,
int64_t current,
int64_t total) {
DCHECK_CURRENTLY_ON(BrowserThread::FILE);
if (IsCancelled()) {
url_fetcher_.reset(NULL);
}
int progress = (kProgressComplete * current) / total;
SetProgress(progress);
}
void WriteFromUrlOperation::OnURLFetchComplete(const net::URLFetcher* source) {
DCHECK_CURRENTLY_ON(BrowserThread::FILE);
if (source->GetStatus().is_success() && source->GetResponseCode() == 200) {
SetProgress(kProgressComplete);
download_continuation_.Run();
// Remove the reference to ourselves in this closure.
download_continuation_ = base::Closure();
} else {
Error(error::kDownloadInterrupted);
}
}
void WriteFromUrlOperation::VerifyDownload(const base::Closure& continuation) {
DCHECK_CURRENTLY_ON(BrowserThread::FILE);
if (IsCancelled()) {
return;
}
// Skip verify if no hash.
if (hash_.empty()) {
BrowserThread::PostTask(BrowserThread::FILE, FROM_HERE, continuation);
return;
}
SetStage(image_writer_api::STAGE_VERIFYDOWNLOAD);
GetMD5SumOfFile(
image_path_,
0,
0,
kProgressComplete,
base::Bind(
&WriteFromUrlOperation::VerifyDownloadCompare, this, continuation));
}
void WriteFromUrlOperation::VerifyDownloadCompare(
const base::Closure& continuation,
const std::string& download_hash) {
DCHECK_CURRENTLY_ON(BrowserThread::FILE);
if (download_hash != hash_) {
Error(error::kDownloadHashError);
return;
}
BrowserThread::PostTask(
BrowserThread::FILE,
FROM_HERE,
base::Bind(
&WriteFromUrlOperation::VerifyDownloadComplete, this, continuation));
}
void WriteFromUrlOperation::VerifyDownloadComplete(
const base::Closure& continuation) {
DCHECK_CURRENTLY_ON(BrowserThread::FILE);
if (IsCancelled()) {
return;
}
SetProgress(kProgressComplete);
BrowserThread::PostTask(BrowserThread::FILE, FROM_HERE, continuation);
}
} // namespace image_writer
} // namespace extensions
| axinging/chromium-crosswalk | chrome/browser/extensions/api/image_writer_private/write_from_url_operation.cc | C++ | bsd-3-clause | 5,390 |
/*
Copyright (c) 2003-2012, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
/**
* @fileOverview Defines the {@link CKEDITOR.lang} object, for the
* Chinese Traditional language.
*/
/**#@+
@type String
@example
*/
/**
* Contains the dictionary of language entries.
* @namespace
*/
CKEDITOR.lang['zh'] =
{
/**
* The language reading direction. Possible values are "rtl" for
* Right-To-Left languages (like Arabic) and "ltr" for Left-To-Right
* languages (like English).
* @default 'ltr'
*/
dir : 'ltr',
/*
* Screenreader titles. Please note that screenreaders are not always capable
* of reading non-English words. So be careful while translating it.
*/
editorTitle : 'Rich text editor, %1', // MISSING
editorHelp : 'Press ALT 0 for help', // MISSING
// ARIA descriptions.
toolbars : '編輯器工具欄',
editor : '富文本編輯器',
// Toolbar buttons without dialogs.
source : '原始碼',
newPage : '開新檔案',
save : '儲存',
preview : '預覽',
cut : '剪下',
copy : '複製',
paste : '貼上',
print : '列印',
underline : '底線',
bold : '粗體',
italic : '斜體',
selectAll : '全選',
removeFormat : '清除格式',
strike : '刪除線',
subscript : '下標',
superscript : '上標',
horizontalrule : '插入水平線',
pagebreak : '插入分頁符號',
pagebreakAlt : '分頁符號',
unlink : '移除超連結',
undo : '復原',
redo : '重複',
// Common messages and labels.
common :
{
browseServer : '瀏覽伺服器端',
url : 'URL',
protocol : '通訊協定',
upload : '上傳',
uploadSubmit : '上傳至伺服器',
image : '影像',
flash : 'Flash',
form : '表單',
checkbox : '核取方塊',
radio : '選項按鈕',
textField : '文字方塊',
textarea : '文字區域',
hiddenField : '隱藏欄位',
button : '按鈕',
select : '清單/選單',
imageButton : '影像按鈕',
notSet : '<尚未設定>',
id : 'ID',
name : '名稱',
langDir : '語言方向',
langDirLtr : '由左而右 (LTR)',
langDirRtl : '由右而左 (RTL)',
langCode : '語言代碼',
longDescr : '詳細 URL',
cssClass : '樣式表類別',
advisoryTitle : '標題',
cssStyle : '樣式',
ok : '確定',
cancel : '取消',
close : '关闭',
preview : '预览',
generalTab : '一般',
advancedTab : '進階',
validateNumberFailed : '需要輸入數字格式',
confirmNewPage : '現存的修改尚未儲存,要開新檔案?',
confirmCancel : '部份選項尚未儲存,要關閉對話盒?',
options : '选项',
target : '目标',
targetNew : '新窗口(_blank)',
targetTop : '整页(_top)',
targetSelf : '本窗口(_self)',
targetParent : '父窗口(_parent)',
langDirLTR : 'Left to Right (LTR)', // MISSING
langDirRTL : 'Right to Left (RTL)', // MISSING
styles : 'Style', // MISSING
cssClasses : 'Stylesheet Classes', // MISSING
width : '寬度',
height : '高度',
align : '對齊',
alignLeft : '靠左對齊',
alignRight : '靠右對齊',
alignCenter : '置中',
alignTop : '靠上對齊',
alignMiddle : '置中對齊',
alignBottom : '靠下對齊',
invalidHeight : '高度必須為數字格式',
invalidWidth : '寬度必須為數字格式',
invalidCssLength : 'Value specified for the "%1" field must be a positive number with or without a valid CSS measurement unit (px, %, in, cm, mm, em, ex, pt, or pc).', // MISSING
invalidHtmlLength : 'Value specified for the "%1" field must be a positive number with or without a valid HTML measurement unit (px or %).', // MISSING
invalidInlineStyle : 'Value specified for the inline style must consist of one or more tuples with the format of "name : value", separated by semi-colons.', // MISSING
cssLengthTooltip : 'Enter a number for a value in pixels or a number with a valid CSS unit (px, %, in, cm, mm, em, ex, pt, or pc).', // MISSING
// Put the voice-only part of the label in the span.
unavailable : '%1<span class="cke_accessibility">, 已關閉</span>'
},
contextmenu :
{
options : 'Context Menu Options' // MISSING
},
// Special char dialog.
specialChar :
{
toolbar : '插入特殊符號',
title : '請選擇特殊符號',
options : 'Special Character Options' // MISSING
},
// Link dialog.
link :
{
toolbar : '插入/編輯超連結',
other : '<其他>',
menu : '編輯超連結',
title : '超連結',
info : '超連結資訊',
target : '目標',
upload : '上傳',
advanced : '進階',
type : '超連接類型',
toUrl : 'URL', // MISSING
toAnchor : '本頁錨點',
toEmail : '電子郵件',
targetFrame : '<框架>',
targetPopup : '<快顯視窗>',
targetFrameName : '目標框架名稱',
targetPopupName : '快顯視窗名稱',
popupFeatures : '快顯視窗屬性',
popupResizable : '可縮放',
popupStatusBar : '狀態列',
popupLocationBar: '網址列',
popupToolbar : '工具列',
popupMenuBar : '選單列',
popupFullScreen : '全螢幕 (IE)',
popupScrollBars : '捲軸',
popupDependent : '從屬 (NS)',
popupLeft : '左',
popupTop : '右',
id : 'ID',
langDir : '語言方向',
langDirLTR : '由左而右 (LTR)',
langDirRTL : '由右而左 (RTL)',
acccessKey : '存取鍵',
name : '名稱',
langCode : '語言方向',
tabIndex : '定位順序',
advisoryTitle : '標題',
advisoryContentType : '內容類型',
cssClasses : '樣式表類別',
charset : '連結資源之編碼',
styles : '樣式',
rel : 'Relationship', // MISSING
selectAnchor : '請選擇錨點',
anchorName : '依錨點名稱',
anchorId : '依元件 ID',
emailAddress : '電子郵件',
emailSubject : '郵件主旨',
emailBody : '郵件內容',
noAnchors : '(本文件尚無可用之錨點)',
noUrl : '請輸入欲連結的 URL',
noEmail : '請輸入電子郵件位址'
},
// Anchor dialog
anchor :
{
toolbar : '插入/編輯錨點',
menu : '錨點屬性',
title : '錨點屬性',
name : '錨點名稱',
errorName : '請輸入錨點名稱',
remove : 'Remove Anchor' // MISSING
},
// List style dialog
list:
{
numberedTitle : 'Numbered List Properties', // MISSING
bulletedTitle : 'Bulleted List Properties', // MISSING
type : 'Type', // MISSING
start : 'Start', // MISSING
validateStartNumber :'List start number must be a whole number.', // MISSING
circle : 'Circle', // MISSING
disc : 'Disc', // MISSING
square : 'Square', // MISSING
none : 'None', // MISSING
notset : '<not set>', // MISSING
armenian : 'Armenian numbering', // MISSING
georgian : 'Georgian numbering (an, ban, gan, etc.)', // MISSING
lowerRoman : 'Lower Roman (i, ii, iii, iv, v, etc.)', // MISSING
upperRoman : 'Upper Roman (I, II, III, IV, V, etc.)', // MISSING
lowerAlpha : 'Lower Alpha (a, b, c, d, e, etc.)', // MISSING
upperAlpha : 'Upper Alpha (A, B, C, D, E, etc.)', // MISSING
lowerGreek : 'Lower Greek (alpha, beta, gamma, etc.)', // MISSING
decimal : 'Decimal (1, 2, 3, etc.)', // MISSING
decimalLeadingZero : 'Decimal leading zero (01, 02, 03, etc.)' // MISSING
},
// Find And Replace Dialog
findAndReplace :
{
title : '尋找與取代',
find : '尋找',
replace : '取代',
findWhat : '尋找:',
replaceWith : '取代:',
notFoundMsg : '未找到指定的文字。',
findOptions : 'Find Options', // MISSING
matchCase : '大小寫須相符',
matchWord : '全字相符',
matchCyclic : '循環搜索',
replaceAll : '全部取代',
replaceSuccessMsg : '共完成 %1 次取代'
},
// Table Dialog
table :
{
toolbar : '表格',
title : '表格屬性',
menu : '表格屬性',
deleteTable : '刪除表格',
rows : '列數',
columns : '欄數',
border : '邊框',
widthPx : '像素',
widthPc : '百分比',
widthUnit : 'width unit', // MISSING
cellSpace : '間距',
cellPad : '內距',
caption : '標題',
summary : '摘要',
headers : '標題',
headersNone : '無標題',
headersColumn : '第一欄',
headersRow : '第一列',
headersBoth : '第一欄和第一列',
invalidRows : '必須有一或更多的列',
invalidCols : '必須有一或更多的欄',
invalidBorder : '邊框大小必須為數字格式',
invalidWidth : '表格寬度必須為數字格式',
invalidHeight : '表格高度必須為數字格式',
invalidCellSpacing : '儲存格間距必須為數字格式',
invalidCellPadding : '儲存格內距必須為數字格式',
cell :
{
menu : '儲存格',
insertBefore : '向左插入儲存格',
insertAfter : '向右插入儲存格',
deleteCell : '刪除儲存格',
merge : '合併儲存格',
mergeRight : '向右合併儲存格',
mergeDown : '向下合併儲存格',
splitHorizontal : '橫向分割儲存格',
splitVertical : '縱向分割儲存格',
title : '儲存格屬性',
cellType : '儲存格類別',
rowSpan : '儲存格列數',
colSpan : '儲存格欄數',
wordWrap : '自動換行',
hAlign : '水平對齊',
vAlign : '垂直對齊',
alignBaseline : '基線對齊',
bgColor : '背景顏色',
borderColor : '邊框顏色',
data : '數據',
header : '標題',
yes : '是',
no : '否',
invalidWidth : '儲存格寬度必須為數字格式',
invalidHeight : '儲存格高度必須為數字格式',
invalidRowSpan : '儲存格列數必須為整數格式',
invalidColSpan : '儲存格欄數度必須為整數格式',
chooseColor : 'Choose' // MISSING
},
row :
{
menu : '列',
insertBefore : '向上插入列',
insertAfter : '向下插入列',
deleteRow : '刪除列'
},
column :
{
menu : '欄',
insertBefore : '向左插入欄',
insertAfter : '向右插入欄',
deleteColumn : '刪除欄'
}
},
// Button Dialog.
button :
{
title : '按鈕屬性',
text : '顯示文字 (值)',
type : '類型',
typeBtn : '按鈕 (Button)',
typeSbm : '送出 (Submit)',
typeRst : '重設 (Reset)'
},
// Checkbox and Radio Button Dialogs.
checkboxAndRadio :
{
checkboxTitle : '核取方塊屬性',
radioTitle : '選項按鈕屬性',
value : '選取值',
selected : '已選取'
},
// Form Dialog.
form :
{
title : '表單屬性',
menu : '表單屬性',
action : '動作',
method : '方法',
encoding : '表單編碼'
},
// Select Field Dialog.
select :
{
title : '清單/選單屬性',
selectInfo : '資訊',
opAvail : '可用選項',
value : '值',
size : '大小',
lines : '行',
chkMulti : '可多選',
opText : '顯示文字',
opValue : '選取值',
btnAdd : '新增',
btnModify : '修改',
btnUp : '上移',
btnDown : '下移',
btnSetValue : '設為預設值',
btnDelete : '刪除'
},
// Textarea Dialog.
textarea :
{
title : '文字區域屬性',
cols : '字元寬度',
rows : '列數'
},
// Text Field Dialog.
textfield :
{
title : '文字方塊屬性',
name : '名稱',
value : '值',
charWidth : '字元寬度',
maxChars : '最多字元數',
type : '類型',
typeText : '文字',
typePass : '密碼'
},
// Hidden Field Dialog.
hidden :
{
title : '隱藏欄位屬性',
name : '名稱',
value : '值'
},
// Image Dialog.
image :
{
title : '影像屬性',
titleButton : '影像按鈕屬性',
menu : '影像屬性',
infoTab : '影像資訊',
btnUpload : '上傳至伺服器',
upload : '上傳',
alt : '替代文字',
lockRatio : '等比例',
resetSize : '重設為原大小',
border : '邊框',
hSpace : '水平距離',
vSpace : '垂直距離',
alertUrl : '請輸入影像 URL',
linkTab : '超連結',
button2Img : '要把影像按鈕改成影像嗎?',
img2Button : '要把影像改成影像按鈕嗎?',
urlMissing : 'Image source URL is missing.', // MISSING
validateBorder : 'Border must be a whole number.', // MISSING
validateHSpace : 'HSpace must be a whole number.', // MISSING
validateVSpace : 'VSpace must be a whole number.' // MISSING
},
// Flash Dialog
flash :
{
properties : 'Flash 屬性',
propertiesTab : '屬性',
title : 'Flash 屬性',
chkPlay : '自動播放',
chkLoop : '重複',
chkMenu : '開啟選單',
chkFull : '啟動全螢幕顯示',
scale : '縮放',
scaleAll : '全部顯示',
scaleNoBorder : '無邊框',
scaleFit : '精確符合',
access : '允許腳本訪問',
accessAlways : '永遠',
accessSameDomain: '相同域名',
accessNever : '永不',
alignAbsBottom : '絕對下方',
alignAbsMiddle : '絕對中間',
alignBaseline : '基準線',
alignTextTop : '文字上方',
quality : '質素',
qualityBest : '最好',
qualityHigh : '高',
qualityAutoHigh : '高(自動)',
qualityMedium : '中(自動)',
qualityAutoLow : '低(自動)',
qualityLow : '低',
windowModeWindow: '視窗',
windowModeOpaque: '不透明',
windowModeTransparent : '透明',
windowMode : '視窗模式',
flashvars : 'Flash 變數',
bgcolor : '背景顏色',
hSpace : '水平距離',
vSpace : '垂直距離',
validateSrc : '請輸入欲連結的 URL',
validateHSpace : '水平間距必須為數字格式',
validateVSpace : '垂直間距必須為數字格式'
},
// Speller Pages Dialog
spellCheck :
{
toolbar : '拼字檢查',
title : '拼字檢查',
notAvailable : '抱歉,服務目前暫不可用',
errorLoading : '無法聯系侍服器: %s.',
notInDic : '不在字典中',
changeTo : '更改為',
btnIgnore : '忽略',
btnIgnoreAll : '全部忽略',
btnReplace : '取代',
btnReplaceAll : '全部取代',
btnUndo : '復原',
noSuggestions : '- 無建議值 -',
progress : '進行拼字檢查中…',
noMispell : '拼字檢查完成:未發現拼字錯誤',
noChanges : '拼字檢查完成:未更改任何單字',
oneChange : '拼字檢查完成:更改了 1 個單字',
manyChanges : '拼字檢查完成:更改了 %1 個單字',
ieSpellDownload : '尚未安裝拼字檢查元件。您是否想要現在下載?'
},
smiley :
{
toolbar : '表情符號',
title : '插入表情符號',
options : 'Smiley Options' // MISSING
},
elementsPath :
{
eleLabel : 'Elements path', // MISSING
eleTitle : '%1 元素'
},
numberedlist : '編號清單',
bulletedlist : '項目清單',
indent : '增加縮排',
outdent : '減少縮排',
justify :
{
left : '靠左對齊',
center : '置中',
right : '靠右對齊',
block : '左右對齊'
},
blockquote : '引用文字',
clipboard :
{
title : '貼上',
cutError : '瀏覽器的安全性設定不允許編輯器自動執行剪下動作。請使用快捷鍵 (Ctrl/Cmd+X) 剪下。',
copyError : '瀏覽器的安全性設定不允許編輯器自動執行複製動作。請使用快捷鍵 (Ctrl/Cmd+C) 複製。',
pasteMsg : '請使用快捷鍵 (<strong>Ctrl/Cmd+V</strong>) 貼到下方區域中並按下 <strong>確定</strong>',
securityMsg : '因為瀏覽器的安全性設定,本編輯器無法直接存取您的剪貼簿資料,請您自行在本視窗進行貼上動作。',
pasteArea : 'Paste Area' // MISSING
},
pastefromword :
{
confirmCleanup : '您想貼上的文字似乎是自 Word 複製而來,請問您是否要先清除 Word 的格式後再行貼上?',
toolbar : '自 Word 貼上',
title : '自 Word 貼上',
error : 'It was not possible to clean up the pasted data due to an internal error' // MISSING
},
pasteText :
{
button : '貼為純文字格式',
title : '貼為純文字格式'
},
templates :
{
button : '樣版',
title : '內容樣版',
options : 'Template Options', // MISSING
insertOption : '取代原有內容',
selectPromptMsg : '請選擇欲開啟的樣版<br> (原有的內容將會被清除):',
emptyListMsg : '(無樣版)'
},
showBlocks : '顯示區塊',
stylesCombo :
{
label : '樣式',
panelTitle : 'Formatting Styles', // MISSING
panelTitle1 : '塊級元素樣式',
panelTitle2 : '內聯元素樣式',
panelTitle3 : '物件元素樣式'
},
format :
{
label : '格式',
panelTitle : '格式',
tag_p : '一般',
tag_pre : '已格式化',
tag_address : '位址',
tag_h1 : '標題 1',
tag_h2 : '標題 2',
tag_h3 : '標題 3',
tag_h4 : '標題 4',
tag_h5 : '標題 5',
tag_h6 : '標題 6',
tag_div : '一般 (DIV)'
},
div :
{
title : 'Create Div Container', // MISSING
toolbar : 'Create Div Container', // MISSING
cssClassInputLabel : 'Stylesheet Classes', // MISSING
styleSelectLabel : 'Style', // MISSING
IdInputLabel : 'Id', // MISSING
languageCodeInputLabel : ' Language Code', // MISSING
inlineStyleInputLabel : 'Inline Style', // MISSING
advisoryTitleInputLabel : 'Advisory Title', // MISSING
langDirLabel : 'Language Direction', // MISSING
langDirLTRLabel : 'Left to Right (LTR)', // MISSING
langDirRTLLabel : 'Right to Left (RTL)', // MISSING
edit : 'Edit Div', // MISSING
remove : 'Remove Div' // MISSING
},
iframe :
{
title : 'IFrame Properties', // MISSING
toolbar : 'IFrame', // MISSING
noUrl : 'Please type the iframe URL', // MISSING
scrolling : 'Enable scrollbars', // MISSING
border : 'Show frame border' // MISSING
},
font :
{
label : '字體',
voiceLabel : '字體',
panelTitle : '字體'
},
fontSize :
{
label : '大小',
voiceLabel : '文字大小',
panelTitle : '大小'
},
colorButton :
{
textColorTitle : '文字顏色',
bgColorTitle : '背景顏色',
panelTitle : 'Colors', // MISSING
auto : '自動',
more : '更多顏色…'
},
colors :
{
'000' : 'Black', // MISSING
'800000' : 'Maroon', // MISSING
'8B4513' : 'Saddle Brown', // MISSING
'2F4F4F' : 'Dark Slate Gray', // MISSING
'008080' : 'Teal', // MISSING
'000080' : 'Navy', // MISSING
'4B0082' : 'Indigo', // MISSING
'696969' : 'Dark Gray', // MISSING
'B22222' : 'Fire Brick', // MISSING
'A52A2A' : 'Brown', // MISSING
'DAA520' : 'Golden Rod', // MISSING
'006400' : 'Dark Green', // MISSING
'40E0D0' : 'Turquoise', // MISSING
'0000CD' : 'Medium Blue', // MISSING
'800080' : 'Purple', // MISSING
'808080' : 'Gray', // MISSING
'F00' : 'Red', // MISSING
'FF8C00' : 'Dark Orange', // MISSING
'FFD700' : 'Gold', // MISSING
'008000' : 'Green', // MISSING
'0FF' : 'Cyan', // MISSING
'00F' : 'Blue', // MISSING
'EE82EE' : 'Violet', // MISSING
'A9A9A9' : 'Dim Gray', // MISSING
'FFA07A' : 'Light Salmon', // MISSING
'FFA500' : 'Orange', // MISSING
'FFFF00' : 'Yellow', // MISSING
'00FF00' : 'Lime', // MISSING
'AFEEEE' : 'Pale Turquoise', // MISSING
'ADD8E6' : 'Light Blue', // MISSING
'DDA0DD' : 'Plum', // MISSING
'D3D3D3' : 'Light Grey', // MISSING
'FFF0F5' : 'Lavender Blush', // MISSING
'FAEBD7' : 'Antique White', // MISSING
'FFFFE0' : 'Light Yellow', // MISSING
'F0FFF0' : 'Honeydew', // MISSING
'F0FFFF' : 'Azure', // MISSING
'F0F8FF' : 'Alice Blue', // MISSING
'E6E6FA' : 'Lavender', // MISSING
'FFF' : 'White' // MISSING
},
scayt :
{
title : '即時拼寫檢查',
opera_title : 'Not supported by Opera', // MISSING
enable : '啟用即時拼寫檢查',
disable : '關閉即時拼寫檢查',
about : '關於即時拼寫檢查',
toggle : '啟用/關閉即時拼寫檢查',
options : '選項',
langs : '語言',
moreSuggestions : '更多拼寫建議',
ignore : '忽略',
ignoreAll : '全部忽略',
addWord : '添加單詞',
emptyDic : '字典名不應為空.',
optionsTab : '選項',
allCaps : 'Ignore All-Caps Words', // MISSING
ignoreDomainNames : 'Ignore Domain Names', // MISSING
mixedCase : 'Ignore Words with Mixed Case', // MISSING
mixedWithDigits : 'Ignore Words with Numbers', // MISSING
languagesTab : '語言',
dictionariesTab : '字典',
dic_field_name : 'Dictionary name', // MISSING
dic_create : 'Create', // MISSING
dic_restore : 'Restore', // MISSING
dic_delete : 'Delete', // MISSING
dic_rename : 'Rename', // MISSING
dic_info : 'Initially the User Dictionary is stored in a Cookie. However, Cookies are limited in size. When the User Dictionary grows to a point where it cannot be stored in a Cookie, then the dictionary may be stored on our server. To store your personal dictionary on our server you should specify a name for your dictionary. If you already have a stored dictionary, please type its name and click the Restore button.', // MISSING
aboutTab : '關於'
},
about :
{
title : '關於 CKEditor',
dlgTitle : '關於 CKEditor',
help : 'Check $1 for help.', // MISSING
userGuide : 'CKEditor User\'s Guide', // MISSING
moreInfo : '訪問我們的網站以獲取更多關於協議的信息',
copy : 'Copyright © $1. All rights reserved.'
},
maximize : '最大化',
minimize : '最小化',
fakeobjects :
{
anchor : '錨點',
flash : 'Flash 動畫',
iframe : 'IFrame', // MISSING
hiddenfield : 'Hidden Field', // MISSING
unknown : '不明物件'
},
resize : '拖拽改變大小',
colordialog :
{
title : 'Select color', // MISSING
options : 'Color Options', // MISSING
highlight : 'Highlight', // MISSING
selected : 'Selected Color', // MISSING
clear : 'Clear' // MISSING
},
toolbarCollapse : '折叠工具栏',
toolbarExpand : '展开工具栏',
toolbarGroups :
{
document : 'Document', // MISSING
clipboard : 'Clipboard/Undo', // MISSING
editing : 'Editing', // MISSING
forms : 'Forms', // MISSING
basicstyles : 'Basic Styles', // MISSING
paragraph : 'Paragraph', // MISSING
links : 'Links', // MISSING
insert : 'Insert', // MISSING
styles : 'Styles', // MISSING
colors : 'Colors', // MISSING
tools : 'Tools' // MISSING
},
bidi :
{
ltr : 'Text direction from left to right', // MISSING
rtl : 'Text direction from right to left' // MISSING
},
docprops :
{
label : '文件屬性',
title : '文件屬性',
design : 'Design', // MISSING
meta : 'Meta 資料',
chooseColor : 'Choose', // MISSING
other : '<其他>',
docTitle : '頁面標題',
charset : '字元編碼',
charsetOther : '其他字元編碼',
charsetASCII : 'ASCII', // MISSING
charsetCE : '中歐語系',
charsetCT : '正體中文 (Big5)',
charsetCR : '斯拉夫文',
charsetGR : '希臘文',
charsetJP : '日文',
charsetKR : '韓文',
charsetTR : '土耳其文',
charsetUN : 'Unicode (UTF-8)', // MISSING
charsetWE : '西歐語系',
docType : '文件類型',
docTypeOther : '其他文件類型',
xhtmlDec : '包含 XHTML 定義',
bgColor : '背景顏色',
bgImage : '背景影像',
bgFixed : '浮水印',
txtColor : '文字顏色',
margin : '頁面邊界',
marginTop : '上',
marginLeft : '左',
marginRight : '右',
marginBottom : '下',
metaKeywords : '文件索引關鍵字 (用半形逗號[,]分隔)',
metaDescription : '文件說明',
metaAuthor : '作者',
metaCopyright : '版權所有',
previewHtml : '<p>This is some <strong>sample text</strong>. You are using <a href="javascript:void(0)">CKEditor</a>.</p>' // MISSING
}
};
| MilesYM/Symfony | web/editor/_source/lang/zh.js | JavaScript | mit | 23,455 |
<div>
<ul>
<li><a class="js-like" href="/foo/create/" data-count="0"><i class="fa fa-heart"></i> like</a></li>
</ul>
</div>
<div>
<ul>
<li><a class="js-like" href="foo/delete/" data-count="1"><i class="fa fa-heart"></i> remove like</a></li>
</ul>
</div> | shriyanka/daemo-forum | static/spirit/scripts/test/fixtures/like.html | HTML | mit | 285 |
//*********************************************************
//
// Copyright (c) Microsoft. All rights reserved.
// This code is licensed under the MIT License (MIT).
// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF
// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY
// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR
// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT.
//
//*********************************************************
//-----------------------------------------------------------------------------
// <auto-generated>
// This code was generated by a tool.
//
// Changes to this file may cause incorrect behavior and will be lost if
// the code is regenerated.
//
// For more information, see: http://go.microsoft.com/fwlink/?LinkID=623246
// </auto-generated>
//-----------------------------------------------------------------------------
#include "pch.h"
using namespace concurrency;
using namespace Microsoft::WRL;
using namespace Platform;
using namespace Windows::Foundation;
using namespace Windows::Devices::AllJoyn;
using namespace com::microsoft::Samples::SecureInterface;
std::map<alljoyn_busobject, WeakReference*> SecureInterfaceProducer::SourceObjects;
std::map<alljoyn_interfacedescription, WeakReference*> SecureInterfaceProducer::SourceInterfaces;
// Constructs a producer bound to the given bus attachment. Only lightweight
// state is set up here; interface creation, bus-object registration and
// connection all happen later in Start().
SecureInterfaceProducer::SecureInterfaceProducer(AllJoynBusAttachment^ busAttachment)
    : m_busAttachment(busAttachment),
    m_sessionListener(nullptr),
    m_busObject(nullptr),
    m_sessionPort(0),
    m_sessionId(0)
{
    // Weak reference handed to the AllJoyn C callbacks so they can route back
    // to this object without keeping it alive.
    m_weak = new WeakReference(this);
    ServiceObjectPath = ref new String(L"/Service");
    m_signals = ref new SecureInterfaceLegacySignals();
    // Token value 0 means "StateChanged not subscribed yet" (see UnregisterFromBus).
    m_busAttachmentStateChangedToken.Value = 0;
}
// Unhooks all bus registrations, then releases the WeakReference wrapper
// that was passed to the AllJoyn C callbacks.
SecureInterfaceProducer::~SecureInterfaceProducer()
{
    UnregisterFromBus();
    delete m_weak;
}
// Reverses everything Start()/OnSessionJoined set up, in order:
// StateChanged subscription, session port + port listener, bus object,
// and finally the session itself with its session listener.
// Each step is guarded so the method is safe to call repeatedly and at
// any stage of a partially-completed Start().
void SecureInterfaceProducer::UnregisterFromBus()
{
    // Detach the StateChanged handler; a token value of 0 means it was never added.
    if ((nullptr != m_busAttachment) && (0 != m_busAttachmentStateChangedToken.Value))
    {
        m_busAttachment->StateChanged -= m_busAttachmentStateChangedToken;
        m_busAttachmentStateChangedToken.Value = 0;
    }
    // Unbind the session port and destroy the native port listener.
    if ((nullptr != m_busAttachment) && (nullptr != SessionPortListener))
    {
        alljoyn_busattachment_unbindsessionport(AllJoynHelpers::GetInternalBusAttachment(m_busAttachment), m_sessionPort);
        alljoyn_sessionportlistener_destroy(SessionPortListener);
        SessionPortListener = nullptr;
    }
    // Unregister and destroy the bus object that hosted the interface.
    if ((nullptr != m_busAttachment) && (nullptr != BusObject))
    {
        alljoyn_busattachment_unregisterbusobject(AllJoynHelpers::GetInternalBusAttachment(m_busAttachment), BusObject);
        alljoyn_busobject_destroy(BusObject);
        BusObject = nullptr;
    }
    // Leave the active session, clear its listener, and destroy the listener object.
    if ((nullptr != m_busAttachment) && (nullptr != SessionListener))
    {
        alljoyn_busattachment_leavesession(AllJoynHelpers::GetInternalBusAttachment(m_busAttachment), m_sessionId);
        alljoyn_busattachment_setsessionlistener(AllJoynHelpers::GetInternalBusAttachment(m_busAttachment), m_sessionId, nullptr);
        alljoyn_sessionlistener_destroy(SessionListener);
        SessionListener = nullptr;
    }
}
// Called by AllJoyn to ask whether a prospective joiner may enter the session.
// This sample imposes no policy and admits every joiner.
bool SecureInterfaceProducer::OnAcceptSessionJoiner(_In_ alljoyn_sessionport sessionPort, _In_ PCSTR joiner, _In_ const alljoyn_sessionopts opts)
{
    UNREFERENCED_PARAMETER(sessionPort);
    UNREFERENCED_PARAMETER(joiner);
    UNREFERENCED_PARAMETER(opts);
    return true;
}
// Called by AllJoyn once a consumer has joined the session. Records the
// session identity, wires this producer's session-lifecycle callbacks into
// a new native session listener, and attaches it to the bus attachment.
void SecureInterfaceProducer::OnSessionJoined(_In_ alljoyn_sessionport sessionPort, _In_ alljoyn_sessionid id, _In_ PCSTR joiner)
{
    UNREFERENCED_PARAMETER(joiner);
    // We initialize the Signals object after the session has been joined, because it needs
    // the session id.
    m_signals->Initialize(BusObject, id);
    m_sessionPort = sessionPort;
    m_sessionId = id;
    // Static helper templates dispatch the C callbacks back to this instance
    // through the weak reference passed below.
    alljoyn_sessionlistener_callbacks callbacks =
    {
        AllJoynHelpers::SessionLostHandler<SecureInterfaceProducer>,
        AllJoynHelpers::SessionMemberAddedHandler<SecureInterfaceProducer>,
        AllJoynHelpers::SessionMemberRemovedHandler<SecureInterfaceProducer>
    };
    SessionListener = alljoyn_sessionlistener_create(&callbacks, m_weak);
    alljoyn_busattachment_setsessionlistener(AllJoynHelpers::GetInternalBusAttachment(m_busAttachment), id, SessionListener);
}
// Raised by AllJoyn when a session ends. Forwards the loss (with its reason)
// to SessionLost subscribers, but only for the session this producer hosts.
void SecureInterfaceProducer::OnSessionLost(_In_ alljoyn_sessionid sessionId, _In_ alljoyn_sessionlostreason reason)
{
    if (sessionId != m_sessionId)
    {
        return;
    }
    auto lostReason = static_cast<AllJoynSessionLostReason>(reason);
    SessionLost(this, ref new AllJoynSessionLostEventArgs(lostReason));
}
// Raised when a member joins the hosted session; re-raises the WinRT
// SessionMemberAdded event with the member's unique bus name.
void SecureInterfaceProducer::OnSessionMemberAdded(_In_ alljoyn_sessionid sessionId, _In_ PCSTR uniqueName)
{
    if (sessionId != m_sessionId)
    {
        return;
    }
    Platform::String^ memberName = AllJoynHelpers::MultibyteToPlatformString(uniqueName);
    SessionMemberAdded(this, ref new AllJoynSessionMemberAddedEventArgs(memberName));
}
// Raised when a member leaves the hosted session; re-raises the WinRT
// SessionMemberRemoved event with the member's unique bus name.
void SecureInterfaceProducer::OnSessionMemberRemoved(_In_ alljoyn_sessionid sessionId, _In_ PCSTR uniqueName)
{
    if (sessionId != m_sessionId)
    {
        return;
    }
    Platform::String^ memberName = AllJoynHelpers::MultibyteToPlatformString(uniqueName);
    SessionMemberRemoved(this, ref new AllJoynSessionMemberRemovedEventArgs(memberName));
}
// Tracks the bus attachment lifecycle: once connected, a producer session is
// created; on disconnect (or on session-creation failure) the producer is
// stopped and the failure status is surfaced through StopInternal.
void SecureInterfaceProducer::BusAttachmentStateChanged(_In_ AllJoynBusAttachment^ sender, _In_ AllJoynBusAttachmentStateChangedEventArgs^ args)
{
    switch (args->State)
    {
    case AllJoynBusAttachmentState::Connected:
    {
        QStatus status = AllJoynHelpers::CreateProducerSession<SecureInterfaceProducer>(m_busAttachment, m_weak);
        if (ER_OK != status)
        {
            StopInternal(status);
        }
        break;
    }
    case AllJoynBusAttachmentState::Disconnected:
        StopInternal(ER_BUS_STOPPING);
        break;
    default:
        break;
    }
}
// Dispatches an incoming "Concatenate" method call to the app-provided
// Service implementation and sends the method reply.
// Looks up the owning producer from the static bus-object map, unpacks the
// two string arguments, awaits the service call, then replies with either
// the output string or an error status.
void SecureInterfaceProducer::CallConcatenateHandler(_Inout_ alljoyn_busobject busObject, _In_ alljoyn_message message)
{
    // Map the native bus object back to its managed producer; unknown objects are ignored.
    auto source = SourceObjects.find(busObject);
    if (source == SourceObjects.end())
    {
        return;
    }
    SecureInterfaceProducer^ producer = source->second->Resolve<SecureInterfaceProducer>();
    if (producer->Service != nullptr)
    {
        AllJoynMessageInfo^ callInfo = ref new AllJoynMessageInfo(AllJoynHelpers::MultibyteToPlatformString(alljoyn_message_getsender(message)));
        // Extract the two "s" (string) input arguments from the message.
        Platform::String^ inputArg0;
        (void)TypeConversionHelpers::GetAllJoynMessageArg(alljoyn_message_getarg(message, 0), "s", &inputArg0);
        Platform::String^ inputArg1;
        (void)TypeConversionHelpers::GetAllJoynMessageArg(alljoyn_message_getarg(message, 1), "s", &inputArg1);
        // Block until the (possibly async) service implementation completes.
        SecureInterfaceConcatenateResult^ result = create_task(producer->Service->ConcatenateAsync(callInfo, inputArg0, inputArg1)).get();
        // Run the reply on the result's creation context (e.g. a UI thread
        // apartment, if that is where the result object was created).
        create_task([](){}).then([=]
        {
            int32 status;
            if (nullptr == result)
            {
                // No result object: report that nobody handled the call.
                alljoyn_busobject_methodreply_status(busObject, message, ER_BUS_NO_LISTENER);
                return;
            }
            status = result->Status;
            if (AllJoynStatus::Ok != status)
            {
                alljoyn_busobject_methodreply_status(busObject, message, static_cast<QStatus>(status));
                return;
            }
            // Marshal the single "s" output argument. The msgarg array is
            // destroyed on every path to avoid leaking the native allocation.
            size_t argCount = 1;
            alljoyn_msgarg outputs = alljoyn_msgarg_array_create(argCount);
            status = TypeConversionHelpers::SetAllJoynMessageArg(alljoyn_msgarg_array_element(outputs, 0), "s", result->OutStr);
            if (AllJoynStatus::Ok != status)
            {
                alljoyn_busobject_methodreply_status(busObject, message, static_cast<QStatus>(status));
                alljoyn_msgarg_destroy(outputs);
                return;
            }
            alljoyn_busobject_methodreply_args(busObject, message, outputs, argCount);
            alljoyn_msgarg_destroy(outputs);
        }, result->m_creationContext).wait();
    }
}
// Handles receipt of the "TextSent" signal on the interface: resolves the
// owning producer from the static interface map, unpacks the single string
// argument, and re-raises it as the WinRT TextSentReceived event.
void SecureInterfaceProducer::CallTextSentSignalHandler(_In_ const alljoyn_interfacedescription_member* member, _In_ alljoyn_message message)
{
    // Map the native interface description back to its managed producer.
    auto source = SourceInterfaces.find(member->iface);
    if (source == SourceInterfaces.end())
    {
        return;
    }
    auto producer = source->second->Resolve<SecureInterfaceProducer>();
    if (producer->Signals != nullptr)
    {
        auto callInfo = ref new AllJoynMessageInfo(AllJoynHelpers::MultibyteToPlatformString(alljoyn_message_getsender(message)));
        auto eventArgs = ref new SecureInterfaceTextSentReceivedEventArgs();
        eventArgs->MessageInfo = callInfo;
        // The signal carries one "s" (string) argument: the message text.
        Platform::String^ argument0;
        (void)TypeConversionHelpers::GetAllJoynMessageArg(alljoyn_message_getarg(message, 0), "s", &argument0);
        eventArgs->Message = argument0;
        producer->Signals->CallTextSentReceived(producer->Signals, eventArgs);
    }
}
// Registers a handler for one method of the interface on this producer's
// bus object. Returns ER_BUS_INTERFACE_NO_SUCH_MEMBER when the named method
// does not exist on the interface description.
QStatus SecureInterfaceProducer::AddMethodHandler(_In_ alljoyn_interfacedescription interfaceDescription, _In_ PCSTR methodName, _In_ alljoyn_messagereceiver_methodhandler_ptr handler)
{
    alljoyn_interfacedescription_member methodMember;
    QCC_BOOL found = alljoyn_interfacedescription_getmember(interfaceDescription, methodName, &methodMember);
    if (!found)
    {
        return ER_BUS_INTERFACE_NO_SUCH_MEMBER;
    }
    // m_weak is passed as the handler context so callbacks can reach this producer.
    return alljoyn_busobject_addmethodhandler(m_busObject, methodMember, handler, m_weak);
}
// Registers a handler for one signal of the interface on the given bus
// attachment. Returns ER_BUS_INTERFACE_NO_SUCH_MEMBER when the named signal
// does not exist on the interface description.
QStatus SecureInterfaceProducer::AddSignalHandler(_In_ alljoyn_busattachment busAttachment, _In_ alljoyn_interfacedescription interfaceDescription, _In_ PCSTR methodName, _In_ alljoyn_messagereceiver_signalhandler_ptr handler)
{
    alljoyn_interfacedescription_member signalMember;
    QCC_BOOL found = alljoyn_interfacedescription_getmember(interfaceDescription, methodName, &signalMember);
    if (!found)
    {
        return ER_BUS_INTERFACE_NO_SUCH_MEMBER;
    }
    return alljoyn_busattachment_registersignalhandler(busAttachment, handler, signalMember, NULL);
}
// Services bus "property get" requests. Only IsUpperCaseEnabled is exposed;
// the value is fetched from the app-provided Service and marshaled into the
// supplied msgarg as a boolean ("b"). Unknown names yield ER_BUS_NO_SUCH_PROPERTY.
QStatus SecureInterfaceProducer::OnPropertyGet(_In_ PCSTR interfaceName, _In_ PCSTR propertyName, _Inout_ alljoyn_msgarg value)
{
    UNREFERENCED_PARAMETER(interfaceName);
    if (0 == strcmp(propertyName, "IsUpperCaseEnabled"))
    {
        // Block on the async service call, then marshal on the result's
        // creation context (matching where the result object was created).
        auto task = create_task(Service->GetIsUpperCaseEnabledAsync(nullptr));
        auto result = task.get();
        return create_task([](){}).then([=]() -> QStatus
        {
            if (AllJoynStatus::Ok != result->Status)
            {
                return static_cast<QStatus>(result->Status);
            }
            return static_cast<QStatus>(TypeConversionHelpers::SetAllJoynMessageArg(value, "b", result->IsUpperCaseEnabled));
        }, result->m_creationContext).get();
    }
    return ER_BUS_NO_SUCH_PROPERTY;
}
// Services bus "property set" requests. Only IsUpperCaseEnabled is exposed;
// the boolean ("b") value is unmarshaled and forwarded to the app-provided
// Service. Unknown names yield ER_BUS_NO_SUCH_PROPERTY.
QStatus SecureInterfaceProducer::OnPropertySet(_In_ PCSTR interfaceName, _In_ PCSTR propertyName, _In_ alljoyn_msgarg value)
{
    UNREFERENCED_PARAMETER(interfaceName);
    if (0 == strcmp(propertyName, "IsUpperCaseEnabled"))
    {
        bool argument;
        QStatus status = static_cast<QStatus>(TypeConversionHelpers::GetAllJoynMessageArg(value, "b", &argument));
        if (ER_OK == status)
        {
            // Only forward to the service if unmarshaling succeeded; block on completion.
            auto task = create_task(Service->SetIsUpperCaseEnabledAsync(nullptr, argument));
            auto result = task.get();
            status = static_cast<QStatus>(result->Status);
        }
        return status;
    }
    return ER_BUS_NO_SUCH_PROPERTY;
}
// Asynchronously emits the org.freedesktop.DBus PropertiesChanged notification
// for IsUpperCaseEnabled, reading the current value through OnPropertyGet.
// NOTE(review): the lambda captures by reference ([&], i.e. `this`) and runs
// on a background task — presumably the producer outlives the task; confirm
// callers do not destroy the producer while this emission is in flight.
void SecureInterfaceProducer::EmitIsUpperCaseEnabledChanged()
{
    create_task([&]
    {
        alljoyn_msgarg value = alljoyn_msgarg_create();
        OnPropertyGet("com.microsoft.Samples.SecureInterface", "IsUpperCaseEnabled", value);
        alljoyn_busobject_emitpropertychanged(
            m_busObject,
            "com.microsoft.Samples.SecureInterface",
            "IsUpperCaseEnabled",
            value,
            m_sessionId);
        // Free the native msgarg allocated above.
        alljoyn_msgarg_destroy(value);
    });
}
// Brings the producer online. The sequence is order-sensitive:
//   1. create the interface(s) from the introspection XML,
//   2. create the bus object and announce the interface on it,
//   3. attach the Concatenate method handler and TextSent signal handler,
//   4. record this producer in the static lookup maps used by the handlers,
//   5. register the bus object (secure or not, depending on the configured
//      authentication mechanisms and the interface's security annotation),
//   6. subscribe to bus-attachment state changes and connect.
// Any failure aborts via StopInternal(status), which also raises Stopped.
void SecureInterfaceProducer::Start()
{
    if (nullptr == m_busAttachment)
    {
        StopInternal(ER_FAIL);
        return;
    }
    // Step 1: build interface descriptions from the XML at the bottom of this file.
    QStatus result = AllJoynHelpers::CreateInterfaces(m_busAttachment, c_SecureInterfaceIntrospectionXml);
    if (result != ER_OK)
    {
        StopInternal(result);
        return;
    }
    // Step 2: create the native bus object for this producer.
    result = AllJoynHelpers::CreateBusObject<SecureInterfaceProducer>(m_weak);
    if (result != ER_OK)
    {
        StopInternal(result);
        return;
    }
    alljoyn_interfacedescription interfaceDescription = alljoyn_busattachment_getinterface(AllJoynHelpers::GetInternalBusAttachment(m_busAttachment), "com.microsoft.Samples.SecureInterface");
    if (interfaceDescription == nullptr)
    {
        StopInternal(ER_FAIL);
        return;
    }
    alljoyn_busobject_addinterface_announced(BusObject, interfaceDescription);
    // Step 3: route incoming Concatenate calls to CallConcatenateHandler.
    result = AddMethodHandler(
        interfaceDescription,
        "Concatenate",
        [](alljoyn_busobject busObject, const alljoyn_interfacedescription_member* member, alljoyn_message message) { UNREFERENCED_PARAMETER(member); CallConcatenateHandler(busObject, message); });
    if (result != ER_OK)
    {
        StopInternal(result);
        return;
    }
    // Route incoming TextSent signals to CallTextSentSignalHandler.
    result = AddSignalHandler(
        AllJoynHelpers::GetInternalBusAttachment(m_busAttachment),
        interfaceDescription,
        "TextSent",
        [](const alljoyn_interfacedescription_member* member, PCSTR srcPath, alljoyn_message message) { UNREFERENCED_PARAMETER(srcPath); CallTextSentSignalHandler(member, message); });
    if (result != ER_OK)
    {
        StopInternal(result);
        return;
    }
    // Step 4: make this instance discoverable from the static C callbacks.
    SourceObjects[m_busObject] = m_weak;
    SourceInterfaces[interfaceDescription] = m_weak;
    // Step 5: pick secure vs. plain registration.
    unsigned int noneMechanismIndex = 0;
    bool authenticationMechanismsContainsNone = m_busAttachment->AuthenticationMechanisms->IndexOf(AllJoynAuthenticationMechanism::None, &noneMechanismIndex);
    QCC_BOOL interfaceIsSecure = alljoyn_interfacedescription_issecure(interfaceDescription);
    // If the current set of AuthenticationMechanisms supports authentication,
    // determine whether a secure BusObject is required.
    if (AllJoynHelpers::CanSecure(m_busAttachment->AuthenticationMechanisms))
    {
        // Register the BusObject as "secure" if the org.alljoyn.Bus.Secure XML annotation
        // is specified, or if None is not present in AuthenticationMechanisms.
        if (!authenticationMechanismsContainsNone || interfaceIsSecure)
        {
            result = alljoyn_busattachment_registerbusobject_secure(AllJoynHelpers::GetInternalBusAttachment(m_busAttachment), BusObject);
        }
        else
        {
            result = alljoyn_busattachment_registerbusobject(AllJoynHelpers::GetInternalBusAttachment(m_busAttachment), BusObject);
        }
    }
    else
    {
        // If the current set of AuthenticationMechanisms does not support authentication
        // but the interface requires security, report an error.
        if (interfaceIsSecure)
        {
            result = ER_BUS_NO_AUTHENTICATION_MECHANISM;
        }
        else
        {
            result = alljoyn_busattachment_registerbusobject(AllJoynHelpers::GetInternalBusAttachment(m_busAttachment), BusObject);
        }
    }
    if (result != ER_OK)
    {
        StopInternal(result);
        return;
    }
    // Step 6: session creation happens in BusAttachmentStateChanged once connected.
    m_busAttachmentStateChangedToken = m_busAttachment->StateChanged += ref new TypedEventHandler<AllJoynBusAttachment^,AllJoynBusAttachmentStateChangedEventArgs^>(this, &SecureInterfaceProducer::BusAttachmentStateChanged);
    m_busAttachment->Connect();
}
// Public stop: tears down the producer and raises Stopped with a success status.
void SecureInterfaceProducer::Stop()
{
    StopInternal(AllJoynStatus::Ok);
}
// Shared teardown path: unregisters everything from the bus, then raises the
// Stopped event carrying the given status (Ok for a deliberate Stop(), or an
// error code when startup/connection failed).
void SecureInterfaceProducer::StopInternal(int32 status)
{
    UnregisterFromBus();
    Stopped(this, ref new AllJoynProducerStoppedEventArgs(status));
}
// Ejects the member with the given unique bus name from the hosted session.
// Returns the raw AllJoyn status code from the underlying C call.
int32 SecureInterfaceProducer::RemoveMemberFromSession(_In_ String^ uniqueName)
{
    return alljoyn_busattachment_removesessionmember(
        AllJoynHelpers::GetInternalBusAttachment(m_busAttachment),
        m_sessionId,
        AllJoynHelpers::PlatformToMultibyteString(uniqueName).data());
}
// Introspection XML for the com.microsoft.Samples.SecureInterface interface,
// consumed by AllJoynHelpers::CreateInterfaces in Start(). It declares:
//  - the org.alljoyn.Bus.Secure annotation (forces secure registration),
//  - the Concatenate method (two string inputs, one string output),
//  - the read/write IsUpperCaseEnabled boolean property (emits change signals),
//  - the TextSent signal carrying one string message.
PCSTR com::microsoft::Samples::SecureInterface::c_SecureInterfaceIntrospectionXml = "<interface name=\"com.microsoft.Samples.SecureInterface\">"
"  <description>A secure AllJoyn sample</description>"
"  <annotation name=\"org.alljoyn.Bus.Secure\" value=\"true\" />"
"  <method name=\"Concatenate\">"
"    <description>Concatenate two input strings and returns the concatenated string as output</description>"
"    <arg name=\"inStr1\" type=\"s\" direction=\"in\" />"
"    <arg name=\"inStr2\" type=\"s\" direction=\"in\" />"
"    <arg name=\"outStr\" type=\"s\" direction=\"out\" />"
"  </method>"
"  <property name=\"IsUpperCaseEnabled\" type=\"b\" access=\"readwrite\">"
"    <description>Determine if the output of the Concatenate method is returned as upper case string or not</description>"
"    <annotation name=\"org.freedesktop.DBus.Property.EmitsChangedSignal\" value=\"true\" />"
"  </property>"
"  <signal name=\"TextSent\">"
"    <description>This signal is emitted when producer sends a text message to consumer</description>"
"    <arg name=\"message\" type=\"s\" />"
"  </signal>"
"</interface>"
; | oldnewthing/Windows-universal-samples | Samples/AllJoyn/Common/Scenario1WinRTComponent/SecureInterfaceProducer.cpp | C++ | mit | 17,098 |
//-----------------------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
//-----------------------------------------------------------------------------
namespace System.Activities.DynamicUpdate
{
using System;
using System.IO;
using System.Activities;
using System.Activities.Expressions;
using System.Activities.DynamicUpdate;
using System.Activities.Hosting;
using System.Activities.Runtime;
using System.Collections;
using System.Collections.Generic;
using System.Collections.ObjectModel;
using System.Diagnostics.CodeAnalysis;
using System.Globalization;
using System.Runtime;
using System.Runtime.CompilerServices;
using System.Xaml;
using System.Activities.Validation;
using Microsoft.VisualBasic.Activities;
public static class DynamicUpdateServices
{
// Cached delegates handed to DynamicUpdateMapBuilder in
// InternalTryCreateUpdateMap; each maps an offending Activity to the
// Exception to raise (method bodies are defined elsewhere in this class).
private static Func<Activity, Exception> onInvalidActivityToBlockUpdate =
    new Func<Activity, Exception>(OnInvalidActivityToBlockUpdate);
private static Func<Activity, Exception> onInvalidImplementationMapAssociation =
    new Func<Activity, Exception>(OnInvalidImplementationMapAssociation);
// Identifier of the attachable "ImplementationMap" property read/written by
// GetImplementationMap / SetImplementationMap.
private static AttachableMemberIdentifier implementationMapProperty = new AttachableMemberIdentifier(typeof(DynamicUpdateServices), "ImplementationMap");
/// <summary>
/// Snapshots the given workflow definition (via InternalPrepareForUpdate) so
/// that an update map can later be produced with CreateUpdateMap.
/// </summary>
public static void PrepareForUpdate(Activity workflowDefinitionToBeUpdated)
{
    if (null == workflowDefinitionToBeUpdated)
    {
        throw FxTrace.Exception.ArgumentNull("workflowDefinitionToBeUpdated");
    }
    InternalPrepareForUpdate(workflowDefinitionToBeUpdated, forImplementation: false);
}
/// <summary>
/// Snapshots the given activity definition (via InternalPrepareForUpdate) so
/// that an implementation update map can later be produced with CreateUpdateMap.
/// </summary>
public static void PrepareForUpdate(ActivityBuilder activityDefinitionToBeUpdated)
{
    if (null == activityDefinitionToBeUpdated)
    {
        throw FxTrace.Exception.ArgumentNull("activityDefinitionToBeUpdated");
    }
    InternalPrepareForUpdate(activityDefinitionToBeUpdated, forImplementation: true);
}
// Shared implementation of both PrepareForUpdate overloads.
// Deep-clones the definition through a XAML object reader/writer round trip,
// stores the clone as the "original" snapshot, computes per-object
// DynamicUpdateMapItems, and attaches them as attachable properties so they
// serialize with the definition and are available to CreateUpdateMap later.
private static void InternalPrepareForUpdate(object definitionToBeUpdated, bool forImplementation)
{
    // Clone the definition
    object clone;
    using (XamlObjectReader reader = new XamlObjectReader(definitionToBeUpdated))
    {
        using (XamlObjectWriter writer = new XamlObjectWriter(reader.SchemaContext))
        {
            XamlServices.Transform(reader, writer);
            clone = writer.Result;
        }
    }
    // Calculate the match info
    // Set the match info as attached properties so it is serializable,
    // and available when the user calls CreateUpdateMap
    IDictionary<object, DynamicUpdateMapItem> mapItems;
    if (!forImplementation)
    {
        // Workflow-definition case: snapshot the Activity and map its public tree.
        DynamicUpdateInfo.SetOriginalDefinition(definitionToBeUpdated, (Activity)clone);
        mapItems = DynamicUpdateMap.CalculateMapItems((Activity)definitionToBeUpdated);
    }
    else
    {
        // ActivityBuilder case: snapshot the builder and map the built
        // DynamicActivity's implementation tree.
        DynamicUpdateInfo.SetOriginalActivityBuilder(definitionToBeUpdated, (ActivityBuilder)clone);
        mapItems = DynamicUpdateMap.CalculateImplementationMapItems(GetDynamicActivity((ActivityBuilder)definitionToBeUpdated));
    }
    foreach (KeyValuePair<object, DynamicUpdateMapItem> objectInfo in mapItems)
    {
        DynamicUpdateInfo.SetMapItem(objectInfo.Key, objectInfo.Value);
    }
}
// Convenience overload: no activities are excluded from update.
public static DynamicUpdateMap CreateUpdateMap(Activity updatedWorkflowDefinition)
{
    return CreateUpdateMap(updatedWorkflowDefinition, null);
}
// Convenience overload: discards the list of activities that blocked update.
public static DynamicUpdateMap CreateUpdateMap(Activity updatedWorkflowDefinition, IEnumerable<Activity> disallowUpdateInsideActivities)
{
    IList<ActivityBlockingUpdate> activitiesBlockingUpdate;
    return CreateUpdateMap(updatedWorkflowDefinition, disallowUpdateInsideActivities, out activitiesBlockingUpdate);
}
// Produces the update map between the snapshot taken by PrepareForUpdate and
// the (now modified) workflow definition, then strips the snapshot and all
// per-object DynamicUpdateMapItems so the definition is left clean.
// Throws if PrepareForUpdate was not called on this definition first.
// activitiesBlockingUpdate receives the activities that prevent update.
[SuppressMessage(FxCop.Category.Design, FxCop.Rule.AvoidOutParameters, Justification = "Approved Design. Need to return the map and the block list.")]
public static DynamicUpdateMap CreateUpdateMap(Activity updatedWorkflowDefinition, IEnumerable<Activity> disallowUpdateInsideActivities, out IList<ActivityBlockingUpdate> activitiesBlockingUpdate)
{
    if (updatedWorkflowDefinition == null)
    {
        throw FxTrace.Exception.ArgumentNull("updatedWorkflowDefinition");
    }
    Activity originalDefinition = DynamicUpdateInfo.GetOriginalDefinition(updatedWorkflowDefinition);
    if (originalDefinition == null)
    {
        // No snapshot attached: PrepareForUpdate was never called.
        throw FxTrace.Exception.Argument("updatedWorkflowDefinition", SR.MustCallPrepareBeforeFinalize);
    }
    DynamicUpdateMap result = InternalTryCreateUpdateMap(updatedWorkflowDefinition, originalDefinition, disallowUpdateInsideActivities, false, out activitiesBlockingUpdate);
    // Remove the DynamicUpdateMapItems now that the update is finalized
    // Calling CalculateMapItems is actually an unnecessary perf hit since it calls CacheMetadata
    // again; but we do it so that Finalize is implemented purely in terms of other public APIs.
    DynamicUpdateInfo.SetOriginalDefinition(updatedWorkflowDefinition, null);
    IDictionary<object, DynamicUpdateMapItem> mapItems = DynamicUpdateMap.CalculateMapItems(updatedWorkflowDefinition);
    foreach (object matchObject in mapItems.Keys)
    {
        DynamicUpdateInfo.SetMapItem(matchObject, null);
    }
    return result;
}
// Convenience overload: no activities are excluded from update.
public static DynamicUpdateMap CreateUpdateMap(ActivityBuilder updatedActivityDefinition)
{
    return CreateUpdateMap(updatedActivityDefinition, null);
}
// Convenience overload: discards the list of activities that blocked update.
public static DynamicUpdateMap CreateUpdateMap(ActivityBuilder updatedActivityDefinition, IEnumerable<Activity> disallowUpdateInsideActivities)
{
    IList<ActivityBlockingUpdate> activitiesBlockingUpdate;
    return CreateUpdateMap(updatedActivityDefinition, disallowUpdateInsideActivities, out activitiesBlockingUpdate);
}
// ActivityBuilder counterpart of the Activity overload above: builds
// DynamicActivity roots for both the snapshot and the updated builder,
// creates an *implementation* map between them, then strips the snapshot
// and all per-object DynamicUpdateMapItems from the updated definition.
// Throws if PrepareForUpdate was not called on this builder first.
[SuppressMessage(FxCop.Category.Design, FxCop.Rule.AvoidOutParameters, Justification = "Approved Design. Need to return the map and the block list.")]
public static DynamicUpdateMap CreateUpdateMap(ActivityBuilder updatedActivityDefinition, IEnumerable<Activity> disallowUpdateInsideActivities, out IList<ActivityBlockingUpdate> activitiesBlockingUpdate)
{
    if (updatedActivityDefinition == null)
    {
        throw FxTrace.Exception.ArgumentNull("updatedActivityDefinition");
    }
    ActivityBuilder originalActivityDefinition = DynamicUpdateInfo.GetOriginalActivityBuilder(updatedActivityDefinition);
    if (originalActivityDefinition == null)
    {
        // No snapshot attached: PrepareForUpdate was never called.
        throw FxTrace.Exception.Argument("updatedActivityDefinition", SR.MustCallPrepareBeforeFinalize);
    }
    Activity originalBuiltRoot = GetDynamicActivity(originalActivityDefinition);
    Activity updatedBuiltRoot = GetDynamicActivity(updatedActivityDefinition);
    DynamicUpdateMap result = InternalTryCreateUpdateMap(updatedBuiltRoot, originalBuiltRoot, disallowUpdateInsideActivities, true, out activitiesBlockingUpdate);
    // Remove the DynamicUpdateMapItems now that the update is finalized
    // Calling CalculateMapItems is actually an unnecessary perf hit since it calls CacheMetadata
    // again; but we do it so that Finalize is implemented purely in terms of other public APIs.
    DynamicUpdateInfo.SetOriginalActivityBuilder(updatedActivityDefinition, null);
    IDictionary<object, DynamicUpdateMapItem> mapItems = DynamicUpdateMap.CalculateImplementationMapItems(updatedBuiltRoot);
    foreach (object matchObject in mapItems.Keys)
    {
        DynamicUpdateInfo.SetMapItem(matchObject, null);
    }
    return result;
}
private static DynamicUpdateMap InternalTryCreateUpdateMap(Activity updatedDefinition, Activity originalDefinition, IEnumerable<Activity> disallowUpdateInsideActivities, bool forImplementation, out IList<ActivityBlockingUpdate> activitiesBlockingUpdate)
{
DynamicUpdateMapBuilder builder = new DynamicUpdateMapBuilder
{
ForImplementation = forImplementation,
LookupMapItem = DynamicUpdateInfo.GetMapItem,
LookupImplementationMap = GetImplementationMap,
UpdatedWorkflowDefinition = updatedDefinition,
OriginalWorkflowDefinition = originalDefinition,
OnInvalidActivityToBlockUpdate = onInvalidActivityToBlockUpdate,
OnInvalidImplementationMapAssociation = onInvalidImplementationMapAssociation,
};
if (disallowUpdateInsideActivities != null)
{
foreach (Activity activity in disallowUpdateInsideActivities)
{
builder.DisallowUpdateInside.Add(activity);
}
}
return builder.CreateMap(out activitiesBlockingUpdate);
}
public static DynamicUpdateMap GetImplementationMap(Activity targetActivity)
{
DynamicUpdateMap result;
if (AttachablePropertyServices.TryGetProperty(targetActivity, implementationMapProperty, out result))
{
return result;
}
else
{
return null;
}
}
public static void SetImplementationMap(Activity targetActivity, DynamicUpdateMap implementationMap)
{
if (implementationMap != null)
{
AttachablePropertyServices.SetProperty(targetActivity, implementationMapProperty, implementationMap);
}
else
{
AttachablePropertyServices.RemoveProperty(targetActivity, implementationMapProperty);
}
}
static DynamicActivity GetDynamicActivity(ActivityBuilder activityDefinition)
{
DynamicActivity result = new DynamicActivity
{
Name = activityDefinition.Name
};
foreach (DynamicActivityProperty property in activityDefinition.Properties)
{
result.Properties.Add(property);
}
foreach (Attribute attrib in activityDefinition.Attributes)
{
result.Attributes.Add(attrib);
}
foreach (Constraint constraint in activityDefinition.Constraints)
{
result.Constraints.Add(constraint);
}
result.Implementation = () => activityDefinition.Implementation;
VisualBasicSettings vbsettings = VisualBasic.GetSettings(activityDefinition);
if (vbsettings != null)
{
VisualBasic.SetSettings(result, vbsettings);
}
IList<string> namespacesForImplementation = TextExpression.GetNamespacesForImplementation(activityDefinition);
if (namespacesForImplementation.Count > 0)
{
TextExpression.SetNamespacesForImplementation(result, namespacesForImplementation);
}
IList<AssemblyReference> referencesForImplementation = TextExpression.GetReferencesForImplementation(activityDefinition);
if (referencesForImplementation.Count > 0)
{
TextExpression.SetReferencesForImplementation(result, referencesForImplementation);
}
return result;
}
static Exception OnInvalidActivityToBlockUpdate(Activity activity)
{
return new ArgumentException(SR.InvalidActivityToBlockUpdateServices(activity), "disallowUpdateInsideActivities");
}
static Exception OnInvalidImplementationMapAssociation(Activity activity)
{
return new InvalidOperationException(SR.InvalidImplementationMapAssociationServices(activity));
}
}
}
| sekcheong/referencesource | System.Activities/System/Activities/DynamicUpdate/DynamicUpdateServices.cs | C# | mit | 12,544 |
/**
* @module nock/scope
*/
var fs = require('fs')
, globalIntercept = require('./intercept')
, mixin = require('./mixin')
, matchBody = require('./match_body')
, common = require('./common')
, assert = require('assert')
, url = require('url')
, _ = require('lodash')
, debug = require('debug')('nock.scope');
/**
 * Duck-type check for a readable stream: a defined, non-null value that
 * is not a string, not a Buffer, and exposes a setEncoding function.
 *
 * Fixes the original string check, which tested the undeclared
 * identifier `a` (`typeof a !== 'string'`) instead of `obj`, and guards
 * against null (the original would throw on `null.setEncoding`).
 *
 * @param  {*} obj candidate value
 * @return {Boolean} true when obj looks like a stream
 */
function isStream(obj) {
    return (obj !== null) &&
        (typeof obj !== 'undefined') &&
        (typeof obj !== 'string') &&
        (!Buffer.isBuffer(obj)) &&
        (typeof obj.setEncoding === 'function');
}
function startScope(basePath, options) {
var interceptors = {},
scope,
transformPathFunction,
transformRequestBodyFunction,
matchHeaders = [],
logger = debug,
scopeOptions = options || {},
urlParts = url.parse(basePath),
port = urlParts.port || ((urlParts.protocol === 'http:') ? 80 : 443),
persist = false;
basePath = urlParts.protocol + '//' + urlParts.hostname + ':' + port;
// Registers an interceptor in this scope's bookkeeping table (keyed by
// METHOD + full URL) and with the global interception machinery.
function add(key, interceptor, scope) {
    var known = interceptors.hasOwnProperty(key);
    var list = known ? interceptors[key] : (interceptors[key] = []);
    list.push(interceptor);
    globalIntercept(basePath, interceptor, scope, scopeOptions);
}
// Unregisters a used-up interceptor, unless this scope is persistent.
// When the key's list drains completely it is removed from the table.
function remove(key, interceptor) {
    if (persist) {
        return;
    }
    var list = interceptors[key];
    if (!list) {
        return;
    }
    list.splice(list.indexOf(interceptor), 1);
    if (list.length === 0) {
        delete interceptors[key];
    }
}
function intercept(uri, method, requestBody, interceptorOptions) {
var interceptorMatchHeaders = [];
var key = method.toUpperCase() + ' ' + basePath + uri;
// Records a canned response on this interceptor and registers it with
// the scope. `body` may be a string, Buffer, stream, function or plain
// object (plain objects are JSON-serialised). Returns the scope so the
// fluent chain can continue.
function reply(statusCode, body, headers) {
this.statusCode = statusCode;
// Interceptor-level options win; fall back to scope-wide options for
// any key the caller did not supply.
this.options = interceptorOptions || {};
for(var opt in scopeOptions) {
if(typeof this.options[opt] === 'undefined') {
this.options[opt] = scopeOptions[opt];
}
}
// Merge scope-wide default reply headers in (explicit headers win).
if (scope._defaultReplyHeaders) {
headers = headers || {};
headers = mixin(scope._defaultReplyHeaders, headers);
}
if (headers !== undefined) {
this.headers = {};
// makes sure all keys in headers are in lower case
for (var key2 in headers) {
if (headers.hasOwnProperty(key2)) {
this.headers[key2.toLowerCase()] = headers[key2];
}
}
}
// If the content is not encoded we may need to transform the response body.
// Otherwise we leave it as it is.
if(!common.isContentEncoded(headers)) {
if (body && typeof(body) !== 'string' && typeof(body) !== 'function' && !Buffer.isBuffer(body) && !isStream(body)) {
try {
body = JSON.stringify(body);
if (!this.headers) {
this.headers = {};
}
// Default the content type for auto-serialised JSON bodies.
if (!this.headers['content-type']) {
this.headers['content-type'] = 'application/json';
}
} catch(err) {
throw new Error('Error encoding response body into JSON');
}
}
}
this.body = body;
// Register under METHOD + URL so incoming requests can find this reply.
add(key, this, scope, scopeOptions);
return scope;
}
// Like reply(), but streams the response body from a file. The stream is
// paused immediately so no data is lost before a request arrives; the
// path is remembered so discard() can re-arm persisted interceptors.
function replyWithFile(statusCode, filePath, headers) {
    var fileStream = fs.createReadStream(filePath);
    fileStream.pause();
    this.filePath = filePath;
    return reply.call(this, statusCode, fileStream, headers);
}
// Matches `target` against `pattern`: a RegExp is applied via
// String#match (yielding the match result or null); anything else is
// compared with strict equality.
var matchStringOrRegexp = function(target, pattern) {
    return pattern instanceof RegExp ? target.match(pattern) : target === pattern;
};
// Decides whether this interceptor matches an outgoing request.
// Compares, in order: hostname (when hostNameOnly), scope/interceptor
// header expectations, raw request headers, the METHOD + URL key and
// finally the request body. Logs the reason via `logger` on mismatch.
function match(options, body, hostNameOnly) {
if (hostNameOnly) {
return options.hostname === urlParts.hostname;
}
var method = options.method || 'GET'
, path = options.path
, matches
, proto = options.proto;
// Apply the scope's path filter before comparing keys.
if (transformPathFunction) {
path = transformPathFunction(path);
}
if (typeof(body) !== 'string') {
body = body.toString();
}
// Apply the scope's request-body filter before comparing bodies.
if (transformRequestBodyFunction) {
body = transformRequestBodyFunction(body);
}
var checkHeaders = function(header) {
return matchStringOrRegexp(options.getHeader(header.name), header.value);
};
if (!matchHeaders.every(checkHeaders) ||
!interceptorMatchHeaders.every(checkHeaders)) {
logger('headers don\'t match');
return false;
}
// Also match request headers
// https://github.com/pgte/nock/issues/163
// NOTE(review): deliberately loose (==) comparison here, so e.g. a
// numeric and a string header value compare equal — confirm before
// tightening.
function reqheaderMatches(key) {
return ! options.headers || options.headers[key] == this.reqheaders[key];
}
var reqHeadersMatch =
! this.reqheaders ||
Object.keys(this.reqheaders).every(reqheaderMatches.bind(this));
if (!reqHeadersMatch) {
logger('request headers don\'t match');
return false;
}
var matchKey = method.toUpperCase() + ' ';
// If we have a filtered scope then we use it instead reconstructing
// the scope from the request options (proto, host and port) as these
// two won't necessarily match and we have to remove the scope that was
// matched (vs. that was defined).
if(this.__nock_filteredScope) {
matchKey += this.__nock_filteredScope;
} else {
matchKey += proto + '://' + options.host;
// Append the port only when it is non-default and not already part
// of the host string.
if (
options.port && options.host.indexOf(':') < 0 &&
(options.port !== 80 || options.proto !== 'http') &&
(options.port !== 443 || options.proto !== 'https')
) {
matchKey += ":" + options.port;
}
}
matchKey += path;
matches = matchKey === this._key;
logger('matching ' + matchKey + ' to ' + this._key + ': ' + matches);
// Only compare bodies when the key already matched.
if (matches) {
matches = (matchBody.call(options, this._requestBody, body));
if(!matches) {
logger('bodies don\'t match');
}
}
return matches;
}
// Checks whether this interceptor matches the request options while
// ignoring the request body entirely: header expectations must hold and
// the METHOD + URL key must be identical.
function matchIndependentOfBody(options) {
    var method = options.method || 'GET';
    var path = options.path;
    var proto = options.proto;
    if (transformPathFunction) {
        path = transformPathFunction(path);
    }
    function headerMatches(header) {
        return options.getHeader && matchStringOrRegexp(options.getHeader(header.name), header.value);
    }
    var headersOk = matchHeaders.every(headerMatches) &&
        interceptorMatchHeaders.every(headerMatches);
    if (!headersOk) {
        return false;
    }
    return this._key === (method + ' ' + proto + '://' + options.host + path);
}
// Per-interceptor path filter: when given a function, it is stored on
// the interceptor for later use. Chainable.
function filteringPath(fn) {
    if (typeof fn === 'function') {
        this.transformFunction = fn;
    }
    return this;
}
// Called after an interceptor has been consumed: re-arms a persisted
// file-backed reply with a fresh (paused) stream, and removes one-shot
// interceptors from the registry.
function discard() {
    if (persist && this.filePath) {
        this.body = fs.createReadStream(this.filePath);
        this.body.pause();
    }
    if (!persist) {
        remove(this._key, this);
    }
}
// Requires the named request header to match `value` (string or RegExp)
// for this interceptor only. Chainable.
function matchHeader(name, value) {
    var requirement = {name: name, value: value};
    interceptorMatchHeaders.push(requirement);
    return this;
}
/**
 * Sets how many times this interceptor may be matched before it is used
 * up. Values below 1 are ignored.
 * @param {Number} newCounter repeat count (should be > 0)
 * @return {Object} this interceptor, for chaining
 * @public
 * @example
 * // Will repeat mock 5 times for same kind of request
 * nock('http://zombo.com').get('/').times(5).reply(200, 'Ok');
 */
function times(newCounter) {
    var acceptable = !(newCounter < 1);
    if (acceptable) {
        this.counter = newCounter;
    }
    return this;
}
/**
 * Sugar for times(1).
 * @see {@link times}
 * @public
 */
function once() {
    return this.times(1);
}
/**
 * Sugar for times(2).
 * @see {@link times}
 * @public
 */
function twice() {
    return this.times(2);
}
/**
 * Sugar for times(3).
 * @see {@link times}
 * @public
 */
function thrice() {
    return this.times(3);
}
/**
 * Delays the mocked response body by the given number of milliseconds.
 * @param {Number} ms milliseconds to wait
 * @return {Object} this interceptor, for chaining
 */
function delay(ms) {
    this.delayInMs = ms;
    return this;
}
/**
 * Delays the (simulated) connection by the given number of milliseconds.
 * @param {Number} ms milliseconds to wait
 * @return {Object} this interceptor, for chaining
 */
function delayConnection(ms) {
    this.delayConnectionInMs = ms;
    return this;
}
var interceptor = {
_key: key
, counter: 1
, _requestBody: requestBody
, reqheaders: (options && options.reqheaders) || {}
, reply: reply
, replyWithFile: replyWithFile
, discard: discard
, match: match
, matchIndependentOfBody: matchIndependentOfBody
, filteringPath: filteringPath
, matchHeader: matchHeader
, times: times
, once: once
, twice: twice
, thrice: thrice
, delay: delay
, delayConnection: delayConnection
};
return interceptor;
}
// intercept() specialised for each supported HTTP verb. All of these are
// defined (and assigned) before the scope object literal below is built,
// so the var-based form is safe.
function makeInterceptor(method) {
    return function (uri, requestBody, options) {
        return intercept(uri, method, requestBody, options);
    };
}
var get = makeInterceptor('GET');
var post = makeInterceptor('POST');
var put = makeInterceptor('PUT');
var head = makeInterceptor('HEAD');
var patch = makeInterceptor('PATCH');
var merge = makeInterceptor('MERGE');
var _delete = makeInterceptor('DELETE');
// Returns the METHOD + URL keys of interceptors that have not been
// consumed (and therefore removed) yet.
function pendingMocks() {
return Object.keys(interceptors);
}
// Reports whether every mock on this scope counts as satisfied.
function isDone() {
// if nock is turned off, it always says it's done
if (! globalIntercept.isOn()) { return true; }
var keys = Object.keys(interceptors);
// An empty table means everything registered has been consumed.
if (keys.length === 0) {
return true;
} else {
var doneHostCount = 0;
keys.forEach(function(key) {
var doneInterceptorCount = 0;
interceptors[key].forEach(function(interceptor) {
// NOTE(review): an interceptor only counts as "done" here when it
// was registered with an explicit requireDone === false option —
// i.e. still-pending required interceptors keep the scope un-done.
// Presumably intentional for optional mocks; confirm against the
// intercept module before changing.
var isDefined = (typeof interceptor.options.requireDone !== 'undefined');
if (isDefined && interceptor.options.requireDone === false) {
doneInterceptorCount += 1;
}
});
if( doneInterceptorCount === interceptors[key].length ) {
doneHostCount += 1;
}
});
// Done only when every key's interceptors are all accounted for.
return (doneHostCount === keys.length);
}
}
// Asserts that every mock on this scope has been satisfied, listing any
// outstanding METHOD + URL keys in the failure message.
function done() {
    var outstanding = pendingMocks().join("\n");
    assert.ok(isDone(), "Mocks not yet satisfied:\n" + outstanding);
}
// Normalises filtering arguments into a transform function:
//   (RegExp, replacement) -> function applying String#replace
//   (Function)            -> the function itself
//   anything else         -> undefined (callers treat this as invalid)
function buildFilter() {
    var first = arguments[0];
    var second = arguments[1];
    if (first instanceof RegExp) {
        return function(path) {
            return path ? path.replace(first, second) : path;
        };
    }
    if (typeof first === 'function') {
        return first;
    }
}
// ---- chainable scope-level configuration helpers ----

// Installs a path filter (regexp pair or function) applied to request
// paths before matching. Throws on invalid arguments.
function filteringPath() {
    transformPathFunction = buildFilter.apply(undefined, arguments);
    if (transformPathFunction) {
        return this;
    }
    throw new Error('Invalid arguments: filtering path should be a function or a regular expression');
}
// Installs a request-body filter applied before body matching.
// Throws on invalid arguments.
function filteringRequestBody() {
    transformRequestBodyFunction = buildFilter.apply(undefined, arguments);
    if (transformRequestBodyFunction) {
        return this;
    }
    throw new Error('Invalid arguments: filtering request body should be a function or a regular expression');
}
// Requires the named request header to match (string or RegExp value)
// for every interceptor on this scope.
function matchHeader(name, value) {
    var requirement = {name: name, value: value};
    matchHeaders.push(requirement);
    return this;
}
// Headers merged into every reply produced by this scope.
function defaultReplyHeaders(headers) {
    this._defaultReplyHeaders = headers;
    return this;
}
// Replaces the debug logger with a custom one.
function log(newLogger) {
    logger = newLogger;
    return this;
}
// Marks the scope persistent: its interceptors survive being matched.
function _persist() {
    persist = true;
    return this;
}
// Whether this scope has been marked persistent.
function shouldPersist() {
    return persist;
}
scope = {
get: get
, post: post
, delete: _delete
, put: put
, merge: merge
, patch: patch
, head: head
, intercept: intercept
, done: done
, isDone: isDone
, filteringPath: filteringPath
, filteringRequestBody: filteringRequestBody
, matchHeader: matchHeader
, defaultReplyHeaders: defaultReplyHeaders
, log: log
, persist: _persist
, shouldPersist: shouldPersist
, pendingMocks: pendingMocks
};
return scope;
}
// Removes every registered interceptor across all scopes. Returns the
// module itself so calls can be chained.
function cleanAll() {
globalIntercept.removeAll();
return module.exports;
}
function loadDefs(path) {
var contents = fs.readFileSync(path);
return JSON.parse(contents);
}
// Loads recorded nock definitions from `path` and registers them all,
// returning the created scopes.
function load(path) {
    var defs = loadDefs(path);
    return define(defs);
}
/**
 * Determines the HTTP status code for a recorded nock definition.
 *
 * Backward compatibility: older recordings encoded the status as a
 * string in the `reply` property; that is preferred when it parses to a
 * number. Otherwise fall back to `status`, defaulting to 200.
 *
 * Fixes the original check, which used _.isNumber(parseInt(...)) — and
 * since _.isNumber(NaN) is true, a non-numeric `reply` made this return
 * NaN instead of falling back. (Also drops the lodash dependency.)
 *
 * @param {Object} nockDef a single nock definition
 * @return {Number} the HTTP status code
 */
function getStatusFromDefinition(nockDef) {
    if (nockDef.reply !== undefined) {
        var parsedReply = parseInt(nockDef.reply, 10);
        if (!isNaN(parsedReply)) {
            return parsedReply;
        }
    }
    var DEFAULT_STATUS_OK = 200;
    return nockDef.status || DEFAULT_STATUS_OK;
}
function getScopeFromDefinition(nockDef) {
// Backward compatibility for when `port` was part of definition.
if(!_.isUndefined(nockDef.port)) {
// Include `port` into scope if it doesn't exist.
var options = url.parse(nockDef.scope);
if(_.isNull(options.port)) {
return nockDef.scope + ':' + nockDef.port;
} else {
if(parseInt(options.port) !== parseInt(nockDef.port)) {
throw new Error('Mismatched port numbers in scope and port properties of nock definition.');
}
}
}
return nockDef.scope;
}
// Attempts to parse `string` as JSON; on failure the raw string is
// returned untouched.
function tryJsonParse(string) {
    var parsed;
    try {
        parsed = JSON.parse(string);
    } catch (ignored) {
        parsed = string;
    }
    return parsed;
}
/**
 * Registers an array of recorded nock definitions (as produced by the
 * recorder or loaded via loadDefs/load) and returns the created scopes.
 *
 * Fixes two defects in the original:
 *  - `reqheaders !== {}` compared object identities and was therefore
 *    always true; an explicit key-count check is used instead (same
 *    observable behavior, honest condition).
 *  - `nockDef.method.toLowerCase() || "get"` threw when `method` was
 *    missing; the fallback is now applied before lowercasing.
 *
 * @param {Array} nockDefs recorded definitions
 * @return {Array} the scopes created, one per definition
 */
function define(nockDefs) {
    var nocks = [];
    nockDefs.forEach(function(nockDef) {
        var nscope = getScopeFromDefinition(nockDef)
            , npath = nockDef.path
            , method = (nockDef.method || 'get').toLowerCase()
            , status = getStatusFromDefinition(nockDef)
            , headers = nockDef.headers || {}
            , reqheaders = nockDef.reqheaders || {}
            , body = nockDef.body || ''
            , options = nockDef.options || {};
        // We use request headers for both filtering (see below) and mocking.
        // Here we are setting up mocked request headers.
        options = _.clone(options) || {};
        options.reqheaders = reqheaders;
        // Response is not always JSON as it could be a string or binary data or
        // even an array of binary buffers (e.g. when content is encoded).
        var response;
        if (!nockDef.response) {
            response = '';
        } else {
            response = _.isString(nockDef.response) ? tryJsonParse(nockDef.response) : nockDef.response;
        }
        var nock;
        if (body === "*") {
            // Wildcard body: accept any request body at all.
            nock = startScope(nscope, options).filteringRequestBody(function() {
                return "*";
            })[method](nscope, "*").reply(status, response, headers);
        } else {
            nock = startScope(nscope, options);
            // If request headers were specified filter by them.
            if (Object.keys(reqheaders).length > 0) {
                for (var k in reqheaders) {
                    nock.matchHeader(k, reqheaders[k]);
                }
            }
            nock.intercept(npath, method, body).reply(status, response, headers);
        }
        nocks.push(nock);
    });
    return nocks;
}
module.exports = startScope;
module.exports.cleanAll = cleanAll;
module.exports.activate = globalIntercept.activate;
module.exports.isActive = globalIntercept.isActive;
module.exports.disableNetConnect = globalIntercept.disableNetConnect;
module.exports.enableNetConnect = globalIntercept.enableNetConnect;
module.exports.load = load;
module.exports.loadDefs = loadDefs;
module.exports.define = define;
| purepennons/promises-book | node_modules/nock/lib/scope.js | JavaScript | mit | 16,397 |
(function(){var _jolokiaConstructorFunc=function($){var DEFAULT_CLIENT_PARAMS={type:"POST",jsonp:false};var GET_AJAX_PARAMS={type:"GET"};var POST_AJAX_PARAMS={type:"POST",processData:false,dataType:"json",contentType:"text/json"};var PROCESSING_PARAMS=["maxDepth","maxCollectionSize","maxObjects","ignoreErrors","canonicalNaming","serializeException","includeStackTrace","ifModifiedSince"];function Jolokia(param){if(!(this instanceof arguments.callee)){return new Jolokia(param);}
this.CLIENT_VERSION="1.2.3";var jobs=[];var agentOptions={};var pollerIsRunning=false;if(typeof param==="string"){param={url:param};}
$.extend(agentOptions,DEFAULT_CLIENT_PARAMS,param);this.request=function(request,params){var opts=$.extend({},agentOptions,params);assertNotNull(opts.url,"No URL given");var ajaxParams={};$.each(["username","password","timeout"],function(i,key){if(opts[key]){ajaxParams[key]=opts[key];}});if(ajaxParams['username']&&ajaxParams['password']){if(window.btoa){ajaxParams.beforeSend=function(xhr){var tok=ajaxParams['username']+':'+ajaxParams['password'];xhr.setRequestHeader('Authorization',"Basic "+window.btoa(tok));};}
ajaxParams.xhrFields={withCredentials:true};}
if(extractMethod(request,opts)==="post"){$.extend(ajaxParams,POST_AJAX_PARAMS);ajaxParams.data=JSON.stringify(request);ajaxParams.url=ensureTrailingSlash(opts.url);}else{$.extend(ajaxParams,GET_AJAX_PARAMS);ajaxParams.dataType=opts.jsonp?"jsonp":"json";ajaxParams.url=opts.url+"/"+constructGetUrlPath(request);}
ajaxParams.url=addProcessingParameters(ajaxParams.url,opts);if(opts.ajaxError){ajaxParams.error=opts.ajaxError;}
if(opts.success){var success_callback=constructCallbackDispatcher(opts.success);var error_callback=constructCallbackDispatcher(opts.error);ajaxParams.success=function(data){var responses=$.isArray(data)?data:[data];for(var idx=0;idx<responses.length;idx++){var resp=responses[idx];if(Jolokia.isError(resp)){error_callback(resp,idx);}else{success_callback(resp,idx);}}};$.ajax(ajaxParams);return null;}else{if(opts.jsonp){throw Error("JSONP is not supported for synchronous requests");}
ajaxParams.async=false;var xhr=$.ajax(ajaxParams);if(httpSuccess(xhr)){return $.parseJSON(xhr.responseText);}else{return null;}}};this.register=function(){if(arguments.length<2){throw"At a least one request must be provided";}
var callback=arguments[0],requests=Array.prototype.slice.call(arguments,1),job;if(typeof callback==='object'){if(callback.success&&callback.error){job={success:callback.success,error:callback.error};}else if(callback.callback){job={callback:callback.callback};}else{throw"Either 'callback' or ('success' and 'error') callback must be provided "+"when registering a Jolokia job";}
job=$.extend(job,{config:callback.config,onlyIfModified:callback.onlyIfModified});}else if(typeof callback==='function'){job={success:null,error:null,callback:callback};}else{throw"First argument must be either a callback func "+"or an object with 'success' and 'error' attributes";}
if(!requests){throw"No requests given";}
job.requests=requests;var idx=jobs.length;jobs[idx]=job;return idx;};this.unregister=function(handle){if(handle<jobs.length){jobs[handle]=undefined;}};this.jobs=function(){var ret=[],len=jobs.length;for(var i=0;i<len;i++){if(jobs[i]){ret.push(i);}}
return ret;};this.start=function(interval){interval=interval||agentOptions.fetchInterval||30000;if(pollerIsRunning){if(interval===agentOptions.fetchInterval){return;}
this.stop();}
agentOptions.fetchInterval=interval;this.timerId=setInterval(callJolokia(this,jobs),interval);pollerIsRunning=true;};this.stop=function(){if(!pollerIsRunning&&this.timerId!=undefined){return;}
clearInterval(this.timerId);this.timerId=null;pollerIsRunning=false;};this.isRunning=function(){return pollerIsRunning;};}
function callJolokia(jolokia,jobs){return function(){var errorCbs=[],successCbs=[],i,j,len=jobs.length;var requests=[];for(i=0;i<len;i++){var job=jobs[i];if(!job){continue;}
var reqsLen=job.requests.length;if(job.success){var successCb=cbSuccessClosure(job,i);var errorCb=cbErrorClosure(job,i);for(j=0;j<reqsLen;j++){requests.push(prepareRequest(job,j));successCbs.push(successCb);errorCbs.push(errorCb);}}else{var callback=cbCallbackClosure(job,jolokia);for(j=0;j<reqsLen-1;j++){requests.push(prepareRequest(job,j));successCbs.push(callback.cb);errorCbs.push(callback.cb);}
requests.push(prepareRequest(job,reqsLen-1));successCbs.push(callback.lcb);errorCbs.push(callback.lcb);}}
var opts={success:function(resp,j){return successCbs[j].apply(jolokia,[resp,j]);},error:function(resp,j){return errorCbs[j].apply(jolokia,[resp,j]);}};return jolokia.request(requests,opts);};}
function prepareRequest(job,idx){var request=job.requests[idx],config=job.config||{},extra=job.onlyIfModified&&job.lastModified?{ifModifiedSince:job.lastModified}:{};request.config=$.extend({},config,request.config,extra);return request;}
function cbCallbackClosure(job,jolokia){var responses=[],callback=job.callback,lastModified=0;return{cb:addResponse,lcb:function(resp,j){addResponse(resp);if(responses.length>0){job.lastModified=lastModified;callback.apply(jolokia,responses);}}};function addResponse(resp,j){if(resp.status!=304){if(lastModified==0||resp.timestamp<lastModified){lastModified=resp.timestamp;}
responses.push(resp);}}}
function cbErrorClosure(job,i){var callback=job.error;return function(resp,j){if(resp.status==304){return;}
if(callback){callback(resp,i,j)}}}
function cbSuccessClosure(job,i){var callback=job.success;return function(resp,j){if(callback){if(job.onlyIfModified){job.lastModified=resp.timestamp;}
callback(resp,i,j)}}}
function constructCallbackDispatcher(callback){if(callback==null){return function(response){console.warn("Ignoring response "+JSON.stringify(response));};}else if(callback==="ignore"){return function(){};}
var callbackArray=$.isArray(callback)?callback:[callback];return function(response,idx){callbackArray[idx%callbackArray.length](response,idx);}}
function extractMethod(request,opts){var methodGiven=opts&&opts.method?opts.method.toLowerCase():null,method;if(methodGiven){if(methodGiven==="get"){if($.isArray(request)){throw new Error("Cannot use GET with bulk requests");}
if(request.type.toLowerCase()==="read"&&$.isArray(request.attribute)){throw new Error("Cannot use GET for read with multiple attributes");}
if(request.target){throw new Error("Cannot use GET request with proxy mode");}
if(request.config){throw new Error("Cannot use GET with request specific config");}}
method=methodGiven;}else{method=$.isArray(request)||request.config||(request.type.toLowerCase()==="read"&&$.isArray(request.attribute))||request.target?"post":"get";}
if(opts.jsonp&&method==="post"){throw new Error("Can not use JSONP with POST requests");}
return method;}
function addProcessingParameters(url,opts){var sep=url.indexOf("?")>0?"&":"?";$.each(PROCESSING_PARAMS,function(i,key){if(opts[key]!=null){url+=sep+key+"="+opts[key];sep="&";}});return url;}
function constructGetUrlPath(request){var type=request.type;assertNotNull(type,"No request type given for building a GET request");type=type.toLowerCase();var extractor=GET_URL_EXTRACTORS[type];assertNotNull(extractor,"Unknown request type "+type);var result=extractor(request);var parts=result.parts||[];var url=type;$.each(parts,function(i,v){url+="/"+Jolokia.escape(v)});if(result.path){url+=(result.path[0]=='/'?"":"/")+result.path;}
console.log(url);return url;}
function ensureTrailingSlash(url){return url.replace(/\/*$/,"/");}
var GET_URL_EXTRACTORS={"read":function(request){if(request.attribute==null){return{parts:[request.mbean,'*'],path:request.path};}else{return{parts:[request.mbean,request.attribute],path:request.path};}},"write":function(request){return{parts:[request.mbean,request.attribute,valueToString(request.value)],path:request.path};},"exec":function(request){var ret=[request.mbean,request.operation];if(request.arguments&&request.arguments.length>0){$.each(request.arguments,function(index,value){ret.push(valueToString(value));});}
return{parts:ret};},"version":function(){return{};},"search":function(request){return{parts:[request.mbean]};},"list":function(request){return{path:request.path};}};function valueToString(value){if(value==null){return"[null]";}
if($.isArray(value)){var ret="";for(var i=0;i<value.length;i++){ret+=value==null?"[null]":singleValueToString(value[i]);if(i<value.length-1){ret+=",";}}
return ret;}else{return singleValueToString(value);}}
function singleValueToString(value){if(typeof value==="string"&&value.length==0){return"\"\"";}else{return value.toString();}}
function httpSuccess(xhr){try{return!xhr.status&&location.protocol==="file:"||xhr.status>=200&&xhr.status<300||xhr.status===304||xhr.status===1223;}catch(e){}
return false;}
function assertNotNull(object,message){if(object==null){throw new Error(message);}}
Jolokia.prototype.escape=Jolokia.escape=function(part){return encodeURIComponent(part.replace(/!/g,"!!").replace(/\//g,"!/"));};Jolokia.prototype.isError=Jolokia.isError=function(resp){return resp.status==null||resp.status!=200;};return Jolokia;};(function(root,factory){if(typeof define==='function'&&define.amd){define(["jquery"],factory);}else{root.Jolokia=factory(root.jQuery);}}(this,function(jQuery){return _jolokiaConstructorFunc(jQuery);}));}()); | kamkie/micro-service-example | webui-monitor/app/js/third-party/jolokia/jolokia-min.js | JavaScript | mit | 9,282 |
/**
* Main controller for Ghost frontend
*/
/*global require, module */
var _ = require('lodash'),
api = require('../api'),
rss = require('../data/xml/rss'),
path = require('path'),
config = require('../config'),
errors = require('../errors'),
filters = require('../filters'),
Promise = require('bluebird'),
template = require('../helpers/template'),
routeMatch = require('path-match')(),
frontendControllers,
staticPostPermalink = routeMatch('/:slug/:edit?');
// Fetches one page of posts, honouring the blog's postsPerPage setting
// when it is a sane positive number. Always includes author, tags and
// fields in the browse call.
function getPostPage(options) {
    return api.settings.read('postsPerPage').then(function then(response) {
        var perPageSetting = response.settings[0];
        var postsPerPage = parseInt(perPageSetting.value, 10);
        if (!isNaN(postsPerPage) && postsPerPage > 0) {
            options.limit = postsPerPage;
        }
        options.include = 'author,tags,fields';
        return api.posts.browse(options);
    });
}
/**
 * Builds the template variables for multi-post (channel) pages.
 * Any properties of extraValues are merged into the result.
 * @param {Array} posts posts for the current page
 * @param {Object} page API response carrying pagination metadata
 * @param {Object} [extraValues] additional view variables
 * @return {Object} page variables for handlebars
 */
function formatPageResponse(posts, page, extraValues) {
    var pageVars = {
        posts: posts,
        pagination: page.meta.pagination
    };
    return _.extend(pageVars, extraValues || {});
}
/**
 * Builds the template variables for a single-post page.
 * @param {Object} post the post being rendered
 * @return {Object} view data containing only the post
 */
function formatResponse(post) {
    return {post: post};
}
// Wraps `next` so a 'NotFoundError' (no matching path) falls through to
// the next handler without an error, while anything else is propagated.
function handleError(next) {
    return function handleError(err) {
        var isNotFound = err.errorType === 'NotFoundError';
        return isNotFound ? next() : next(err);
    };
}
// Derives the rendering context(s) for the current request (e.g. 'home',
// 'tag', 'paged') from the relative URL and the fetched data, and stores
// them on res.locals.context for theme helpers to consume.
function setResponseContext(req, res, data) {
var contexts = [],
pageParam = req.params.page !== undefined ? parseInt(req.params.page, 10) : 1,
tagPattern = new RegExp('^\\/' + config.routeKeywords.tag + '\\/'),
authorPattern = new RegExp('^\\/' + config.routeKeywords.author + '\\/'),
privatePattern = new RegExp('^\\/' + config.routeKeywords.private + '\\/'),
indexPattern = new RegExp('^\\/' + config.routeKeywords.page + '\\/'),
homePattern = new RegExp('^\\/$');
// paged context
if (!isNaN(pageParam) && pageParam > 1) {
contexts.push('paged');
}
// Order matters below: the first matching pattern decides the primary
// context; 'post' is the catch-all.
if (indexPattern.test(res.locals.relativeUrl)) {
contexts.push('index');
} else if (homePattern.test(res.locals.relativeUrl)) {
contexts.push('home');
contexts.push('index');
} else if (/^\/rss\//.test(res.locals.relativeUrl)) {
contexts.push('rss');
} else if (privatePattern.test(res.locals.relativeUrl)) {
contexts.push('private');
} else if (tagPattern.test(res.locals.relativeUrl)) {
contexts.push('tag');
} else if (authorPattern.test(res.locals.relativeUrl)) {
contexts.push('author');
} else if (data && data.post && data.post.page) {
contexts.push('page');
} else {
contexts.push('post');
}
res.locals.context = contexts;
}
// Decorates the response data (a single object or an array of them)
// with request context — currently just the `secure` flag — so the
// templates know whether the request arrived over HTTPS.
function setReqCtx(req, data) {
    var items = Array.isArray(data) ? data : [data];
    items.forEach(function forEach(item) {
        item.secure = req.secure;
    });
}
/**
 * Resolves the paths object of the currently active theme.
 * @return {Promise} resolves with the active theme's paths
 */
function getActiveThemePaths() {
    return api.settings.read({
        key: 'activeTheme',
        context: {
            internal: true
        }
    }).then(function then(response) {
        var activeThemeName = response.settings[0].value;
        return config.paths.availableThemes[activeThemeName];
    });
}
/*
 * Builds a promise callback that renders a single post with the active
 * theme's matching template, setting the response context first. Used by
 * the post preview and single post handlers.
 */
function renderPost(req, res) {
    return function renderPost(post) {
        return getActiveThemePaths().then(function then(paths) {
            var response = formatResponse(post);
            var view = template.getThemeViewForPost(paths, post);
            setResponseContext(req, res, response);
            res.render(view, response);
        });
    };
}
// Builds an Express handler that renders a paginated channel (home, tag
// or author listing). channelOpts:
//   name              - channel name, used for view lookup and filters
//   route             - route pattern, may contain ':slug'
//   filter            - key into page.meta.filters (e.g. 'tags')
//   firstPageTemplate - optional template used only for page 1
//   slugTemplate      - look the view up by channel name + slug
function renderChannel(channelOpts) {
channelOpts = channelOpts || {};
return function renderChannel(req, res, next) {
var pageParam = req.params.page !== undefined ? parseInt(req.params.page, 10) : 1,
options = {
page: pageParam
},
hasSlug,
filter, filterKey;
// Add the slug if it exists in the route
if (channelOpts.route.indexOf(':slug') !== -1) {
options[channelOpts.name] = req.params.slug;
hasSlug = true;
}
// Builds the canonical URL for this channel, optionally for a given
// page number (page 1 has no /page/n/ suffix).
function createUrl(page) {
var url = config.paths.subdir + channelOpts.route;
if (hasSlug) {
url = url.replace(':slug', options[channelOpts.name]);
}
if (page && page > 1) {
url += 'page/' + page + '/';
}
return url;
}
// Bad page numbers and an explicit /page/1/ redirect to the canonical
// channel URL.
if (isNaN(pageParam) || pageParam < 1 || (req.params.page !== undefined && pageParam === 1)) {
return res.redirect(createUrl());
}
return getPostPage(options).then(function then(page) {
// If page is greater than number of pages we have, redirect to last page
if (pageParam > page.meta.pagination.pages) {
return res.redirect(createUrl(page.meta.pagination.pages));
}
setReqCtx(req, page.posts);
// Pull the matched filter value (e.g. the tag object) out of the
// API response so it can be exposed to the template.
if (channelOpts.filter && page.meta.filters[channelOpts.filter]) {
filterKey = page.meta.filters[channelOpts.filter];
filter = (_.isArray(filterKey)) ? filterKey[0] : filterKey;
setReqCtx(req, filter);
}
filters.doFilter('prePostsRender', page.posts, res.locals).then(function then(posts) {
getActiveThemePaths().then(function then(paths) {
var view = 'index',
result,
extra = {};
// View lookup precedence: first-page template (page 1 only),
// then slug-specific template, then a template named after the
// channel, falling back to 'index'.
if (channelOpts.firstPageTemplate && paths.hasOwnProperty(channelOpts.firstPageTemplate + '.hbs')) {
view = (pageParam > 1) ? 'index' : channelOpts.firstPageTemplate;
} else if (channelOpts.slugTemplate) {
view = template.getThemeViewForChannel(paths, channelOpts.name, options[channelOpts.name]);
} else if (paths.hasOwnProperty(channelOpts.name + '.hbs')) {
view = channelOpts.name;
}
if (channelOpts.filter) {
extra[channelOpts.name] = (filterKey) ? filter : '';
// No filter data came back: treat as not found.
if (!extra[channelOpts.name]) {
return next();
}
result = formatPageResponse(posts, page, extra);
} else {
result = formatPageResponse(posts, page);
}
setResponseContext(req, res);
res.render(view, result);
});
});
}).catch(handleError(next));
};
}
frontendControllers = {
    homepage: renderChannel({
        name: 'home',
        route: '/',
        firstPageTemplate: 'home'
    }),
    tag: renderChannel({
        name: 'tag',
        route: '/' + config.routeKeywords.tag + '/:slug/',
        filter: 'tags',
        slugTemplate: true
    }),
    author: renderChannel({
        name: 'author',
        route: '/' + config.routeKeywords.author + '/:slug/',
        filter: 'author',
        slugTemplate: true
    }),
    // Renders a draft post identified by its preview uuid; published posts
    // are permanently redirected to their canonical URL.
    preview: function preview(req, res, next) {
        var params = {
            uuid: req.params.uuid,
            status: 'all',
            include: 'author,tags,fields'
        };

        api.posts.read(params).then(function then(result) {
            var post = result.posts[0];

            if (!post) {
                return next();
            }

            if (post.status === 'published') {
                return res.redirect(301, config.urlFor('post', {post: post}));
            }

            setReqCtx(req, post);

            // BUGFIX: return the filter/render chain so any rejection inside
            // it reaches .catch(handleError(next)) instead of being lost.
            return filters.doFilter('prePostsRender', post, res.locals)
                .then(renderPost(req, res));
        }).catch(handleError(next));
    },
    // Resolves a single post (or static page) from the configured permalink
    // structure and renders it, handling the trailing /edit redirect.
    single: function single(req, res, next) {
        var postPath = req.path,
            params,
            usingStaticPermalink = false;

        api.settings.read('permalinks').then(function then(response) {
            var permalink = response.settings[0].value,
                editFormat,
                postLookup,
                match;

            editFormat = permalink.substr(permalink.length - 1) === '/' ? ':edit?' : '/:edit?';

            // Convert saved permalink into a path-match function
            permalink = routeMatch(permalink + editFormat);
            match = permalink(postPath);

            // Check if the path matches the permalink structure.
            //
            // If there are no matches found we then
            // need to verify it's not a static post,
            // and test against that permalink structure.
            if (match === false) {
                match = staticPostPermalink(postPath);
                // If there are still no matches then return.
                if (match === false) {
                    // Reject promise chain with type 'NotFound'
                    return Promise.reject(new errors.NotFoundError());
                }

                usingStaticPermalink = true;
            }

            params = match;

            // Sanitize params we're going to use to lookup the post.
            postLookup = _.pick(params, 'slug', 'id');
            // Add author, tag and fields
            postLookup.include = 'author,tags,fields,next,previous';

            // Query database to find post
            return api.posts.read(postLookup);
        }).then(function then(result) {
            var post = result.posts[0],
                postUrl = (params.edit) ? postPath.replace(params.edit + '/', '') : postPath;

            if (!post) {
                return next();
            }

            // BUGFIX: removed leftover debug statement `console.log(post);`.

            function render() {
                // If we're ready to render the page but the last param is 'edit' then we'll send you to the edit page.
                if (params.edit) {
                    params.edit = params.edit.toLowerCase();
                }
                if (params.edit === 'edit') {
                    return res.redirect(config.paths.subdir + '/ghost/editor/' + post.id + '/');
                } else if (params.edit !== undefined) {
                    // reject with type: 'NotFound'
                    return Promise.reject(new errors.NotFoundError());
                }

                setReqCtx(req, post);

                // BUGFIX: return the chain so rendering errors propagate to
                // the .catch(handleError(next)) at the end of this handler.
                return filters.doFilter('prePostsRender', post, res.locals)
                    .then(renderPost(req, res));
            }

            // If we've checked the path with the static permalink structure
            // then the post must be a static post.
            // If it is not then we must return.
            if (usingStaticPermalink) {
                if (post.page) {
                    return render();
                }

                return next();
            }

            // Check if the url provided with the post object matches req.path
            // If it does, render the post
            // If not, return 404
            if (post.url && post.url === postUrl) {
                return render();
            } else {
                return next();
            }
        }).catch(handleError(next));
    },
    rss: rss,
    // NOTE: `private` is a strict-mode reserved word, so the named function
    // expression is renamed for safety; the exported property name is unchanged.
    private: function privateBlogging(req, res) {
        var defaultPage = path.resolve(config.paths.adminViews, 'private.hbs');

        return getActiveThemePaths().then(function then(paths) {
            var data = {};
            if (res.error) {
                data.error = res.error;
            }

            setResponseContext(req, res);
            // Prefer the theme's own private.hbs; fall back to the admin default.
            if (paths.hasOwnProperty('private.hbs')) {
                return res.render('private', data);
            } else {
                return res.render(defaultPage, data);
            }
        });
    }
};

module.exports = frontendControllers;
| JuceQ/openshit-ghost-zh-cn | node_modules/ghost/core/server/controllers/frontend.js | JavaScript | mit | 12,841 |
// i18n message catalogue for the "Me" page. `{email}` is a react-intl style
// placeholder substituted at render time, not a template-literal interpolation.
const messages = {
  title: 'Me',
  welcome: 'Hi {email}. This is your secret page.'
};

export default messages;
| Tzitzian/Oppex | src/client/me/intl/en.js | JavaScript | mit | 86 |
// Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for details.
namespace mshtml
{
    using System.Runtime.InteropServices;

    /// <summary>
    /// COM interop interface for the MSHTML title element
    /// (IID 3050F516-98B5-11CF-BB82-00AA00BDCE0B). Declares no members of its
    /// own; everything is inherited from <see cref="DispHTMLTitleElement"/>.
    /// </summary>
    [ComImport, Guid("3050F516-98B5-11CF-BB82-00AA00BDCE0B"),]
    public interface HTMLTitleElement : DispHTMLTitleElement
    {
    }
}
| adilmughal/OpenLiveWriter | src/managed/OpenLiveWriter.Interop.Mshtml/mshtml/HTMLTitleElement.cs | C# | mit | 342 |
\usepackage{helvet} % Helvetica font
\renewcommand*\familydefault{\sfdefault} % Use the sans serif version of the font
\usepackage[T1]{fontenc} % T1 font encoding (proper output of accented glyphs)
\usepackage[none]{hyphenat} % Disable hyphenation entirely
\usepackage{setspace} % Line-spacing control
\doublespacing % Double-space the whole document
\setlength{\parskip}{1em} % Vertical space between paragraphs
\usepackage{lineno} % Line-numbering support (activated with \linenumbers)
\usepackage{pdfpages} % Allows including pages from external PDFs
| Xdai8923/Kozich_ReAlnalysis_AEM_2013 | submission/header.tex | TeX | mit | 281 |
// Type definitions for nuclear-js 1.4
// Project: https://github.com/optimizely/nuclear-js
// Definitions by: Pat Lillis <https://github.com/patlillis>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
import * as _Immutable from 'immutable';
// Disable automatic exports.
export {};
// NuclearJS re-exports everything in ImmutableJS.
export import Immutable = _Immutable;
/** Options accepted by the `Reactor` constructor/factory. */
interface ReactorConfig {
    /** If true it will log the entire app state for every dispatch. */
    debug?: boolean;
}

// Getters have a really complex, recursive type that can't be represented
// in TypeScript, but at a high level they are all Arrays.
type Getter = any[];

/**
 * React mixin shape exposed as `Reactor.ReactMixin` (and returned by
 * `createReactMixin`), tying a component's lifecycle to reactor observation.
 */
interface ReactMixin {
    getInitialState(): any;
    componentDidMount(): void;
    componentWillUnmount(): void;
}
/**
 * State is stored in NuclearJS Reactors. Reactors contain a `state` object
 * which is an Immutable.Map
 *
 * The only way Reactors can change state is by reacting to messages. To
 * update state, Reactor's dispatch messages to all registered stores, and
 * the store returns it's new state based on the message
 */
export interface Reactor {
    /** Internal bookkeeping: previous reactor state (implementation detail; semantics not specified here). */
    prevReactorState: any;
    /** Internal bookkeeping: current reactor state (implementation detail; semantics not specified here). */
    reactorState: any;
    /** Internal bookkeeping for registered observers (implementation detail). */
    observerState: any;
    /** Mixin for React components that want to observe this reactor. */
    ReactMixin: ReactMixin;

    /**
     * Dispatches a message to all registered Stores.
     *
     * This process is done synchronously, all registered Stores are passed
     * this message and all components are re-evaluated (efficiently). After
     * a dispatch, a Reactor will emit the new state on the
     * reactor.changeEmitter.
     */
    dispatch(actionType: string, payload?: any): void;

    /**
     * Allows multiple dispatches within the `fn` function before notifying
     * any observers.
     */
    batch(fn: () => void): void;

    /**
     * Returns the immutable value for some KeyPath or Getter in the reactor
     * state.
     *
     * Returns `undefined` if a keyPath doesn't have a value.
     */
    evaluate(getter: Getter): any;

    /**
     * Returns a plain JS value for some KeyPath or Getter in the reactor
     * state.
     *
     * Returns `undefined` if a keyPath doesn't have a value.
     */
    evaluateToJS(getter: Getter): any;

    /**
     * Adds a change observer that is invoked whenever any part of the
     * reactor state changes.
     */
    observe(handler: () => void): () => void;

    /**
     * Adds a change observer that is invoked whenever any dependencies of
     * the getter change.
     *
     * @returns An "unsubscribe" function
     */
    observe(getter: Getter, handler: (value?: any) => void): () => void;

    /**
     * Removes the change observer for the getter.
     */
    unobserve(getter: Getter, handler: (value?: any) => void): void;

    /**
     * Returns a plain JavaScript object representing the application state.
     *
     * By default this maps over all stores and returns `toJS(storeState)`.
     */
    serialize(): any;

    /**
     * Takes a plain JavaScript object and merges into the reactor state,
     * using `store.deserialize()`.
     *
     * This can be useful if you need to load data already on the page.
     */
    loadState(state: any): void;

    /**
     * Registers stores.
     */
    registerStores(stores: { [storeName: string]: Store }): void;

    /**
     * Replace store implementation (handlers) without modifying the app
     * state or calling `getInitialState`.
     *
     * Useful for hot reloading
     */
    replaceStores(stores: { [storeName: string]: Store }): void;

    /**
     * Resets the state of a reactor and returns it back to initial state.
     */
    reset(): void;
}

/* Constructor object for `Reactor`; callable both with and without `new`. */
export const Reactor: {
    /**
     * State is stored in NuclearJS Reactors. Reactors contain a `state` object
     * which is an Immutable.Map
     *
     * The only way Reactors can change state is by reacting to messages. To
     * update state, Reactor's dispatch messages to all registered stores, and
     * the store returns it's new state based on the message
     */
    new (config?: ReactorConfig): Reactor;

    /**
     * State is stored in NuclearJS Reactors. Reactors contain a `state` object
     * which is an Immutable.Map
     *
     * The only way Reactors can change state is by reacting to messages. To
     * update state, Reactor's dispatch messages to all registered stores, and
     * the store returns it's new state based on the message
     */
    (config?: ReactorConfig): Reactor;
};
/**
 * A Store defines how a certain domain of the application should respond to
 * actions taken on the whole system. They manage their own section of the
 * entire app state and have no knowledge about the other parts of the
 * application state.
 */
export interface Store<T = any> extends StoreLike<T> {
    /**
     * Takes a current reactor state, action type and payload, does the
     * reaction, and returns the new state.
     */
    handle(state: T, actionType: string, payload?: any): T;

    /**
     * Binds an action type to a handler.
     * Typically called from within `initialize`.
     */
    on(actionType: string, handler: (state: T, payload?: any) => T): void;

    /**
     * Pure function taking the current state of store and returning the new
     * state after a NuclearJS reactor has been reset
     */
    handleReset(this: Store<T>, state: T): T;

    /**
     * Serializes store state to plain JSON serializable JavaScript.
     */
    serialize(this: Store<T>, state: T): any;

    /**
     * Deserializes plain JavaScript to store state.
     */
    deserialize(this: Store<T>, state: any): T;
}

/**
 * Stores are initialized like:
 *
 * ```
 * new Store({
 *     initialize() { ... },
 *     getInitialState() { ... },
 * })
 * ```
 *
 * This type defines the functions for the object passed to the
 * `new Store()` constructor. In additional, all of these functions are
 * available on the base `Store` object itself.
 */
interface StoreLike<T> {
    /**
     * Gets the initial state for this type of store
     */
    getInitialState(this: Store<T>): T;

    /**
     * Sets up message handlers via `this.on` and to set up the initial
     * state.
     */
    initialize(this: Store<T>): void;

    /**
     * Pure function taking the current state of store and returning the new
     * state after a NuclearJS reactor has been reset
     * (optional on the config object).
     */
    handleReset?(this: Store<T>, state: T): T;

    /**
     * Serializes store state to plain JSON serializable JavaScript
     * (optional on the config object).
     */
    serialize?(this: Store<T>, state: T): any;

    /**
     * Deserializes plain JavaScript to store state
     * (optional on the config object).
     */
    deserialize?(this: Store<T>, state: any): T;
}

/* Constructor object for `Store`; callable both with and without `new`. */
export const Store: {
    /**
     * A Store defines how a certain domain of the application should respond to
     * actions taken on the whole system. They manage their own section of the
     * entire app state and have no knowledge about the other parts of the
     * application state.
     */
    new <T>(config: StoreLike<T>): Store<T>;

    /**
     * A Store defines how a certain domain of the application should respond to
     * actions taken on the whole system. They manage their own section of the
     * entire app state and have no knowledge about the other parts of the
     * application state.
     */
    <T>(config: StoreLike<T>): Store<T>;
};

/**
 * Checks if something is simply a keyPath and not a getter.
 *
 * @param toTest Candidate value (typically an array of keys).
 */
export function isKeyPath(toTest: any): boolean;

/**
 * Checks if something is a getter literal.
 *
 * For example, `['dep1', 'dep2', function(dep1, dep2) {...}]`.
 *
 * @param toTest Candidate value to inspect.
 */
export function isGetter(toTest: any): boolean;

/**
 * Converts an Immutable Sequence to JS object.
 *
 * Can be called on any type.
 */
export function toJS(arg: any): any;

/**
 * Converts a JS object to an Immutable object, if it's already Immutable its a
 * no-op.
 */
export function toImmutable(arg: any): any;

/**
 * Returns true if the value is an ImmutableJS data structure.
 */
export function isImmutable(arg: any): boolean;

/** Creates the React lifecycle mixin bound to the given reactor. */
export function createReactMixin(reactor: Reactor): ReactMixin;
| dsebastien/DefinitelyTyped | types/nuclear-js/index.d.ts | TypeScript | mit | 7,984 |
'''
Hello student. Thank you for downloading a CORGIS library. However, you do not need to open this library. Instead you should use the following:
import graduates
If you opened the file because you are curious how this library works, then well done! We hope that you find it a useful learning experience. However, you should know that this code is meant to solve somewhat esoteric pedagogical problems, so it is often not best practices.
'''
import sys as _sys
import os as _os
import json as _json
import sqlite3 as _sql
import difflib as _difflib
class _Constants(object):
    '''
    Global singleton object to hide some of the constants; some IDEs reveal internal module details very aggressively, and there's no other way to hide stuff.
    '''
    # HTTP header sent with any web requests made on behalf of this library.
    _HEADER = {'User-Agent':
               'CORGIS Graduates library for educational purposes'}
    # True when running under Python 3 (controls urllib imports and str handling).
    _PYTHON_3 = _sys.version_info >= (3, 0)
    # Presumably toggles a canned-data "test" mode for the interfaces -- never
    # set to True anywhere in this module; TODO confirm intent.
    _TEST = False
    # Passed to .format() as `hardware` when building the SQL query in
    # get_majors, but the query string has no placeholder for it -- appears
    # unused in practice.
    _HARDWARE = 1000
if _Constants._PYTHON_3:
import urllib.request as _request
from urllib.parse import quote_plus as _quote_plus
from urllib.error import HTTPError as _HTTPError
else:
import urllib2 as _urllib2
from urllib import quote_plus as _quote_plus
from urllib2 import HTTPError as _HTTPError
class DatasetException(Exception):
    """Raised when the dataset cannot be located or loaded for some reason."""
_Constants._DATABASE_NAME = "graduates.db"
# Verify the bundled SQLite database exists and is accessible before connecting;
# a missing or unreadable file is a hard error, an unwritable one only a warning.
if not _os.access(_Constants._DATABASE_NAME, _os.F_OK):
    raise DatasetException("Error! Could not find a \"{0}\" file. Make sure that there is a \"{0}\" in the same directory as \"{1}.py\"! Spelling is very important here.".format(_Constants._DATABASE_NAME, __name__))
elif not _os.access(_Constants._DATABASE_NAME, _os.R_OK):
    raise DatasetException("Error! Could not read the \"{0}\" file. Make sure that it is readable by changing its permissions. You may need to get help from your instructor.".format(_Constants._DATABASE_NAME, __name__))
elif not _os.access(_Constants._DATABASE_NAME, _os.W_OK):
    # BUGFIX: the message previously contained no "{0}" placeholder, so the
    # database name passed to .format() was silently dropped and the warning
    # printed empty quotes.
    _sys.stderr.write('The local cache ("{0}") will not be updated. Make sure that it is writable by changing its permissions. You may need to get help from your instructor.\n'.format(_Constants._DATABASE_NAME))
    _sys.stderr.flush()

_Constants._DATABASE = _sql.connect(_Constants._DATABASE_NAME)
class _Auxiliary(object):
@staticmethod
def _parse_type(value, type_func):
"""
Attempt to cast *value* into *type_func*, returning *default* if it fails.
"""
default = type_func(0)
if value is None:
return default
try:
return type_func(value)
except ValueError:
return default
@staticmethod
def _byteify(input):
"""
Force the given input to only use `str` instead of `bytes` or `unicode`.
This works even if the input is a dict, list,
"""
if isinstance(input, dict):
return {_Auxiliary._byteify(key): _Auxiliary._byteify(value) for key, value in input.items()}
elif isinstance(input, list):
return [_Auxiliary._byteify(element) for element in input]
elif _Constants._PYTHON_3 and isinstance(input, str):
return str(input.encode('ascii', 'replace').decode('ascii'))
elif not _Constants._PYTHON_3 and isinstance(input, unicode):
return str(input.encode('ascii', 'replace').decode('ascii'))
else:
return input
@staticmethod
def _guess_schema(input):
if isinstance(input, dict):
return {str(key.encode('ascii', 'replace').decode('ascii')):
_Auxiliary._guess_schema(value) for key, value in input.items()}
elif isinstance(input, list):
return [_Auxiliary._guess_schema(input[0])] if input else []
else:
return type(input)
################################################################################
# Domain Objects
################################################################################
################################################################################
# Interfaces
################################################################################
def get_majors():
    """
    Returns information about all recorded majors.

    :returns: A list (one entry per row of the ``graduates`` table) of the
        stored JSON documents, decoded and normalized to plain ``str`` values
        via ``_Auxiliary._byteify``.
    """
    # Cleanup: removed a dead `if False:` test-mode scaffold and a no-op
    # .format(hardware=...) call on a query string with no placeholder.
    rows = _Constants._DATABASE.execute("SELECT data FROM graduates")
    data = [r[0] for r in rows]
    data = [_Auxiliary._byteify(_json.loads(r)) for r in data]
    return _Auxiliary._byteify(data)
################################################################################
# Internalized testing code
################################################################################
def _test_interfaces():
    """Exercise each public interface once, printing its size, schema, and timing."""
    from pprint import pprint as _pprint
    from timeit import default_timer as _default_timer

    # Production test
    print("Production get_majors")
    started = _default_timer()
    majors = get_majors()
    print("{} entries found.".format(len(majors)))
    _pprint(_Auxiliary._guess_schema(majors))
    print("Time taken: {}".format(_default_timer() - started))
if __name__ == '__main__':
    from optparse import OptionParser as _OptionParser

    # Command-line harness: -t/--test runs the interface smoke tests,
    # -r/--reset is meant to clear the local cache.
    _parser = _OptionParser()
    _parser.add_option("-t", "--test", action="store_true",
                       default=False,
                       help="Execute the interfaces to test them.")
    _parser.add_option("-r", "--reset", action="store_true",
                       default=False,
                       help="Reset the cache")
    (_options, _args) = _parser.parse_args()
    if _options.test:
        _test_interfaces()
    if _options.reset:
        # FIXME(review): _modify_self is not defined anywhere in this module,
        # so running with --reset raises NameError. Needs a real implementation
        # (or the option should be removed).
        _modify_self()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.