repo_name (stringlengths 4-116) | path (stringlengths 3-942) | size (stringlengths 1-7) | content (stringlengths 3-1.05M) | license (stringclasses, 15 values)
---|---|---|---|---|
avranju/qpid-jms | qpid-jms-client/src/main/java/org/apache/qpid/jms/selector/parser/SelectorParserImpl.java | 30987 | /* Generated By:JavaCC: Do not edit this line. SelectorParserImpl.java */
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.qpid.jms.selector.parser;
import java.io.*;
import java.util.*;
import org.apache.qpid.jms.selector.filter.*;
/**
* JMS Selector Parser generated by JavaCC
*
* Do not edit this .java file directly - it is generated from SelectorParserImpl.jj
* Edit SelectorParserImpl.jj and rebuild with the 'generate-selector-parser' profile.
*/
public class SelectorParserImpl implements SelectorParserImplConstants {
private BooleanExpression asBooleanExpression(Expression value) throws ParseException {
if (value instanceof BooleanExpression) {
return (BooleanExpression) value;
}
if (value instanceof PropertyExpression) {
return UnaryExpression.createBooleanCast( value );
}
throw new ParseException("Expression will not result in a boolean value: " + value);
}
// ----------------------------------------------------------------------------
// Grammar
// ----------------------------------------------------------------------------
final public BooleanExpression JmsSelector() throws ParseException {
Expression left=null;
left = orExpression();
{if (true) return asBooleanExpression(left);}
throw new Error("Missing return statement in function");
}
final public Expression orExpression() throws ParseException {
Expression left;
Expression right;
left = andExpression();
label_1:
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case OR:
;
break;
default:
break label_1;
}
jj_consume_token(OR);
right = andExpression();
left = LogicExpression.createOR(asBooleanExpression(left), asBooleanExpression(right));
}
{if (true) return left;}
throw new Error("Missing return statement in function");
}
final public Expression andExpression() throws ParseException {
Expression left;
Expression right;
left = equalityExpression();
label_2:
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case AND:
;
break;
default:
break label_2;
}
jj_consume_token(AND);
right = equalityExpression();
left = LogicExpression.createAND(asBooleanExpression(left), asBooleanExpression(right));
}
{if (true) return left;}
throw new Error("Missing return statement in function");
}
final public Expression equalityExpression() throws ParseException {
Expression left;
Expression right;
left = comparisonExpression();
label_3:
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case IS:
case 27:
case 28:
;
break;
default:
break label_3;
}
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case 27:
jj_consume_token(27);
right = comparisonExpression();
left = ComparisonExpression.createEqual(left, right);
break;
case 28:
jj_consume_token(28);
right = comparisonExpression();
left = ComparisonExpression.createNotEqual(left, right);
break;
default:
if (jj_2_1(2)) {
jj_consume_token(IS);
jj_consume_token(NULL);
left = ComparisonExpression.createIsNull(left);
} else {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case IS:
jj_consume_token(IS);
jj_consume_token(NOT);
jj_consume_token(NULL);
left = ComparisonExpression.createIsNotNull(left);
break;
default:
jj_consume_token(-1);
throw new ParseException();
}
}
}
}
{if (true) return left;}
throw new Error("Missing return statement in function");
}
final public Expression comparisonExpression() throws ParseException {
Expression left;
Expression right;
Expression low;
Expression high;
String t, u;
boolean not;
ArrayList list;
left = addExpression();
label_4:
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case NOT:
case BETWEEN:
case LIKE:
case IN:
case 29:
case 30:
case 31:
case 32:
;
break;
default:
break label_4;
}
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case 29:
jj_consume_token(29);
right = addExpression();
left = ComparisonExpression.createGreaterThan(left, right);
break;
case 30:
jj_consume_token(30);
right = addExpression();
left = ComparisonExpression.createGreaterThanEqual(left, right);
break;
case 31:
jj_consume_token(31);
right = addExpression();
left = ComparisonExpression.createLessThan(left, right);
break;
case 32:
jj_consume_token(32);
right = addExpression();
left = ComparisonExpression.createLessThanEqual(left, right);
break;
case LIKE:
u=null;
jj_consume_token(LIKE);
t = stringLitteral();
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case ESCAPE:
jj_consume_token(ESCAPE);
u = stringLitteral();
break;
default:
;
}
left = ComparisonExpression.createLike(left, t, u);
break;
default:
if (jj_2_2(2)) {
u=null;
jj_consume_token(NOT);
jj_consume_token(LIKE);
t = stringLitteral();
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case ESCAPE:
jj_consume_token(ESCAPE);
u = stringLitteral();
break;
default:
;
}
left = ComparisonExpression.createNotLike(left, t, u);
} else {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case BETWEEN:
jj_consume_token(BETWEEN);
low = addExpression();
jj_consume_token(AND);
high = addExpression();
left = ComparisonExpression.createBetween(left, low, high);
break;
default:
if (jj_2_3(2)) {
jj_consume_token(NOT);
jj_consume_token(BETWEEN);
low = addExpression();
jj_consume_token(AND);
high = addExpression();
left = ComparisonExpression.createNotBetween(left, low, high);
} else {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case IN:
jj_consume_token(IN);
jj_consume_token(33);
t = stringLitteral();
list = new ArrayList();
list.add( t );
label_5:
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case 34:
;
break;
default:
break label_5;
}
jj_consume_token(34);
t = stringLitteral();
list.add( t );
}
jj_consume_token(35);
left = ComparisonExpression.createInFilter(left, list);
break;
default:
if (jj_2_4(2)) {
jj_consume_token(NOT);
jj_consume_token(IN);
jj_consume_token(33);
t = stringLitteral();
list = new ArrayList();
list.add( t );
label_6:
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case 34:
;
break;
default:
break label_6;
}
jj_consume_token(34);
t = stringLitteral();
list.add( t );
}
jj_consume_token(35);
left = ComparisonExpression.createNotInFilter(left, list);
} else {
jj_consume_token(-1);
throw new ParseException();
}
}
}
}
}
}
}
{if (true) return left;}
throw new Error("Missing return statement in function");
}
final public Expression addExpression() throws ParseException {
Expression left;
Expression right;
left = multExpr();
label_7:
while (true) {
if (jj_2_5(2147483647)) {
;
} else {
break label_7;
}
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case 36:
jj_consume_token(36);
right = multExpr();
left = ArithmeticExpression.createPlus(left, right);
break;
case 37:
jj_consume_token(37);
right = multExpr();
left = ArithmeticExpression.createMinus(left, right);
break;
default:
jj_consume_token(-1);
throw new ParseException();
}
}
{if (true) return left;}
throw new Error("Missing return statement in function");
}
final public Expression multExpr() throws ParseException {
Expression left;
Expression right;
left = unaryExpr();
label_8:
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case 38:
case 39:
case 40:
;
break;
default:
break label_8;
}
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case 38:
jj_consume_token(38);
right = unaryExpr();
left = ArithmeticExpression.createMultiply(left, right);
break;
case 39:
jj_consume_token(39);
right = unaryExpr();
left = ArithmeticExpression.createDivide(left, right);
break;
case 40:
jj_consume_token(40);
right = unaryExpr();
left = ArithmeticExpression.createMod(left, right);
break;
default:
jj_consume_token(-1);
throw new ParseException();
}
}
{if (true) return left;}
throw new Error("Missing return statement in function");
}
final public Expression unaryExpr() throws ParseException {
String s=null;
Expression left=null;
if (jj_2_6(2147483647)) {
jj_consume_token(36);
left = unaryExpr();
} else {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case 37:
jj_consume_token(37);
left = unaryExpr();
left = UnaryExpression.createNegate(left);
break;
case NOT:
jj_consume_token(NOT);
left = unaryExpr();
left = UnaryExpression.createNOT( asBooleanExpression(left) );
break;
case TRUE:
case FALSE:
case NULL:
case DECIMAL_LITERAL:
case HEX_LITERAL:
case OCTAL_LITERAL:
case FLOATING_POINT_LITERAL:
case STRING_LITERAL:
case ID:
case QUOTED_ID:
case 33:
left = primaryExpr();
break;
default:
jj_consume_token(-1);
throw new ParseException();
}
}
{if (true) return left;}
throw new Error("Missing return statement in function");
}
final public Expression primaryExpr() throws ParseException {
Expression left=null;
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case TRUE:
case FALSE:
case NULL:
case DECIMAL_LITERAL:
case HEX_LITERAL:
case OCTAL_LITERAL:
case FLOATING_POINT_LITERAL:
case STRING_LITERAL:
left = literal();
break;
case ID:
case QUOTED_ID:
left = variable();
break;
case 33:
jj_consume_token(33);
left = orExpression();
jj_consume_token(35);
break;
default:
jj_consume_token(-1);
throw new ParseException();
}
{if (true) return left;}
throw new Error("Missing return statement in function");
}
final public ConstantExpression literal() throws ParseException {
Token t;
String s;
ConstantExpression left=null;
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case STRING_LITERAL:
s = stringLitteral();
left = new ConstantExpression(s);
break;
case DECIMAL_LITERAL:
t = jj_consume_token(DECIMAL_LITERAL);
left = ConstantExpression.createFromDecimal(t.image);
break;
case HEX_LITERAL:
t = jj_consume_token(HEX_LITERAL);
left = ConstantExpression.createFromHex(t.image);
break;
case OCTAL_LITERAL:
t = jj_consume_token(OCTAL_LITERAL);
left = ConstantExpression.createFromOctal(t.image);
break;
case FLOATING_POINT_LITERAL:
t = jj_consume_token(FLOATING_POINT_LITERAL);
left = ConstantExpression.createFloat(t.image);
break;
case TRUE:
jj_consume_token(TRUE);
left = ConstantExpression.TRUE;
break;
case FALSE:
jj_consume_token(FALSE);
left = ConstantExpression.FALSE;
break;
case NULL:
jj_consume_token(NULL);
left = ConstantExpression.NULL;
break;
default:
jj_consume_token(-1);
throw new ParseException();
}
{if (true) return left;}
throw new Error("Missing return statement in function");
}
final public String stringLitteral() throws ParseException {
Token t;
StringBuffer rc = new StringBuffer();
boolean first=true;
t = jj_consume_token(STRING_LITERAL);
// Decode the string value.
String image = t.image;
for( int i=1; i < image.length()-1; i++ ) {
char c = image.charAt(i);
if( c == '\u005c'' )
i++;
rc.append(c);
}
{if (true) return rc.toString();}
throw new Error("Missing return statement in function");
}
final public PropertyExpression variable() throws ParseException {
Token t;
PropertyExpression left=null;
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case ID:
t = jj_consume_token(ID);
left = new PropertyExpression(t.image);
break;
case QUOTED_ID:
t = jj_consume_token(QUOTED_ID);
// Decode the string value.
StringBuffer rc = new StringBuffer();
String image = t.image;
for( int i=1; i < image.length()-1; i++ ) {
char c = image.charAt(i);
if( c == '"' )
i++;
rc.append(c);
}
{if (true) return new PropertyExpression(rc.toString());}
break;
default:
jj_consume_token(-1);
throw new ParseException();
}
{if (true) return left;}
throw new Error("Missing return statement in function");
}
private boolean jj_2_1(int xla) {
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_1(); }
catch(LookaheadSuccess ls) { return true; }
}
private boolean jj_2_2(int xla) {
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_2(); }
catch(LookaheadSuccess ls) { return true; }
}
private boolean jj_2_3(int xla) {
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_3(); }
catch(LookaheadSuccess ls) { return true; }
}
private boolean jj_2_4(int xla) {
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_4(); }
catch(LookaheadSuccess ls) { return true; }
}
private boolean jj_2_5(int xla) {
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_5(); }
catch(LookaheadSuccess ls) { return true; }
}
private boolean jj_2_6(int xla) {
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_6(); }
catch(LookaheadSuccess ls) { return true; }
}
private boolean jj_3R_53() {
if (jj_scan_token(LIKE)) return true;
if (jj_3R_38()) return true;
Token xsp;
xsp = jj_scanpos;
if (jj_3R_58()) jj_scanpos = xsp;
return false;
}
private boolean jj_3R_27() {
if (jj_scan_token(DECIMAL_LITERAL)) return true;
return false;
}
private boolean jj_3R_36() {
if (jj_3R_39()) return true;
Token xsp;
while (true) {
xsp = jj_scanpos;
if (jj_3R_40()) { jj_scanpos = xsp; break; }
}
return false;
}
private boolean jj_3R_52() {
if (jj_scan_token(32)) return true;
if (jj_3R_43()) return true;
return false;
}
private boolean jj_3R_57() {
if (jj_scan_token(37)) return true;
if (jj_3R_9()) return true;
return false;
}
private boolean jj_3_5() {
Token xsp;
xsp = jj_scanpos;
if (jj_scan_token(36)) {
jj_scanpos = xsp;
if (jj_scan_token(37)) return true;
}
if (jj_3R_9()) return true;
return false;
}
private boolean jj_3R_26() {
if (jj_3R_38()) return true;
return false;
}
private boolean jj_3R_51() {
if (jj_scan_token(31)) return true;
if (jj_3R_43()) return true;
return false;
}
private boolean jj_3R_35() {
if (jj_scan_token(QUOTED_ID)) return true;
return false;
}
private boolean jj_3R_56() {
if (jj_scan_token(36)) return true;
if (jj_3R_9()) return true;
return false;
}
private boolean jj_3R_37() {
if (jj_scan_token(OR)) return true;
if (jj_3R_36()) return true;
return false;
}
private boolean jj_3R_23() {
Token xsp;
xsp = jj_scanpos;
if (jj_3R_26()) {
jj_scanpos = xsp;
if (jj_3R_27()) {
jj_scanpos = xsp;
if (jj_3R_28()) {
jj_scanpos = xsp;
if (jj_3R_29()) {
jj_scanpos = xsp;
if (jj_3R_30()) {
jj_scanpos = xsp;
if (jj_3R_31()) {
jj_scanpos = xsp;
if (jj_3R_32()) {
jj_scanpos = xsp;
if (jj_3R_33()) return true;
}
}
}
}
}
}
}
return false;
}
private boolean jj_3R_50() {
if (jj_scan_token(30)) return true;
if (jj_3R_43()) return true;
return false;
}
private boolean jj_3R_34() {
if (jj_scan_token(ID)) return true;
return false;
}
private boolean jj_3R_48() {
Token xsp;
xsp = jj_scanpos;
if (jj_3R_56()) {
jj_scanpos = xsp;
if (jj_3R_57()) return true;
}
return false;
}
private boolean jj_3R_49() {
if (jj_scan_token(29)) return true;
if (jj_3R_43()) return true;
return false;
}
private boolean jj_3R_44() {
Token xsp;
xsp = jj_scanpos;
if (jj_3R_49()) {
jj_scanpos = xsp;
if (jj_3R_50()) {
jj_scanpos = xsp;
if (jj_3R_51()) {
jj_scanpos = xsp;
if (jj_3R_52()) {
jj_scanpos = xsp;
if (jj_3R_53()) {
jj_scanpos = xsp;
if (jj_3_2()) {
jj_scanpos = xsp;
if (jj_3R_54()) {
jj_scanpos = xsp;
if (jj_3_3()) {
jj_scanpos = xsp;
if (jj_3R_55()) {
jj_scanpos = xsp;
if (jj_3_4()) return true;
}
}
}
}
}
}
}
}
}
return false;
}
private boolean jj_3R_24() {
Token xsp;
xsp = jj_scanpos;
if (jj_3R_34()) {
jj_scanpos = xsp;
if (jj_3R_35()) return true;
}
return false;
}
private boolean jj_3R_43() {
if (jj_3R_9()) return true;
Token xsp;
while (true) {
xsp = jj_scanpos;
if (jj_3R_48()) { jj_scanpos = xsp; break; }
}
return false;
}
private boolean jj_3R_25() {
if (jj_3R_36()) return true;
Token xsp;
while (true) {
xsp = jj_scanpos;
if (jj_3R_37()) { jj_scanpos = xsp; break; }
}
return false;
}
private boolean jj_3R_22() {
if (jj_scan_token(33)) return true;
if (jj_3R_25()) return true;
if (jj_scan_token(35)) return true;
return false;
}
private boolean jj_3R_21() {
if (jj_3R_24()) return true;
return false;
}
private boolean jj_3R_61() {
if (jj_scan_token(34)) return true;
if (jj_3R_38()) return true;
return false;
}
private boolean jj_3R_20() {
if (jj_3R_23()) return true;
return false;
}
private boolean jj_3R_19() {
Token xsp;
xsp = jj_scanpos;
if (jj_3R_20()) {
jj_scanpos = xsp;
if (jj_3R_21()) {
jj_scanpos = xsp;
if (jj_3R_22()) return true;
}
}
return false;
}
private boolean jj_3R_41() {
if (jj_3R_43()) return true;
Token xsp;
while (true) {
xsp = jj_scanpos;
if (jj_3R_44()) { jj_scanpos = xsp; break; }
}
return false;
}
private boolean jj_3R_38() {
if (jj_scan_token(STRING_LITERAL)) return true;
return false;
}
private boolean jj_3R_15() {
if (jj_3R_19()) return true;
return false;
}
private boolean jj_3R_59() {
if (jj_scan_token(ESCAPE)) return true;
if (jj_3R_38()) return true;
return false;
}
private boolean jj_3_4() {
if (jj_scan_token(NOT)) return true;
if (jj_scan_token(IN)) return true;
if (jj_scan_token(33)) return true;
if (jj_3R_38()) return true;
Token xsp;
while (true) {
xsp = jj_scanpos;
if (jj_3R_61()) { jj_scanpos = xsp; break; }
}
if (jj_scan_token(35)) return true;
return false;
}
private boolean jj_3_6() {
if (jj_scan_token(36)) return true;
if (jj_3R_10()) return true;
return false;
}
private boolean jj_3R_14() {
if (jj_scan_token(NOT)) return true;
if (jj_3R_10()) return true;
return false;
}
private boolean jj_3R_60() {
if (jj_scan_token(34)) return true;
if (jj_3R_38()) return true;
return false;
}
private boolean jj_3R_47() {
if (jj_scan_token(IS)) return true;
if (jj_scan_token(NOT)) return true;
if (jj_scan_token(NULL)) return true;
return false;
}
private boolean jj_3R_13() {
if (jj_scan_token(37)) return true;
if (jj_3R_10()) return true;
return false;
}
private boolean jj_3R_33() {
if (jj_scan_token(NULL)) return true;
return false;
}
private boolean jj_3_1() {
if (jj_scan_token(IS)) return true;
if (jj_scan_token(NULL)) return true;
return false;
}
private boolean jj_3R_12() {
if (jj_scan_token(36)) return true;
if (jj_3R_10()) return true;
return false;
}
private boolean jj_3R_46() {
if (jj_scan_token(28)) return true;
if (jj_3R_41()) return true;
return false;
}
private boolean jj_3R_10() {
Token xsp;
xsp = jj_scanpos;
if (jj_3R_12()) {
jj_scanpos = xsp;
if (jj_3R_13()) {
jj_scanpos = xsp;
if (jj_3R_14()) {
jj_scanpos = xsp;
if (jj_3R_15()) return true;
}
}
}
return false;
}
private boolean jj_3R_32() {
if (jj_scan_token(FALSE)) return true;
return false;
}
private boolean jj_3R_55() {
if (jj_scan_token(IN)) return true;
if (jj_scan_token(33)) return true;
if (jj_3R_38()) return true;
Token xsp;
while (true) {
xsp = jj_scanpos;
if (jj_3R_60()) { jj_scanpos = xsp; break; }
}
if (jj_scan_token(35)) return true;
return false;
}
private boolean jj_3R_45() {
if (jj_scan_token(27)) return true;
if (jj_3R_41()) return true;
return false;
}
private boolean jj_3R_42() {
Token xsp;
xsp = jj_scanpos;
if (jj_3R_45()) {
jj_scanpos = xsp;
if (jj_3R_46()) {
jj_scanpos = xsp;
if (jj_3_1()) {
jj_scanpos = xsp;
if (jj_3R_47()) return true;
}
}
}
return false;
}
private boolean jj_3R_31() {
if (jj_scan_token(TRUE)) return true;
return false;
}
private boolean jj_3_3() {
if (jj_scan_token(NOT)) return true;
if (jj_scan_token(BETWEEN)) return true;
if (jj_3R_43()) return true;
if (jj_scan_token(AND)) return true;
if (jj_3R_43()) return true;
return false;
}
private boolean jj_3R_18() {
if (jj_scan_token(40)) return true;
if (jj_3R_10()) return true;
return false;
}
private boolean jj_3R_30() {
if (jj_scan_token(FLOATING_POINT_LITERAL)) return true;
return false;
}
private boolean jj_3R_54() {
if (jj_scan_token(BETWEEN)) return true;
if (jj_3R_43()) return true;
if (jj_scan_token(AND)) return true;
if (jj_3R_43()) return true;
return false;
}
private boolean jj_3R_39() {
if (jj_3R_41()) return true;
Token xsp;
while (true) {
xsp = jj_scanpos;
if (jj_3R_42()) { jj_scanpos = xsp; break; }
}
return false;
}
private boolean jj_3R_17() {
if (jj_scan_token(39)) return true;
if (jj_3R_10()) return true;
return false;
}
private boolean jj_3R_29() {
if (jj_scan_token(OCTAL_LITERAL)) return true;
return false;
}
private boolean jj_3R_58() {
if (jj_scan_token(ESCAPE)) return true;
if (jj_3R_38()) return true;
return false;
}
private boolean jj_3_2() {
if (jj_scan_token(NOT)) return true;
if (jj_scan_token(LIKE)) return true;
if (jj_3R_38()) return true;
Token xsp;
xsp = jj_scanpos;
if (jj_3R_59()) jj_scanpos = xsp;
return false;
}
private boolean jj_3R_16() {
if (jj_scan_token(38)) return true;
if (jj_3R_10()) return true;
return false;
}
private boolean jj_3R_11() {
Token xsp;
xsp = jj_scanpos;
if (jj_3R_16()) {
jj_scanpos = xsp;
if (jj_3R_17()) {
jj_scanpos = xsp;
if (jj_3R_18()) return true;
}
}
return false;
}
private boolean jj_3R_40() {
if (jj_scan_token(AND)) return true;
if (jj_3R_39()) return true;
return false;
}
private boolean jj_3R_28() {
if (jj_scan_token(HEX_LITERAL)) return true;
return false;
}
private boolean jj_3R_9() {
if (jj_3R_10()) return true;
Token xsp;
while (true) {
xsp = jj_scanpos;
if (jj_3R_11()) { jj_scanpos = xsp; break; }
}
return false;
}
/** Generated Token Manager. */
public SelectorParserImplTokenManager token_source;
SimpleCharStream jj_input_stream;
/** Current token. */
public Token token;
/** Next token. */
public Token jj_nt;
private int jj_ntk;
private Token jj_scanpos, jj_lastpos;
private int jj_la;
/** Constructor with InputStream. */
public SelectorParserImpl(java.io.InputStream stream) {
this(stream, null);
}
/** Constructor with InputStream and supplied encoding */
public SelectorParserImpl(java.io.InputStream stream, String encoding) {
try { jj_input_stream = new SimpleCharStream(stream, encoding, 1, 1); } catch(java.io.UnsupportedEncodingException e) { throw new RuntimeException(e); }
token_source = new SelectorParserImplTokenManager(jj_input_stream);
token = new Token();
jj_ntk = -1;
}
/** Reinitialise. */
public void ReInit(java.io.InputStream stream) {
ReInit(stream, null);
}
/** Reinitialise. */
public void ReInit(java.io.InputStream stream, String encoding) {
try { jj_input_stream.ReInit(stream, encoding, 1, 1); } catch(java.io.UnsupportedEncodingException e) { throw new RuntimeException(e); }
token_source.ReInit(jj_input_stream);
token = new Token();
jj_ntk = -1;
}
/** Constructor. */
public SelectorParserImpl(java.io.Reader stream) {
jj_input_stream = new SimpleCharStream(stream, 1, 1);
token_source = new SelectorParserImplTokenManager(jj_input_stream);
token = new Token();
jj_ntk = -1;
}
/** Reinitialise. */
public void ReInit(java.io.Reader stream) {
jj_input_stream.ReInit(stream, 1, 1);
token_source.ReInit(jj_input_stream);
token = new Token();
jj_ntk = -1;
}
/** Constructor with generated Token Manager. */
public SelectorParserImpl(SelectorParserImplTokenManager tm) {
token_source = tm;
token = new Token();
jj_ntk = -1;
}
/** Reinitialise. */
public void ReInit(SelectorParserImplTokenManager tm) {
token_source = tm;
token = new Token();
jj_ntk = -1;
}
private Token jj_consume_token(int kind) throws ParseException {
Token oldToken;
if ((oldToken = token).next != null) token = token.next;
else token = token.next = token_source.getNextToken();
jj_ntk = -1;
if (token.kind == kind) {
return token;
}
token = oldToken;
throw generateParseException();
}
static private final class LookaheadSuccess extends java.lang.Error { }
final private LookaheadSuccess jj_ls = new LookaheadSuccess();
private boolean jj_scan_token(int kind) {
if (jj_scanpos == jj_lastpos) {
jj_la--;
if (jj_scanpos.next == null) {
jj_lastpos = jj_scanpos = jj_scanpos.next = token_source.getNextToken();
} else {
jj_lastpos = jj_scanpos = jj_scanpos.next;
}
} else {
jj_scanpos = jj_scanpos.next;
}
if (jj_scanpos.kind != kind) return true;
if (jj_la == 0 && jj_scanpos == jj_lastpos) throw jj_ls;
return false;
}
/** Get the next Token. */
final public Token getNextToken() {
if (token.next != null) token = token.next;
else token = token.next = token_source.getNextToken();
jj_ntk = -1;
return token;
}
/** Get the specific Token. */
final public Token getToken(int index) {
Token t = token;
for (int i = 0; i < index; i++) {
if (t.next != null) t = t.next;
else t = t.next = token_source.getNextToken();
}
return t;
}
private int jj_ntk() {
if ((jj_nt=token.next) == null)
return (jj_ntk = (token.next=token_source.getNextToken()).kind);
else
return (jj_ntk = jj_nt.kind);
}
/** Generate ParseException. */
public ParseException generateParseException() {
Token errortok = token.next;
int line = errortok.beginLine, column = errortok.beginColumn;
String mess = (errortok.kind == 0) ? tokenImage[0] : errortok.image;
return new ParseException("Parse error at line " + line + ", column " + column + ". Encountered: " + mess);
}
/** Enable tracing. */
final public void enable_tracing() {
}
/** Disable tracing. */
final public void disable_tracing() {
}
}
| apache-2.0 |
HebaKhaled/bposs | src/com.mentor.nucleus.bp.model.compare/src/com/mentor/nucleus/bp/model/compare/contentmergeviewer/ModelMergeViewer.java | 1964 | package com.mentor.nucleus.bp.model.compare.contentmergeviewer;
//=====================================================================
//
//File: $RCSfile: ModelMergeViewer.java,v $
//Version: $Revision: 1.2 $
//Modified: $Date: 2013/01/17 03:35:34 $
//
//(c) Copyright 2013-2014 by Mentor Graphics Corp. All rights reserved.
//
//=====================================================================
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy
// of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
//=====================================================================
import org.eclipse.jface.viewers.Viewer;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Control;
import com.mentor.nucleus.bp.core.Ooaofooa;
public abstract class ModelMergeViewer extends Viewer {
public static int LEFT = 0;
public static int RIGHT = 1;
public static int ANCESTOR = 2;
private Object key;
private int type;
private Ooaofooa compareRoot;
public abstract Control createControl(Composite parent);
public Object getKey() {
return key;
}
public int getType() {
return type;
}
public Ooaofooa getCompareRoot() {
return compareRoot;
}
public void setKey(Object key) {
this.key = key;
}
public void setType(int type) {
this.type = type;
}
public void setCompareRoot(Ooaofooa compareRoot) {
this.compareRoot = compareRoot;
}
public abstract String getTitle();
}
| apache-2.0 |
DannyEaton/growify | app/components/settings-component.ts | 225 | /**
* Created by Daniel Eaton on 12/11/2016.
*/
import {Component} from "@angular/core";
import {Router} from "@angular/router";
@Component({
templateUrl: "./templates/settings.php"
})
export class SettingsComponent { }
| apache-2.0 |
hakanu/iftar | _posts_/vakit/BREZILYA/JUIZ_DE_FORA/2017-02-01-.markdown | 332 | ---
layout: vakit_dashboard
title: JUIZ_DE_FORA, BREZILYA için iftar, namaz vakitleri ve hava durumu - ilçe/eyalet seç
permalink: /BREZILYA/JUIZ_DE_FORA/
---
<script type="text/javascript">
var GLOBAL_COUNTRY = 'BREZILYA';
var GLOBAL_CITY = 'JUIZ_DE_FORA';
var GLOBAL_STATE = '';
var lat = 72;
var lon = 21;
</script>
| apache-2.0 |
andreabertagnolli/orika | tests/src/main/java/ma/glasnost/orika/test/community/Issue44TestCase.java | 8357 | /*
* Orika - simpler, better and faster Java bean mapping
*
* Copyright (C) 2011-2013 Orika authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ma.glasnost.orika.test.community;
import static java.util.Arrays.asList;
import static org.hamcrest.Matchers.arrayWithSize;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertThat;
import java.util.ArrayList;
import java.util.List;
import ma.glasnost.orika.CustomConverter;
import ma.glasnost.orika.MapperFactory;
import ma.glasnost.orika.MappingContext;
import ma.glasnost.orika.impl.ConfigurableMapper;
import ma.glasnost.orika.metadata.Type;
import org.junit.Test;
/**
* Allow converters for Lists (or other collections).
* <p>
*
* @see <a href="https://code.google.com/archive/p/orika/issues/44">https://code.google.com/archive/p/orika/</a>
*/
public class Issue44TestCase {
@Test
public void shouldMapCollection() {
ConfigurableMapper mapper = new ConfigurableMapper() {
@Override
protected void configure(MapperFactory factory) {
factory.classMap(Order.class, OrderDto.class).byDefault().register();
factory.classMap(Product.class, ProductDto.class).byDefault().register();
}
};
Order order = new Order();
Product product = new Product();
product.setName("myName");
order.setProducts(asList(product));
OrderDto orderDto = mapper.map(order, OrderDto.class);
assertThat(orderDto.getProducts(), hasSize(1));
assertThat(orderDto.getProducts().get(0).getName(), is(equalTo("myName")));
}
@Test
public void shouldMapCollectionWithConverter() {
ConfigurableMapper mapper = new ConfigurableMapper() {
@Override
protected void configure(MapperFactory factory) {
factory.getConverterFactory().registerConverter("productToName", new CustomConverter<List<Product>, List<String>>() {
public List<String> convert(List<Product> source, Type<? extends List<String>> destinationType, MappingContext context) {
ArrayList<String> list = new ArrayList<String>(source.size());
for (Product product : source) {
list.add(product.getName());
}
return list;
}
});
factory.classMap(Order.class, OrderDto.class)
.fieldMap("products", "productNames")
.converter("productToName")
.add()
.register();
factory.classMap(Product.class, ProductDto.class).byDefault().register();
}
};
Order order = new Order();
Product product = new Product();
product.setName("myName");
order.setProducts(asList(product));
OrderDto orderDto = mapper.map(order, OrderDto.class);
assertThat(orderDto.getProductNames(), hasSize(1));
assertThat(orderDto.getProductNames().get(0), is(equalTo("myName")));
}
@Test
public void shouldMapCollectionWithElementConverter_ToCollection() {
ConfigurableMapper mapper = new ConfigurableMapper() {
@Override
protected void configure(MapperFactory factory) {
factory.getConverterFactory().registerConverter("productToName", new CustomConverter<Product, String>() {
public String convert(Product source, Type<? extends String> destinationType, MappingContext context) {
return source.getName();
}
});
factory.classMap(Order.class, OrderDto.class)
.fieldMap("products", "productNames")
.converter("productToName")
.add()
.register();
factory.classMap(Product.class, ProductDto.class).byDefault().register();
}
};
Order order = new Order();
Product product = new Product();
product.setName("myName");
order.setProducts(asList(product));
OrderDto orderDto = mapper.map(order, OrderDto.class);
assertThat(orderDto.getProductNames(), hasSize(1));
assertThat(orderDto.getProductNames().get(0), is(equalTo("myName")));
}
@Test
public void shouldMapCollectionWithElementConverter_ToArray() {
ConfigurableMapper mapper = new ConfigurableMapper() {
@Override
protected void configure(MapperFactory factory) {
factory.getConverterFactory().registerConverter("productToName", new CustomConverter<Product, String>() {
public String convert(Product source, Type<? extends String> destinationType, MappingContext context) {
return source.getName();
}
});
factory.classMap(Order.class, OrderDto2.class)
.fieldMap("products", "productNames")
.converter("productToName")
.add()
.register();
factory.classMap(Product.class, ProductDto.class).byDefault().register();
}
};
Order order = new Order();
Product product = new Product();
product.setName("myName");
order.setProducts(asList(product));
OrderDto2 orderDto = mapper.map(order, OrderDto2.class);
assertThat(orderDto.getProductNames(), arrayWithSize(1));
assertThat(orderDto.getProductNames()[0], is(equalTo("myName")));
}
public static class Product {
private String name;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
public static class Order {
private List<Product> products;
public List<Product> getProducts() {
return products;
}
public void setProducts(List<Product> products) {
this.products = products;
}
}
public static class ProductDto {
private String name;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
public static class OrderDto {
private List<ProductDto> products;
private List<String> productNames;
public List<ProductDto> getProducts() {
return products;
}
public void setProducts(List<ProductDto> products) {
this.products = products;
}
public List<String> getProductNames() {
return productNames;
}
public void setProductNames(List<String> productNames) {
this.productNames = productNames;
}
}
public static class OrderDto2 {
private List<ProductDto> products;
private String[] productNames;
public List<ProductDto> getProducts() {
return products;
}
public void setProducts(List<ProductDto> products) {
this.products = products;
}
public String[] getProductNames() {
return productNames;
}
public void setProductNames(String[] productNames) {
this.productNames = productNames;
}
}
} | apache-2.0 |
googleapis/google-cloud-dotnet | apis/Google.Cloud.Spanner.Admin.Database.V1/Google.Cloud.Spanner.Admin.Database.V1.GeneratedSnippets/DatabaseAdminClient.ListDatabasesRequestObjectSnippet.g.cs | 3278 | // Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Generated code. DO NOT EDIT!
namespace Google.Cloud.Spanner.Admin.Database.V1.Snippets
{
// [START spanner_v1_generated_DatabaseAdmin_ListDatabases_sync]
using Google.Api.Gax;
using Google.Cloud.Spanner.Admin.Database.V1;
using Google.Cloud.Spanner.Common.V1;
using System;
public sealed partial class GeneratedDatabaseAdminClientSnippets
{
/// <summary>Snippet for ListDatabases</summary>
/// <remarks>
/// This snippet has been automatically generated for illustrative purposes only.
/// It may require modifications to work in your environment.
/// </remarks>
public void ListDatabasesRequestObject()
{
// Create client
DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.Create();
// Initialize request argument(s)
ListDatabasesRequest request = new ListDatabasesRequest
{
ParentAsInstanceName = InstanceName.FromProjectInstance("[PROJECT]", "[INSTANCE]"),
};
// Make the request
PagedEnumerable<ListDatabasesResponse, Database> response = databaseAdminClient.ListDatabases(request);
// Iterate over all response items, lazily performing RPCs as required
foreach (Database item in response)
{
// Do something with each item
Console.WriteLine(item);
}
// Or iterate over pages (of server-defined size), performing one RPC per page
foreach (ListDatabasesResponse page in response.AsRawResponses())
{
// Do something with each page of items
Console.WriteLine("A page of results:");
foreach (Database item in page)
{
// Do something with each item
Console.WriteLine(item);
}
}
// Or retrieve a single page of known size (unless it's the final page), performing as many RPCs as required
int pageSize = 10;
Page<Database> singlePage = response.ReadPage(pageSize);
// Do something with the page of items
Console.WriteLine($"A page of {pageSize} results (unless it's the final page):");
foreach (Database item in singlePage)
{
// Do something with each item
Console.WriteLine(item);
}
// Store the pageToken, for when the next page is required.
string nextPageToken = singlePage.NextPageToken;
}
}
// [END spanner_v1_generated_DatabaseAdmin_ListDatabases_sync]
}
| apache-2.0 |
TYKYTeam/AndroidBase | readme/README_RecyclerView.md | 1316 | # A handy base class for list pages
### 1. Extend BaseRecyclerViewFragment<T> or BaseRecyclerViewActivity<T>, where T is the type of the list items
### 2. In your XML layout, include
<include layout="@layout/layout_recyclerview"/>
or use the same IDs:
@+id/swiperefresh for the SwipeRefreshLayout,
@+id/recyclerView for the RecyclerView
### 3. Implement the three callbacks below (example: [Test_BaseRecyclerViewActivity.java](../app/src/main/java/net/liang/androidbaseapplication/Test_BaseRecyclerViewActivity.java "Test_BaseRecyclerViewActivity.java"))
>
```
// The list adapter
// The default page size is 10; change it with setPageSize(int pageSize)
@Override
public BaseRecyclerAdapter addListAdapter() {
return new RecyclerAdapter(this, recyclerView, null);
}
```
>
```
// Callback invoked when the request succeeds;
// onSuccess receives the complete response data; extract the list portion yourself and add it to the adapter
@Override
public void onListSuccess(List<String> strings, int pageNo) {
// For a single page, use:
//adapter.showList(strings);
// For multiple pages, use:
adapter.showList(strings, pageNo);
}
```
>
```
// Fetches the list data from the network; note that the return value is an Observable
@Override
public Observable<List<String>> onListGetData(int pageNo) {
}
```
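Putting the three callbacks together, a minimal page could look like the sketch below. This is only an illustration: DemoListActivity and fetchStringsFromServer(...) are made-up names; replace the latter with your own request that returns an Observable<List<String>>.
```
public class DemoListActivity extends BaseRecyclerViewActivity<String> {

    @Override
    public BaseRecyclerAdapter addListAdapter() {
        // Same adapter as in step 3; the page size defaults to 10 (see setPageSize(int pageSize))
        return new RecyclerAdapter(this, recyclerView, null);
    }

    @Override
    public Observable<List<String>> onListGetData(int pageNo) {
        // Placeholder: return the Observable from your own network layer here
        return fetchStringsFromServer(pageNo);
    }

    @Override
    public void onListSuccess(List<String> strings, int pageNo) {
        // Hand the requested page of items to the adapter
        adapter.showList(strings, pageNo);
    }

    // Made-up helper standing in for a real API call (for example a Retrofit service)
    private Observable<List<String>> fetchStringsFromServer(int pageNo) {
        return Observable.just(new ArrayList<String>());
    }
}
```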
*** | apache-2.0 |
HyVar/DarwinSPL | plugins/eu.hyvar.mspl.manifest.resource.hymanifest.ui/src-gen/eu/hyvar/mspl/manifest/resource/hymanifest/ui/IHymanifestBracketHandler.java | 675 | /**
* <copyright>
* </copyright>
*
*
*/
package eu.hyvar.mspl.manifest.resource.hymanifest.ui;
/**
* The BracketHandler is responsible for handling the input of brackets. It
* automatically adds closing brackets, if the opening counterpart is entered in
* editors. It does also ignore the input of closing brackets, if these were
* automatically inserted right before.
*/
public interface IHymanifestBracketHandler {
/**
* If a closing bracket was added right before, this method returns true.
*/
public boolean addedClosingBracket();
/**
* Returns the last closing bracket that was added automatically.
*/
public String getClosingBracket();
}
| apache-2.0 |
khmarbaise/maven-plugins | maven-site-plugin/src/main/java/org/apache/maven/plugins/site/AbstractStagingMojo.java | 2132 | package org.apache.maven.plugins.site;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.commons.lang.StringUtils;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugins.annotations.Parameter;
/**
* Abstract base class for staging mojos.
*
* @author hboutemy
* @since 3.3
*/
public abstract class AbstractStagingMojo
extends AbstractDeployMojo
{
/**
* Top distribution management site url, for manual configuration when auto-calculated value
* doesn't match expectations. Relative module directory will be calculated from this url.
*/
@Parameter( property = "topSiteURL" )
protected String topSiteURL;
/**
* The String "staging/".
*/
protected static final String DEFAULT_STAGING_DIRECTORY = "staging/";
/**
* By default, staging mojos will get their top distribution management site url by getting top parent
* with the same site, which is a good heuristics. But in case the default value doesn't match
* expectations, <code>topSiteURL</code> can be configured: it will be used instead.
*/
@Override
protected String determineTopDistributionManagementSiteUrl()
throws MojoExecutionException
{
return ( StringUtils.isEmpty( topSiteURL ) ) ? getSite( getTopLevelProject( project ) ).getUrl() : topSiteURL;
}
}
| apache-2.0 |
OpenCMISS/neon | src/opencmiss/neon/core/problems/constants.py | 714 | '''
Copyright 2015 University of Auckland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
class RespirationConstants(object):
class ExpirationType(object):
PASSIVE = 0
ACTIVE = 1
| apache-2.0 |
sgarlick987/deck | app/scripts/modules/cloudfoundry/src/domain/ICloudFoundryServerGroup.ts | 1141 | import { IServerGroup } from '@spinnaker/core';
import { ICloudFoundrySpace, ICloudFoundryDroplet } from 'cloudfoundry/domain';
import { ICloudFoundryInstance } from 'cloudfoundry/domain/ICloudFoundryInstance';
export interface ICloudFoundryServerGroup extends IServerGroup {
appsManagerUri?: string;
diskQuota: number;
healthCheckType: string;
healthCheckHttpEndpoint: string;
state: 'STARTED' | 'STOPPED';
instances: ICloudFoundryInstance[];
metricsUri?: string;
memory: number;
space: ICloudFoundrySpace;
droplet?: ICloudFoundryDroplet;
serviceInstances: ICloudFoundryServiceInstance[];
env: ICloudFoundryEnvVar[];
ciBuild: ICloudFoundryBuildInfo;
appArtifact: ICloudFoundryArtifactInfo;
pipelineId: string;
}
export interface ICloudFoundryServiceInstance {
name: string;
plan: string;
service: string;
tags?: string[];
}
export interface ICloudFoundryEnvVar {
key: string;
value: string;
}
export interface ICloudFoundryBuildInfo {
jobName: string;
jobNumber: string;
jobUrl: string;
}
export interface ICloudFoundryArtifactInfo {
name: string;
version: string;
url: string;
}
| apache-2.0 |
shashidharatd/kubernetes | cmd/kubeadm/app/master/manifests.go | 15281 | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
import (
"bytes"
"fmt"
"os"
"path"
"strings"
"github.com/ghodss/yaml"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
api "k8s.io/client-go/pkg/api/v1"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/images"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
)
// Static pod definitions in golang form are included below so that `kubeadm init` can get going.
const (
DefaultClusterName = "kubernetes"
DefaultCloudConfigPath = "/etc/kubernetes/cloud-config"
etcd = "etcd"
apiServer = "apiserver"
controllerManager = "controller-manager"
scheduler = "scheduler"
proxy = "proxy"
kubeAPIServer = "kube-apiserver"
kubeControllerManager = "kube-controller-manager"
kubeScheduler = "kube-scheduler"
kubeProxy = "kube-proxy"
)
// WriteStaticPodManifests builds manifest objects based on user provided configuration and then dumps it to disk
// where kubelet will pick and schedule them.
func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error {
volumes := []api.Volume{k8sVolume(cfg)}
volumeMounts := []api.VolumeMount{k8sVolumeMount()}
if isCertsVolumeMountNeeded() {
volumes = append(volumes, certsVolume(cfg))
volumeMounts = append(volumeMounts, certsVolumeMount())
}
if isPkiVolumeMountNeeded() {
volumes = append(volumes, pkiVolume(cfg))
volumeMounts = append(volumeMounts, pkiVolumeMount())
}
// Prepare static pod specs
staticPodSpecs := map[string]api.Pod{
kubeAPIServer: componentPod(api.Container{
Name: kubeAPIServer,
Image: images.GetCoreImage(images.KubeAPIServerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
Command: getAPIServerCommand(cfg, false),
VolumeMounts: volumeMounts,
LivenessProbe: componentProbe(8080, "/healthz"),
Resources: componentResources("250m"),
Env: getProxyEnvVars(),
}, volumes...),
kubeControllerManager: componentPod(api.Container{
Name: kubeControllerManager,
Image: images.GetCoreImage(images.KubeControllerManagerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
Command: getControllerManagerCommand(cfg, false),
VolumeMounts: volumeMounts,
LivenessProbe: componentProbe(10252, "/healthz"),
Resources: componentResources("200m"),
Env: getProxyEnvVars(),
}, volumes...),
kubeScheduler: componentPod(api.Container{
Name: kubeScheduler,
Image: images.GetCoreImage(images.KubeSchedulerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
Command: getSchedulerCommand(cfg, false),
LivenessProbe: componentProbe(10251, "/healthz"),
Resources: componentResources("100m"),
Env: getProxyEnvVars(),
}),
}
// Add etcd static pod spec only if external etcd is not configured
if len(cfg.Etcd.Endpoints) == 0 {
etcdPod := componentPod(api.Container{
Name: etcd,
Command: []string{
"etcd",
"--listen-client-urls=http://127.0.0.1:2379",
"--advertise-client-urls=http://127.0.0.1:2379",
"--data-dir=/var/lib/etcd",
},
VolumeMounts: []api.VolumeMount{certsVolumeMount(), etcdVolumeMount(), k8sVolumeMount()},
Image: images.GetCoreImage(images.KubeEtcdImage, cfg, kubeadmapi.GlobalEnvParams.EtcdImage),
LivenessProbe: componentProbe(2379, "/health"),
Resources: componentResources("200m"),
}, certsVolume(cfg), etcdVolume(cfg), k8sVolume(cfg))
etcdPod.Spec.SecurityContext = &api.PodSecurityContext{
SELinuxOptions: &api.SELinuxOptions{
// Unconfine the etcd container so it can write to /var/lib/etcd with SELinux enforcing:
Type: "spc_t",
},
}
staticPodSpecs[etcd] = etcdPod
}
manifestsPath := path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "manifests")
if err := os.MkdirAll(manifestsPath, 0700); err != nil {
return fmt.Errorf("failed to create directory %q [%v]", manifestsPath, err)
}
for name, spec := range staticPodSpecs {
filename := path.Join(manifestsPath, name+".yaml")
serialized, err := yaml.Marshal(spec)
if err != nil {
return fmt.Errorf("failed to marshal manifest for %q to YAML [%v]", name, err)
}
if err := cmdutil.DumpReaderToFile(bytes.NewReader(serialized), filename); err != nil {
return fmt.Errorf("failed to create static pod manifest file for %q (%q) [%v]", name, filename, err)
}
}
return nil
}
// etcdVolume exposes a path on the host in order to guarantee data survival during reboot.
func etcdVolume(cfg *kubeadmapi.MasterConfiguration) api.Volume {
return api.Volume{
Name: "etcd",
VolumeSource: api.VolumeSource{
HostPath: &api.HostPathVolumeSource{Path: kubeadmapi.GlobalEnvParams.HostEtcdPath},
},
}
}
func etcdVolumeMount() api.VolumeMount {
return api.VolumeMount{
Name: "etcd",
MountPath: "/var/lib/etcd",
}
}
func isCertsVolumeMountNeeded() bool {
// Always return true for now. We may add conditional logic here for images which do not require host mounting /etc/ssl
// hyperkube for example already has valid ca-certificates installed
return true
}
// certsVolume exposes host SSL certificates to pod containers.
func certsVolume(cfg *kubeadmapi.MasterConfiguration) api.Volume {
return api.Volume{
Name: "certs",
VolumeSource: api.VolumeSource{
// TODO(phase1+) make path configurable
HostPath: &api.HostPathVolumeSource{Path: "/etc/ssl/certs"},
},
}
}
func certsVolumeMount() api.VolumeMount {
return api.VolumeMount{
Name: "certs",
MountPath: "/etc/ssl/certs",
}
}
func isPkiVolumeMountNeeded() bool {
// On some systems were we host-mount /etc/ssl/certs, it is also required to mount /etc/pki. This is needed
// due to symlinks pointing from files in /etc/ssl/certs into /etc/pki/
if _, err := os.Stat("/etc/pki"); err == nil {
return true
}
return false
}
func pkiVolume(cfg *kubeadmapi.MasterConfiguration) api.Volume {
return api.Volume{
Name: "pki",
VolumeSource: api.VolumeSource{
// TODO(phase1+) make path configurable
HostPath: &api.HostPathVolumeSource{Path: "/etc/pki"},
},
}
}
func pkiVolumeMount() api.VolumeMount {
return api.VolumeMount{
Name: "pki",
MountPath: "/etc/pki",
}
}
func flockVolume() api.Volume {
return api.Volume{
Name: "var-lock",
VolumeSource: api.VolumeSource{
HostPath: &api.HostPathVolumeSource{Path: "/var/lock"},
},
}
}
func flockVolumeMount() api.VolumeMount {
return api.VolumeMount{
Name: "var-lock",
MountPath: "/var/lock",
ReadOnly: false,
}
}
func k8sVolume(cfg *kubeadmapi.MasterConfiguration) api.Volume {
return api.Volume{
Name: "k8s",
VolumeSource: api.VolumeSource{
HostPath: &api.HostPathVolumeSource{Path: kubeadmapi.GlobalEnvParams.KubernetesDir},
},
}
}
func k8sVolumeMount() api.VolumeMount {
return api.VolumeMount{
Name: "k8s",
MountPath: "/etc/kubernetes/",
ReadOnly: true,
}
}
func componentResources(cpu string) api.ResourceRequirements {
return api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse(cpu),
},
}
}
func componentProbe(port int, path string) *api.Probe {
return &api.Probe{
Handler: api.Handler{
HTTPGet: &api.HTTPGetAction{
Host: "127.0.0.1",
Path: path,
Port: intstr.FromInt(port),
},
},
InitialDelaySeconds: 15,
TimeoutSeconds: 15,
FailureThreshold: 8,
}
}
func componentPod(container api.Container, volumes ...api.Volume) api.Pod {
return api.Pod{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Pod",
},
ObjectMeta: metav1.ObjectMeta{
Name: container.Name,
Namespace: "kube-system",
Labels: map[string]string{"component": container.Name, "tier": "control-plane"},
},
Spec: api.PodSpec{
Containers: []api.Container{container},
HostNetwork: true,
Volumes: volumes,
},
}
}
func getComponentBaseCommand(component string) []string {
if kubeadmapi.GlobalEnvParams.HyperkubeImage != "" {
return []string{"/hyperkube", component}
}
return []string{"kube-" + component}
}
func getCertFilePath(certName string) string {
return path.Join(kubeadmapi.GlobalEnvParams.HostPKIPath, certName)
}
func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted bool) []string {
var command []string
// self-hosted apiserver needs to wait on a lock
if selfHosted {
command = []string{"/usr/bin/flock", "--exclusive", "--timeout=30", "/var/lock/api-server.lock"}
}
command = append(getComponentBaseCommand(apiServer),
"--insecure-bind-address=127.0.0.1",
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
"--service-cluster-ip-range="+cfg.Networking.ServiceSubnet,
"--service-account-key-file="+getCertFilePath(kubeadmconstants.ServiceAccountPublicKeyName),
"--client-ca-file="+getCertFilePath(kubeadmconstants.CACertName),
"--tls-cert-file="+getCertFilePath(kubeadmconstants.APIServerCertName),
"--tls-private-key-file="+getCertFilePath(kubeadmconstants.APIServerKeyName),
"--kubelet-client-certificate="+getCertFilePath(kubeadmconstants.APIServerKubeletClientCertName),
"--kubelet-client-key="+getCertFilePath(kubeadmconstants.APIServerKubeletClientKeyName),
"--token-auth-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/tokens.csv",
fmt.Sprintf("--secure-port=%d", cfg.API.Port),
"--allow-privileged",
"--storage-backend=etcd3",
"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
// add options to configure the front proxy. Without the generated client cert, this will never be useable
// so add it unconditionally with recommended values
"--requestheader-username-headers=X-Remote-User",
"--requestheader-group-headers=X-Remote-Group",
"--requestheader-extra-headers-prefix=X-Remote-Extra-",
"--requestheader-client-ca-file="+getCertFilePath(kubeadmconstants.FrontProxyCACertName),
"--requestheader-allowed-names=front-proxy-client",
)
if cfg.AuthorizationMode != "" {
command = append(command, "--authorization-mode="+cfg.AuthorizationMode)
switch cfg.AuthorizationMode {
case kubeadmconstants.AuthzModeABAC:
command = append(command, "--authorization-policy-file="+path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeadmconstants.AuthorizationPolicyFile))
case kubeadmconstants.AuthzModeWebhook:
command = append(command, "--authorization-webhook-config-file="+path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeadmconstants.AuthorizationWebhookConfigFile))
}
}
// Use first address we are given
if len(cfg.API.AdvertiseAddresses) > 0 {
if selfHosted {
command = append(command, "--advertise-address=$(POD_IP)")
} else {
command = append(command, fmt.Sprintf("--advertise-address=%s", cfg.API.AdvertiseAddresses[0]))
}
}
// Check if the user decided to use an external etcd cluster
if len(cfg.Etcd.Endpoints) > 0 {
command = append(command, fmt.Sprintf("--etcd-servers=%s", strings.Join(cfg.Etcd.Endpoints, ",")))
} else {
command = append(command, "--etcd-servers=http://127.0.0.1:2379")
}
// Is etcd secured?
if cfg.Etcd.CAFile != "" {
command = append(command, fmt.Sprintf("--etcd-cafile=%s", cfg.Etcd.CAFile))
}
if cfg.Etcd.CertFile != "" && cfg.Etcd.KeyFile != "" {
etcdClientFileArg := fmt.Sprintf("--etcd-certfile=%s", cfg.Etcd.CertFile)
etcdKeyFileArg := fmt.Sprintf("--etcd-keyfile=%s", cfg.Etcd.KeyFile)
command = append(command, etcdClientFileArg, etcdKeyFileArg)
}
if cfg.CloudProvider != "" {
command = append(command, "--cloud-provider="+cfg.CloudProvider)
// Only append the --cloud-config option if there's a such file
if _, err := os.Stat(DefaultCloudConfigPath); err == nil {
command = append(command, "--cloud-config="+DefaultCloudConfigPath)
}
}
return command
}
func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted bool) []string {
var command []string
// self-hosted controller-manager needs to wait on a lock
if selfHosted {
command = []string{"/usr/bin/flock", "--exclusive", "--timeout=30", "/var/lock/controller-manager.lock"}
}
command = append(getComponentBaseCommand(controllerManager),
"--address=127.0.0.1",
"--leader-elect",
"--master=127.0.0.1:8080",
"--cluster-name="+DefaultClusterName,
"--root-ca-file="+getCertFilePath(kubeadmconstants.CACertName),
"--service-account-private-key-file="+getCertFilePath(kubeadmconstants.ServiceAccountPrivateKeyName),
"--cluster-signing-cert-file="+getCertFilePath(kubeadmconstants.CACertName),
"--cluster-signing-key-file="+getCertFilePath(kubeadmconstants.CAKeyName),
"--insecure-experimental-approve-all-kubelet-csrs-for-group="+kubeadmconstants.CSVTokenBootstrapGroup,
)
if cfg.CloudProvider != "" {
command = append(command, "--cloud-provider="+cfg.CloudProvider)
// Only append the --cloud-config option if there's a such file
if _, err := os.Stat(DefaultCloudConfigPath); err == nil {
command = append(command, "--cloud-config="+DefaultCloudConfigPath)
}
}
// Let the controller-manager allocate Node CIDRs for the Pod network.
// Each node will get a subspace of the address CIDR provided with --pod-network-cidr.
if cfg.Networking.PodSubnet != "" {
command = append(command, "--allocate-node-cidrs=true", "--cluster-cidr="+cfg.Networking.PodSubnet)
}
return command
}
func getSchedulerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted bool) []string {
var command []string
// self-hosted apiserver needs to wait on a lock
if selfHosted {
command = []string{"/usr/bin/flock", "--exclusive", "--timeout=30", "/var/lock/api-server.lock"}
}
command = append(getComponentBaseCommand(scheduler),
"--address=127.0.0.1",
"--leader-elect",
"--master=127.0.0.1:8080",
)
return command
}
func getProxyEnvVars() []api.EnvVar {
envs := []api.EnvVar{}
for _, env := range os.Environ() {
pos := strings.Index(env, "=")
if pos == -1 {
// malformed environment variable, skip it.
continue
}
name := env[:pos]
value := env[pos+1:]
if strings.HasSuffix(strings.ToLower(name), "_proxy") && value != "" {
envVar := api.EnvVar{Name: name, Value: value}
envs = append(envs, envVar)
}
}
return envs
}
func getSelfHostedAPIServerEnv() []api.EnvVar {
podIPEnvVar := api.EnvVar{
Name: "POD_IP",
ValueFrom: &api.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
}
return append(getProxyEnvVars(), podIPEnvVar)
}
| apache-2.0 |
apache/incubator-tamaya | code/api/src/test/java/org/apache/tamaya/InvocationRecorder.java | 2591 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.tamaya;
import static org.assertj.core.api.Assertions.fail;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public class InvocationRecorder {
private List<Invocation> invocations = new ArrayList<>();
private Object record(Object instance, Method method, Object[] args) throws Throwable {
Invocation invocation = new Invocation(method.getName(), args);
this.invocations.add(invocation);
return method.invoke(instance, args);
}
public <T> T createProxy(Object instance, Class<T>... types) {
return (T) Proxy.newProxyInstance(
getClass().getClassLoader(), types,
(proxy,method,params) -> this.record(instance, method, params));
}
public void recordMethodCall(Object... params) {
Exception e = new Exception();
String methodName = e.getStackTrace()[1].getMethodName();
invocations.add(new Invocation(methodName, params));
}
public static final class Invocation{
public String methodName;
public Object[] params;
public Invocation(String methodName, Object[] params) {
this.methodName = methodName;
this.params = params;
}
}
public List<Invocation> getInvocations(){
return invocations;
}
public void assertInvocation(String method, Object... params){
for(Invocation invocation:invocations){
if(invocation.methodName.equals(method)){
if(Arrays.equals(invocation.params, params)){
return;
}
}
}
fail("No such invocation: "+method + Arrays.toString(params));
}
}
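/*
 * Usage sketch (illustrative only, not part of the original test utility; the
 * proxied type and recorded call below are hypothetical):
 *
 *   InvocationRecorder recorder = new InvocationRecorder();
 *   List proxied = recorder.createProxy(new ArrayList<String>(), List.class);
 *   proxied.add("value");
 *   recorder.assertInvocation("add", "value");
 */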
| apache-2.0 |
wmydz1/gowechat | pb/accesstoken.go | 1252 | // Package pb provides underlying implementation for qy and mp
package pb
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
)
// AccessTokenResponse stores the normal result of access token fetching.
type AccessTokenResponse struct {
AccessToken string `json:"access_token"`
ExpiresIn float64 `json:"expires_in"`
}
// AccessTokenErrorResponse stores the error result of access token fetching.
type AccessTokenErrorResponse struct {
Errcode string
Errmsg string
}
// FetchAccessToken provides underlying access token fetching implementation.
func FetchAccessToken(requestLine string) (string, float64, error) {
resp, err := http.Get(requestLine)
if err != nil || resp.StatusCode != http.StatusOK {
return "", 0.0, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", 0.0, err
}
// JSON decoding
if bytes.Contains(body, []byte("access_token")) {
atr := AccessTokenResponse{}
err = json.Unmarshal(body, &atr)
if err != nil {
return "", 0.0, err
}
return atr.AccessToken, atr.ExpiresIn, nil
}
ater := AccessTokenErrorResponse{}
err = json.Unmarshal(body, &ater)
if err != nil {
return "", 0.0, err
}
return "", 0.0, fmt.Errorf("%s", ater.Errmsg)
}
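// The function below is an illustrative sketch, not part of the original package: it
// shows how FetchAccessToken might be called. The request line uses the public WeChat
// token endpoint with placeholder credentials (APPID/SECRET are assumptions).
func exampleFetchAccessToken() {
	requestLine := "https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=APPID&secret=SECRET"
	token, expiresIn, err := FetchAccessToken(requestLine)
	if err != nil {
		fmt.Printf("fetch access token failed: %v\n", err)
		return
	}
	fmt.Printf("token=%s expires_in=%.0f\n", token, expiresIn)
}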
| apache-2.0 |
lerigos/music-service | iOS_9/Pods/Target Support Files/ImagePicker/ImagePicker-umbrella.h | 144 | #import <UIKit/UIKit.h>
FOUNDATION_EXPORT double ImagePickerVersionNumber;
FOUNDATION_EXPORT const unsigned char ImagePickerVersionString[];
| apache-2.0 |
dgutierr/kie-wb-common | kie-wb-common-screens/kie-wb-common-project-editor/kie-wb-common-project-editor-backend/src/test/java/org/kie/workbench/common/screens/projecteditor/backend/server/PomEditorServiceImplTest.java | 21647 | /*
* Copyright 2016 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.screens.projecteditor.backend.server;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.guvnor.common.services.backend.metadata.MetadataServerSideService;
import org.guvnor.common.services.backend.util.CommentedOptionFactory;
import org.guvnor.common.services.project.backend.server.utils.POMContentHandler;
import org.guvnor.common.services.project.model.GAV;
import org.guvnor.common.services.project.model.MavenRepositoryMetadata;
import org.guvnor.common.services.project.model.MavenRepositorySource;
import org.guvnor.common.services.project.model.POM;
import org.guvnor.common.services.project.model.ProjectRepositories;
import org.guvnor.common.services.project.service.DeploymentMode;
import org.guvnor.common.services.project.service.GAVAlreadyExistsException;
import org.guvnor.common.services.project.service.ProjectRepositoriesService;
import org.guvnor.common.services.project.service.ProjectRepositoryResolver;
import org.guvnor.common.services.shared.metadata.model.Metadata;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.kie.workbench.common.screens.defaulteditor.service.DefaultEditorContent;
import org.kie.workbench.common.screens.defaulteditor.service.DefaultEditorService;
import org.kie.workbench.common.screens.projecteditor.service.PomEditorService;
import org.kie.workbench.common.services.shared.project.KieProject;
import org.kie.workbench.common.services.shared.project.KieProjectService;
import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import org.uberfire.backend.vfs.Path;
import org.uberfire.io.IOService;
import org.uberfire.java.nio.base.options.CommentedOption;
import org.uberfire.java.nio.file.FileSystem;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
@RunWith(MockitoJUnitRunner.class)
public class PomEditorServiceImplTest {
@Mock
private IOService ioService;
@Mock
private DefaultEditorService defaultEditorService;
@Mock
private MetadataServerSideService metadataService;
@Mock
private CommentedOptionFactory commentedOptionFactory;
@Mock
private KieProjectService projectService;
@Mock
private ProjectRepositoryResolver repositoryResolver;
@Mock
private ProjectRepositoriesService projectRepositoriesService;
@Mock
private Path pomPath;
@Mock
private Metadata metaData;
@Mock
private KieProject project;
@Mock
private POM pom;
@Mock
private Path projectRepositoriesPath;
private PomEditorService service;
private String pomPathUri = "default://p0/pom.xml";
private Map<String, Object> attributes = new HashMap<String, Object>();
private DefaultEditorContent content = new DefaultEditorContent();
private POMContentHandler pomContentHandler = new POMContentHandler();
private String pomXml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +
"<project xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\" xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n" +
"<modelVersion>4.0.0</modelVersion>\n" +
"<groupId>groupId</groupId>\n" +
"<artifactId>artifactId</artifactId>\n" +
"<version>0.0.1</version>\n" +
"<name>name</name>\n" +
"<description>description</description>\n" +
"</project>";
private String comment = "comment";
@BeforeClass
public static void setupSystemProperties() {
//These are not needed for the tests
System.setProperty( "org.uberfire.nio.git.daemon.enabled",
"false" );
System.setProperty( "org.uberfire.nio.git.ssh.enabled",
"false" );
System.setProperty( "org.uberfire.sys.repo.monitor.disabled",
"true" );
}
@Before
public void setup() {
service = new PomEditorServiceImpl( ioService,
defaultEditorService,
metadataService,
commentedOptionFactory,
projectService,
pomContentHandler,
repositoryResolver,
projectRepositoriesService );
when( pomPath.toURI() ).thenReturn( pomPathUri );
when( defaultEditorService.loadContent( pomPath ) ).thenReturn( content );
when( metadataService.setUpAttributes( eq( pomPath ),
any( Metadata.class ) ) ).thenReturn( attributes );
when( projectService.resolveProject( pomPath ) ).thenReturn( project );
when( project.getRepositoriesPath() ).thenReturn( projectRepositoriesPath );
when( project.getPom() ).thenReturn( pom );
}
@Test
public void testLoad() {
final DefaultEditorContent content = service.loadContent( pomPath );
assertNotNull( content );
assertEquals( this.content,
content );
}
@Test
public void testSaveNonClashingGAVChangeToGAV() {
final Set<ProjectRepositories.ProjectRepository> projectRepositoriesMetadata = new HashSet<ProjectRepositories.ProjectRepository>();
final ProjectRepositories projectRepositories = new ProjectRepositories( projectRepositoriesMetadata );
when( projectRepositoriesService.load( projectRepositoriesPath ) ).thenReturn( projectRepositories );
final ArgumentCaptor<MavenRepositoryMetadata> resolvedRepositoriesCaptor = ArgumentCaptor.forClass( MavenRepositoryMetadata.class );
when( repositoryResolver.getRepositoriesResolvingArtifact( eq( pomXml ),
resolvedRepositoriesCaptor.capture() ) ).thenReturn( Collections.EMPTY_SET );
when( pom.getGav() ).thenReturn( new GAV( "groupId",
"artifactId",
"0.0.2" ) );
service.save( pomPath,
pomXml,
metaData,
comment,
DeploymentMode.VALIDATED );
verify( projectService,
times( 1 ) ).resolveProject( pomPath );
verify( projectRepositoriesService,
times( 1 ) ).load( projectRepositoriesPath );
verify( repositoryResolver,
times( 1 ) ).getRepositoriesResolvingArtifact( eq( pomXml ) );
final List<MavenRepositoryMetadata> resolvedRepositories = resolvedRepositoriesCaptor.getAllValues();
assertNotNull( resolvedRepositories );
assertEquals( 0,
resolvedRepositories.size() );
verify( ioService,
times( 1 ) ).startBatch( any( FileSystem.class ) );
verify( ioService,
times( 1 ) ).write( any( org.uberfire.java.nio.file.Path.class ),
eq( pomXml ),
eq( attributes ),
any( CommentedOption.class ) );
verify( ioService,
times( 1 ) ).endBatch();
}
@Test
public void testSaveNonClashingGAVNoChangeToGAV() {
when( pom.getGav() ).thenReturn( new GAV( "groupId",
"artifactId",
"0.0.1" ) );
service.save( pomPath,
pomXml,
metaData,
comment,
DeploymentMode.VALIDATED );
verify( projectService,
times( 1 ) ).resolveProject( pomPath );
verify( projectRepositoriesService,
never() ).load( projectRepositoriesPath );
verify( repositoryResolver,
never() ).getRepositoriesResolvingArtifact( eq( pomXml ) );
verify( ioService,
times( 1 ) ).startBatch( any( FileSystem.class ) );
verify( ioService,
times( 1 ) ).write( any( org.uberfire.java.nio.file.Path.class ),
eq( pomXml ),
eq( attributes ),
any( CommentedOption.class ) );
verify( ioService,
times( 1 ) ).endBatch();
}
@Test
public void testSaveNonClashingGAVFilteredChangeToGAV() {
final Set<ProjectRepositories.ProjectRepository> projectRepositoriesMetadata = new HashSet<ProjectRepositories.ProjectRepository>() {{
add( new ProjectRepositories.ProjectRepository( true,
new MavenRepositoryMetadata( "local-id",
"local-url",
MavenRepositorySource.LOCAL ) ) );
}};
final ProjectRepositories projectRepositories = new ProjectRepositories( projectRepositoriesMetadata );
when( projectRepositoriesService.load( projectRepositoriesPath ) ).thenReturn( projectRepositories );
final ArgumentCaptor<MavenRepositoryMetadata> resolvedRepositoriesCaptor = ArgumentCaptor.forClass( MavenRepositoryMetadata.class );
when( repositoryResolver.getRepositoriesResolvingArtifact( eq( pomXml ),
resolvedRepositoriesCaptor.capture() ) ).thenReturn( Collections.EMPTY_SET );
when( pom.getGav() ).thenReturn( new GAV( "groupId",
"artifactId",
"0.0.2" ) );
service.save( pomPath,
pomXml,
metaData,
comment,
DeploymentMode.VALIDATED );
verify( projectService,
times( 1 ) ).resolveProject( pomPath );
verify( projectRepositoriesService,
times( 1 ) ).load( projectRepositoriesPath );
verify( repositoryResolver,
times( 1 ) ).getRepositoriesResolvingArtifact( eq( pomXml ),
any( MavenRepositoryMetadata.class ) );
final List<MavenRepositoryMetadata> resolvedRepositories = resolvedRepositoriesCaptor.getAllValues();
assertNotNull( resolvedRepositories );
assertEquals( 1,
resolvedRepositories.size() );
final MavenRepositoryMetadata repositoryMetadata = resolvedRepositories.get( 0 );
assertEquals( "local-id",
repositoryMetadata.getId() );
assertEquals( "local-url",
repositoryMetadata.getUrl() );
assertEquals( MavenRepositorySource.LOCAL,
repositoryMetadata.getSource() );
verify( ioService,
times( 1 ) ).startBatch( any( FileSystem.class ) );
verify( ioService,
times( 1 ) ).write( any( org.uberfire.java.nio.file.Path.class ),
eq( pomXml ),
eq( attributes ),
any( CommentedOption.class ) );
verify( ioService,
times( 1 ) ).endBatch();
}
@Test
public void testSaveNonClashingGAVFilteredNoChangeToGAV() {
when( pom.getGav() ).thenReturn( new GAV( "groupId",
"artifactId",
"0.0.1" ) );
service.save( pomPath,
pomXml,
metaData,
comment,
DeploymentMode.VALIDATED );
verify( projectService,
times( 1 ) ).resolveProject( pomPath );
verify( projectRepositoriesService,
never() ).load( projectRepositoriesPath );
verify( repositoryResolver,
never() ).getRepositoriesResolvingArtifact( eq( pomXml ),
any( MavenRepositoryMetadata.class ) );
verify( ioService,
times( 1 ) ).startBatch( any( FileSystem.class ) );
verify( ioService,
times( 1 ) ).write( any( org.uberfire.java.nio.file.Path.class ),
eq( pomXml ),
eq( attributes ),
any( CommentedOption.class ) );
verify( ioService,
times( 1 ) ).endBatch();
}
@Test
public void testSaveClashingGAVChangeToGAV() {
final Set<ProjectRepositories.ProjectRepository> projectRepositoriesMetadata = new HashSet<ProjectRepositories.ProjectRepository>() {{
add( new ProjectRepositories.ProjectRepository( true,
new MavenRepositoryMetadata( "local-id",
"local-url",
MavenRepositorySource.LOCAL ) ) );
}};
final ProjectRepositories projectRepositories = new ProjectRepositories( projectRepositoriesMetadata );
when( projectRepositoriesService.load( projectRepositoriesPath ) ).thenReturn( projectRepositories );
final Set<MavenRepositoryMetadata> clashingRepositories = new HashSet<MavenRepositoryMetadata>() {{
add( new MavenRepositoryMetadata( "local-id",
"local-url",
MavenRepositorySource.LOCAL ) );
}};
final ArgumentCaptor<MavenRepositoryMetadata> resolvedRepositoriesCaptor = ArgumentCaptor.forClass( MavenRepositoryMetadata.class );
when( repositoryResolver.getRepositoriesResolvingArtifact( eq( pomXml ),
resolvedRepositoriesCaptor.capture() ) ).thenReturn( clashingRepositories );
when( pom.getGav() ).thenReturn( new GAV( "groupId",
"artifactId",
"0.0.2" ) );
try {
service.save( pomPath,
pomXml,
metaData,
comment,
DeploymentMode.VALIDATED );
} catch ( GAVAlreadyExistsException e ) {
// This is expected! We catch here rather than let JUnit handle it with
// @Test(expected = GAVAlreadyExistsException.class) so we can verify
// that only the expected methods have been invoked.
} catch ( Exception e ) {
fail( e.getMessage() );
}
verify( projectService,
times( 1 ) ).resolveProject( pomPath );
verify( projectRepositoriesService,
times( 1 ) ).load( projectRepositoriesPath );
verify( repositoryResolver,
times( 1 ) ).getRepositoriesResolvingArtifact( eq( pomXml ),
any( MavenRepositoryMetadata.class ) );
final List<MavenRepositoryMetadata> resolvedRepositories = resolvedRepositoriesCaptor.getAllValues();
assertNotNull( resolvedRepositories );
assertEquals( 1,
resolvedRepositories.size() );
final MavenRepositoryMetadata repositoryMetadata = resolvedRepositories.get( 0 );
assertEquals( "local-id",
repositoryMetadata.getId() );
assertEquals( "local-url",
repositoryMetadata.getUrl() );
assertEquals( MavenRepositorySource.LOCAL,
repositoryMetadata.getSource() );
verify( ioService,
never() ).startBatch( any( FileSystem.class ) );
verify( ioService,
never() ).write( any( org.uberfire.java.nio.file.Path.class ),
eq( pomXml ),
eq( attributes ),
any( CommentedOption.class ) );
verify( ioService,
never() ).endBatch();
}
@Test
public void testSaveClashingGAVNoChangeToGAV() {
when( pom.getGav() ).thenReturn( new GAV( "groupId",
"artifactId",
"0.0.1" ) );
try {
service.save( pomPath,
pomXml,
metaData,
comment,
DeploymentMode.VALIDATED );
} catch ( GAVAlreadyExistsException e ) {
// This should not be thrown if the GAV has not changed.
fail( e.getMessage() );
}
verify( projectService,
times( 1 ) ).resolveProject( pomPath );
verify( projectRepositoriesService,
never() ).load( projectRepositoriesPath );
verify( repositoryResolver,
never() ).getRepositoriesResolvingArtifact( eq( pomXml ),
any( MavenRepositoryMetadata.class ) );
verify( ioService,
times( 1 ) ).startBatch( any( FileSystem.class ) );
verify( ioService,
times( 1 ) ).write( any( org.uberfire.java.nio.file.Path.class ),
eq( pomXml ),
eq( attributes ),
any( CommentedOption.class ) );
verify( ioService,
times( 1 ) ).endBatch();
}
@Test
public void testSaveClashingGAVForced() {
final Set<ProjectRepositories.ProjectRepository> projectRepositoriesMetadata = new HashSet<ProjectRepositories.ProjectRepository>() {{
add( new ProjectRepositories.ProjectRepository( true,
new MavenRepositoryMetadata( "local-id",
"local-url",
MavenRepositorySource.LOCAL ) ) );
}};
final ProjectRepositories projectRepositories = new ProjectRepositories( projectRepositoriesMetadata );
when( projectRepositoriesService.load( projectRepositoriesPath ) ).thenReturn( projectRepositories );
final Set<MavenRepositoryMetadata> clashingRepositories = new HashSet<MavenRepositoryMetadata>() {{
add( new MavenRepositoryMetadata( "local-id",
"local-url",
MavenRepositorySource.LOCAL ) );
}};
when( repositoryResolver.getRepositoriesResolvingArtifact( eq( pomXml ),
any( MavenRepositoryMetadata.class ) ) ).thenReturn( clashingRepositories );
when( pom.getGav() ).thenReturn( new GAV( "groupId",
"artifactId",
"0.0.1" ) );
try {
service.save( pomPath,
pomXml,
metaData,
comment,
DeploymentMode.FORCED );
} catch ( GAVAlreadyExistsException e ) {
fail( e.getMessage() );
}
verify( projectService,
never() ).resolveProject( pomPath );
verify( projectRepositoriesService,
never() ).load( pomPath );
verify( repositoryResolver,
never() ).getRepositoriesResolvingArtifact( eq( pomXml ),
any( MavenRepositoryMetadata.class ) );
verify( ioService,
times( 1 ) ).startBatch( any( FileSystem.class ) );
verify( ioService,
times( 1 ) ).write( any( org.uberfire.java.nio.file.Path.class ),
eq( pomXml ),
eq( attributes ),
any( CommentedOption.class ) );
verify( ioService,
times( 1 ) ).endBatch();
}
}
| apache-2.0 |
drewpope/hbase | hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java | 22360 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.replication.regionserver;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.RegionAdminServiceCallable;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.client.RetryingCallable;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
import org.apache.hadoop.hbase.wal.WAL.Entry;
import org.apache.hadoop.hbase.wal.WALSplitter.EntryBuffers;
import org.apache.hadoop.hbase.wal.WALSplitter.OutputSink;
import org.apache.hadoop.hbase.wal.WALSplitter.PipelineController;
import org.apache.hadoop.hbase.wal.WALSplitter.RegionEntryBuffer;
import org.apache.hadoop.hbase.wal.WALSplitter.SinkWriter;
import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
import org.apache.hadoop.hbase.replication.WALEntryFilter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.util.StringUtils;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.protobuf.ServiceException;
/**
* A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} endpoint
* which receives the WAL edits from the WAL, and sends the edits to replicas
* of regions.
*/
@InterfaceAudience.Private
public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
private static final Log LOG = LogFactory.getLog(RegionReplicaReplicationEndpoint.class);
private Configuration conf;
private ClusterConnection connection;
// Reuse WALSplitter constructs as a WAL pipe
private PipelineController controller;
private RegionReplicaOutputSink outputSink;
private EntryBuffers entryBuffers;
// Number of writer threads
private int numWriterThreads;
private int operationTimeout;
private ExecutorService pool;
@Override
public void init(Context context) throws IOException {
super.init(context);
this.conf = HBaseConfiguration.create(context.getConfiguration());
String codecClassName = conf
.get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
conf.set(HConstants.RPC_CODEC_CONF_KEY, codecClassName);
this.numWriterThreads = this.conf.getInt(
"hbase.region.replica.replication.writer.threads", 3);
controller = new PipelineController();
entryBuffers = new EntryBuffers(controller,
this.conf.getInt("hbase.region.replica.replication.buffersize",
128*1024*1024));
// use the regular RPC timeout for replica replication RPC's
this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
}
@Override
protected void doStart() {
try {
connection = (ClusterConnection) HConnectionManager.createConnection(ctx.getConfiguration());
this.pool = getDefaultThreadPool(conf);
outputSink = new RegionReplicaOutputSink(controller, entryBuffers, connection, pool,
numWriterThreads, operationTimeout);
outputSink.startWriterThreads();
super.doStart();
} catch (IOException ex) {
LOG.warn("Received exception while creating connection :" + ex);
notifyFailed(ex);
}
}
@Override
protected void doStop() {
if (outputSink != null) {
try {
outputSink.finishWritingAndClose();
} catch (IOException ex) {
LOG.warn("Got exception while trying to close OutputSink");
LOG.warn(ex);
}
}
if (this.pool != null) {
this.pool.shutdownNow();
try {
// wait for 10 sec
boolean shutdown = this.pool.awaitTermination(10000, TimeUnit.MILLISECONDS);
if (!shutdown) {
LOG.warn("Failed to shutdown the thread pool after 10 seconds");
}
} catch (InterruptedException e) {
LOG.warn("Got interrupted while waiting for the thread pool to shut down" + e);
}
}
if (connection != null) {
try {
connection.close();
} catch (IOException ex) {
LOG.warn("Got exception closing connection :" + ex);
}
}
super.doStop();
}
/**
* Returns a thread pool for the RPCs to region replicas. Similar to
* Connection's thread pool.
*/
private ExecutorService getDefaultThreadPool(Configuration conf) {
int maxThreads = conf.getInt("hbase.region.replica.replication.threads.max", 256);
int coreThreads = conf.getInt("hbase.region.replica.replication.threads.core", 16);
if (maxThreads == 0) {
maxThreads = Runtime.getRuntime().availableProcessors() * 8;
}
if (coreThreads == 0) {
coreThreads = Runtime.getRuntime().availableProcessors() * 8;
}
long keepAliveTime = conf.getLong("hbase.region.replica.replication.threads.keepalivetime", 60);
LinkedBlockingQueue<Runnable> workQueue =
new LinkedBlockingQueue<Runnable>(maxThreads *
conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
ThreadPoolExecutor tpe = new ThreadPoolExecutor(
coreThreads,
maxThreads,
keepAliveTime,
TimeUnit.SECONDS,
workQueue,
Threads.newDaemonThreadFactory(this.getClass().toString() + "-rpc-shared-"));
tpe.allowCoreThreadTimeOut(true);
return tpe;
}
@Override
public boolean replicate(ReplicateContext replicateContext) {
/* A note on batching in RegionReplicaReplicationEndpoint (RRRE):
*
* RRRE relies on batching from two different mechanisms. The first is the batching from
* ReplicationSource since RRRE is a ReplicationEndpoint driven by RS. RS reads from a single
* WAL file filling up a buffer of heap size "replication.source.size.capacity"(64MB) or at most
* "replication.source.nb.capacity" entries or until it sees the end of file (in live tailing).
* Then RS passes all the buffered edits in this replicate() call context. RRRE puts the edits
* to the WALSplitter.EntryBuffers which is a blocking buffer space of up to
* "hbase.region.replica.replication.buffersize" (128MB) in size. This buffer splits the edits
* based on regions.
*
* There are "hbase.region.replica.replication.writer.threads"(default 3) writer threads which
* pick largest per-region buffer and send it to the SinkWriter (see RegionReplicaOutputSink).
* The SinkWriter in this case will send the wal edits to all secondary region replicas in
* parallel via a retrying rpc call. EntryBuffers guarantees that while a buffer is
* being written to the sink, another buffer for the same region will not be made available to
* writers, ensuring a region's edits are not replayed out of order.
*
* The replicate() call won't return until all the buffers are sent and ack'd by the sinks so
* that the replication can assume all edits are persisted. We may be able to do a better
* pipelining between the replication thread and output sinks later if it becomes a bottleneck.
*/
while (this.isRunning()) {
try {
for (Entry entry: replicateContext.getEntries()) {
entryBuffers.appendEntry(entry);
}
outputSink.flush(); // make sure everything is flushed
return true;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return false;
} catch (IOException e) {
LOG.warn("Received IOException while trying to replicate"
+ StringUtils.stringifyException(e));
}
}
return false;
}
@Override
public boolean canReplicateToSameCluster() {
return true;
}
@Override
protected WALEntryFilter getScopeWALEntryFilter() {
// we do not care about scope. We replicate everything.
return null;
}
static class RegionReplicaOutputSink extends OutputSink {
private RegionReplicaSinkWriter sinkWriter;
public RegionReplicaOutputSink(PipelineController controller, EntryBuffers entryBuffers,
ClusterConnection connection, ExecutorService pool, int numWriters, int operationTimeout) {
super(controller, entryBuffers, numWriters);
this.sinkWriter = new RegionReplicaSinkWriter(this, connection, pool, operationTimeout);
}
@Override
public void append(RegionEntryBuffer buffer) throws IOException {
List<Entry> entries = buffer.getEntryBuffer();
if (entries.isEmpty() || entries.get(0).getEdit().getCells().isEmpty()) {
return;
}
sinkWriter.append(buffer.getTableName(), buffer.getEncodedRegionName(),
entries.get(0).getEdit().getCells().get(0).getRow(), entries);
}
@Override
public boolean flush() throws IOException {
// nothing much to do for now. Wait for the Writer threads to finish up
// append()'ing the data.
entryBuffers.waitUntilDrained();
return super.flush();
}
@Override
public List<Path> finishWritingAndClose() throws IOException {
finishWriting();
return null;
}
@Override
public Map<byte[], Long> getOutputCounts() {
return null; // only used in tests
}
@Override
public int getNumberOfRecoveredRegions() {
return 0;
}
AtomicLong getSkippedEditsCounter() {
return skippedEdits;
}
}
static class RegionReplicaSinkWriter extends SinkWriter {
RegionReplicaOutputSink sink;
ClusterConnection connection;
RpcControllerFactory rpcControllerFactory;
RpcRetryingCallerFactory rpcRetryingCallerFactory;
int operationTimeout;
ExecutorService pool;
Cache<TableName, Boolean> disabledAndDroppedTables;
public RegionReplicaSinkWriter(RegionReplicaOutputSink sink, ClusterConnection connection,
ExecutorService pool, int operationTimeout) {
this.sink = sink;
this.connection = connection;
this.operationTimeout = operationTimeout;
this.rpcRetryingCallerFactory
= RpcRetryingCallerFactory.instantiate(connection.getConfiguration());
this.rpcControllerFactory = RpcControllerFactory.instantiate(connection.getConfiguration());
this.pool = pool;
int nonExistentTableCacheExpiryMs = connection.getConfiguration()
.getInt("hbase.region.replica.replication.cache.disabledAndDroppedTables.expiryMs", 5000);
// A cache for non-existent tables with a default expiry of 5 sec. This means that if the
// table is created again with the same name, we might fail to replicate for that amount of
// time. But this cache prevents overloading meta with requests for every edit from a deleted table.
disabledAndDroppedTables = CacheBuilder.newBuilder()
.expireAfterWrite(nonExistentTableCacheExpiryMs, TimeUnit.MILLISECONDS)
.initialCapacity(10)
.maximumSize(1000)
.build();
}
public void append(TableName tableName, byte[] encodedRegionName, byte[] row,
List<Entry> entries) throws IOException {
if (disabledAndDroppedTables.getIfPresent(tableName) != null) {
sink.getSkippedEditsCounter().incrementAndGet();
return;
}
// get the replicas of the primary region
RegionLocations locations = null;
try {
locations = getRegionLocations(connection, tableName, row, true, 0);
if (locations == null) {
throw new HBaseIOException("Cannot locate locations for "
+ tableName + ", row:" + Bytes.toStringBinary(row));
}
} catch (TableNotFoundException e) {
disabledAndDroppedTables.put(tableName, Boolean.TRUE); // put to cache. Value ignored
// skip this entry
sink.getSkippedEditsCounter().addAndGet(entries.size());
return;
}
if (locations.size() == 1) {
return;
}
ArrayList<Future<ReplicateWALEntryResponse>> tasks
= new ArrayList<Future<ReplicateWALEntryResponse>>(2);
// check whether we should still replay this entry. If the regions are changed, or the
// entry is not coming from the primary region, filter it out.
HRegionLocation primaryLocation = locations.getDefaultRegionLocation();
if (!Bytes.equals(primaryLocation.getRegionInfo().getEncodedNameAsBytes(),
encodedRegionName)) {
sink.getSkippedEditsCounter().addAndGet(entries.size());
return;
}
// All passed entries should belong to one region because they come from the EntryBuffers
// split per region. But the regions might split and merge (unlike the log recovery case).
for (int replicaId = 0; replicaId < locations.size(); replicaId++) {
HRegionLocation location = locations.getRegionLocation(replicaId);
if (!RegionReplicaUtil.isDefaultReplica(replicaId)) {
HRegionInfo regionInfo = location == null
? RegionReplicaUtil.getRegionInfoForReplica(
locations.getDefaultRegionLocation().getRegionInfo(), replicaId)
: location.getRegionInfo();
RegionReplicaReplayCallable callable = new RegionReplicaReplayCallable(connection,
rpcControllerFactory, tableName, location, regionInfo, row, entries,
sink.getSkippedEditsCounter());
Future<ReplicateWALEntryResponse> task = pool.submit(
new RetryingRpcCallable<ReplicateWALEntryResponse>(rpcRetryingCallerFactory,
callable, operationTimeout));
tasks.add(task);
}
}
boolean tasksCancelled = false;
for (Future<ReplicateWALEntryResponse> task : tasks) {
try {
task.get();
} catch (InterruptedException e) {
throw new InterruptedIOException(e.getMessage());
} catch (ExecutionException e) {
Throwable cause = e.getCause();
if (cause instanceof IOException) {
// The table can be disabled or dropped at this time. For disabled tables, we have no
// cheap mechanism to detect this case because meta does not contain this information.
// HConnection.isTableDisabled() is a zk call which we cannot do for every replay RPC.
// So instead we start the replay RPC with retries and, if we get an IOException
// (which might surface as a SocketTimeoutException, RetriesExhaustedException or similar),
// check whether the table is dropped or disabled.
if (cause instanceof TableNotFoundException || connection.isTableDisabled(tableName)) {
disabledAndDroppedTables.put(tableName, Boolean.TRUE); // put to cache for later.
if (!tasksCancelled) {
sink.getSkippedEditsCounter().addAndGet(entries.size());
tasksCancelled = true; // so that we do not add to skipped counter again
}
continue;
}
// otherwise rethrow
throw (IOException)cause;
}
// unexpected exception
throw new IOException(cause);
}
}
}
}
static class RetryingRpcCallable<V> implements Callable<V> {
RpcRetryingCallerFactory factory;
RetryingCallable<V> callable;
int timeout;
public RetryingRpcCallable(RpcRetryingCallerFactory factory, RetryingCallable<V> callable,
int timeout) {
this.factory = factory;
this.callable = callable;
this.timeout = timeout;
}
@Override
public V call() throws Exception {
return factory.<V>newCaller().callWithRetries(callable, timeout);
}
}
/**
* Calls replay on the passed edits for the given set of entries belonging to the region. It skips
* the entry if the region boundaries have changed or the region is gone.
*/
static class RegionReplicaReplayCallable
extends RegionAdminServiceCallable<ReplicateWALEntryResponse> {
// replicaId of the region replica that we want to replicate to
private final int replicaId;
private final List<Entry> entries;
private final byte[] initialEncodedRegionName;
private final AtomicLong skippedEntries;
private final RpcControllerFactory rpcControllerFactory;
private boolean skip;
public RegionReplicaReplayCallable(ClusterConnection connection,
RpcControllerFactory rpcControllerFactory, TableName tableName,
HRegionLocation location, HRegionInfo regionInfo, byte[] row,List<Entry> entries,
AtomicLong skippedEntries) {
super(connection, location, tableName, row);
this.replicaId = regionInfo.getReplicaId();
this.entries = entries;
this.rpcControllerFactory = rpcControllerFactory;
this.skippedEntries = skippedEntries;
this.initialEncodedRegionName = regionInfo.getEncodedNameAsBytes();
}
@Override
public HRegionLocation getLocation(boolean useCache) throws IOException {
RegionLocations rl = getRegionLocations(connection, tableName, row, useCache, replicaId);
if (rl == null) {
throw new HBaseIOException(getExceptionMessage());
}
location = rl.getRegionLocation(replicaId);
if (location == null) {
throw new HBaseIOException(getExceptionMessage());
}
// check whether we should still replay this entry. If the regions are changed, or the
// entry is not coming from the primary region, filter it out because we do not need it.
// Regions can change because of (1) region split (2) region merge (3) table recreated
if (!Bytes.equals(location.getRegionInfo().getEncodedNameAsBytes(),
initialEncodedRegionName)) {
skip = true;
return null;
}
return location;
}
@Override
public ReplicateWALEntryResponse call(int timeout) throws IOException {
return replayToServer(this.entries, timeout);
}
private ReplicateWALEntryResponse replayToServer(List<Entry> entries, int timeout)
throws IOException {
if (entries.isEmpty() || skip) {
skippedEntries.incrementAndGet();
return ReplicateWALEntryResponse.newBuilder().build();
}
Entry[] entriesArray = new Entry[entries.size()];
entriesArray = entries.toArray(entriesArray);
// set the region name for the target region replica
Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p =
ReplicationProtbufUtil.buildReplicateWALEntryRequest(
entriesArray, location.getRegionInfo().getEncodedNameAsBytes());
try {
PayloadCarryingRpcController controller = rpcControllerFactory.newController(p.getSecond());
controller.setCallTimeout(timeout);
controller.setPriority(tableName);
return stub.replay(controller, p.getFirst());
} catch (ServiceException se) {
throw ProtobufUtil.getRemoteException(se);
}
}
@Override
protected String getExceptionMessage() {
return super.getExceptionMessage() + " table=" + tableName
+ " ,replica=" + replicaId + ", row=" + Bytes.toStringBinary(row);
}
}
private static RegionLocations getRegionLocations(
ClusterConnection connection, TableName tableName, byte[] row,
boolean useCache, int replicaId)
throws RetriesExhaustedException, DoNotRetryIOException, InterruptedIOException {
RegionLocations rl;
try {
rl = connection.locateRegion(tableName, row, useCache, true, replicaId);
} catch (DoNotRetryIOException e) {
throw e;
} catch (RetriesExhaustedException e) {
throw e;
} catch (InterruptedIOException e) {
throw e;
} catch (IOException e) {
throw new RetriesExhaustedException("Can't get the location", e);
}
if (rl == null) {
throw new RetriesExhaustedException("Can't get the locations");
}
return rl;
}
}
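/*
 * Tuning sketch (illustrative; the values listed are the defaults read in init() and
 * getDefaultThreadPool() above — how they are wired into hbase-site.xml is an assumption):
 *
 *   hbase.region.replica.replication.writer.threads                            3
 *   hbase.region.replica.replication.buffersize                                134217728 (128 MB)
 *   hbase.region.replica.replication.threads.max                               256
 *   hbase.region.replica.replication.threads.core                              16
 *   hbase.region.replica.replication.threads.keepalivetime                     60 (seconds)
 *   hbase.region.replica.replication.cache.disabledAndDroppedTables.expiryMs   5000
 */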
| apache-2.0 |
SAT-Hadoop/hadoop-2.6.0 | share/doc/hadoop/api/org/apache/hadoop/yarn/exceptions/class-use/ApplicationIdNotProvidedException.html | 6481 | <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.6.0_45) on Thu Nov 13 21:22:01 UTC 2014 -->
<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
<TITLE>
Uses of Class org.apache.hadoop.yarn.exceptions.ApplicationIdNotProvidedException (Apache Hadoop Main 2.6.0 API)
</TITLE>
<META NAME="date" CONTENT="2014-11-13">
<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../../../stylesheet.css" TITLE="Style">
<SCRIPT type="text/javascript">
function windowTitle()
{
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class org.apache.hadoop.yarn.exceptions.ApplicationIdNotProvidedException (Apache Hadoop Main 2.6.0 API)";
}
}
</SCRIPT>
<NOSCRIPT>
</NOSCRIPT>
</HEAD>
<BODY BGCOLOR="white" onload="windowTitle();">
<HR>
<!-- ========= START OF TOP NAVBAR ======= -->
<A NAME="navbar_top"><!-- --></A>
<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_top_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../org/apache/hadoop/yarn/exceptions/ApplicationIdNotProvidedException.html" title="class in org.apache.hadoop.yarn.exceptions"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../../index.html?org/apache/hadoop/yarn/exceptions//class-useApplicationIdNotProvidedException.html" target="_top"><B>FRAMES</B></A>
<A HREF="ApplicationIdNotProvidedException.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_top"></A>
<!-- ========= END OF TOP NAVBAR ========= -->
<HR>
<CENTER>
<H2>
<B>Uses of Class<br>org.apache.hadoop.yarn.exceptions.ApplicationIdNotProvidedException</B></H2>
</CENTER>
No usage of org.apache.hadoop.yarn.exceptions.ApplicationIdNotProvidedException
<P>
<HR>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<A NAME="navbar_bottom"><!-- --></A>
<A HREF="#skip-navbar_bottom" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_bottom_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../org/apache/hadoop/yarn/exceptions/ApplicationIdNotProvidedException.html" title="class in org.apache.hadoop.yarn.exceptions"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../../index.html?org/apache/hadoop/yarn/exceptions//class-useApplicationIdNotProvidedException.html" target="_top"><B>FRAMES</B></A>
<A HREF="ApplicationIdNotProvidedException.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_bottom"></A>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<HR>
Copyright © 2014 <a href="http://www.apache.org">Apache Software Foundation</a>. All Rights Reserved.
</BODY>
</HTML>
| apache-2.0 |
snicoll/initializr | initializr-actuator/src/main/java/io/spring/initializr/actuate/stat/ProjectRequestDocumentFactory.java | 5855 | /*
* Copyright 2012-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.spring.initializr.actuate.stat;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import io.spring.initializr.actuate.stat.ProjectRequestDocument.ClientInformation;
import io.spring.initializr.actuate.stat.ProjectRequestDocument.DependencyInformation;
import io.spring.initializr.actuate.stat.ProjectRequestDocument.ErrorStateInformation;
import io.spring.initializr.actuate.stat.ProjectRequestDocument.VersionInformation;
import io.spring.initializr.generator.version.Version;
import io.spring.initializr.metadata.InitializrMetadata;
import io.spring.initializr.web.project.ProjectFailedEvent;
import io.spring.initializr.web.project.ProjectRequest;
import io.spring.initializr.web.project.ProjectRequestEvent;
import io.spring.initializr.web.project.WebProjectRequest;
import io.spring.initializr.web.support.Agent;
import org.springframework.util.StringUtils;
/**
* Create {@link ProjectRequestDocument} instances.
*
* @author Stephane Nicoll
*/
public class ProjectRequestDocumentFactory {
public ProjectRequestDocument createDocument(ProjectRequestEvent event) {
InitializrMetadata metadata = event.getMetadata();
ProjectRequest request = event.getProjectRequest();
ProjectRequestDocument document = new ProjectRequestDocument();
document.setGenerationTimestamp(event.getTimestamp());
document.setGroupId(request.getGroupId());
document.setArtifactId(request.getArtifactId());
document.setPackageName(request.getPackageName());
document.setVersion(determineVersionInformation(request));
document.setClient(determineClientInformation(request));
document.setJavaVersion(request.getJavaVersion());
if (StringUtils.hasText(request.getJavaVersion())
&& metadata.getJavaVersions().get(request.getJavaVersion()) == null) {
document.triggerError().setJavaVersion(true);
}
document.setLanguage(request.getLanguage());
if (StringUtils.hasText(request.getLanguage()) && metadata.getLanguages().get(request.getLanguage()) == null) {
document.triggerError().setLanguage(true);
}
document.setPackaging(request.getPackaging());
if (StringUtils.hasText(request.getPackaging())
&& metadata.getPackagings().get(request.getPackaging()) == null) {
document.triggerError().setPackaging(true);
}
document.setType(request.getType());
document.setBuildSystem(determineBuildSystem(request));
if (StringUtils.hasText(request.getType()) && metadata.getTypes().get(request.getType()) == null) {
document.triggerError().setType(true);
}
// Let's not rely on the resolved dependencies here
List<String> dependencies = new ArrayList<>(request.getDependencies());
List<String> validDependencies = dependencies.stream()
.filter((id) -> metadata.getDependencies().get(id) != null).collect(Collectors.toList());
document.setDependencies(new DependencyInformation(validDependencies));
List<String> invalidDependencies = dependencies.stream().filter((id) -> (!validDependencies.contains(id)))
.collect(Collectors.toList());
if (!invalidDependencies.isEmpty()) {
document.triggerError().triggerInvalidDependencies(invalidDependencies);
}
// Let's make sure that the document is flagged as invalid no matter what
if (event instanceof ProjectFailedEvent) {
ErrorStateInformation errorState = document.triggerError();
ProjectFailedEvent failed = (ProjectFailedEvent) event;
if (failed.getCause() != null) {
errorState.setMessage(failed.getCause().getMessage());
}
}
return document;
}
private String determineBuildSystem(ProjectRequest request) {
String type = request.getType();
String[] elements = type.split("-");
return (elements.length == 2) ? elements[0] : null;
}
private VersionInformation determineVersionInformation(ProjectRequest request) {
Version version = Version.safeParse(request.getBootVersion());
if (version != null && version.getMajor() != null) {
return new VersionInformation(version);
}
return null;
}
private ClientInformation determineClientInformation(ProjectRequest request) {
if (request instanceof WebProjectRequest) {
WebProjectRequest webProjectRequest = (WebProjectRequest) request;
Agent agent = determineAgent(webProjectRequest);
String ip = determineIp(webProjectRequest);
String country = determineCountry(webProjectRequest);
if (agent != null || ip != null || country != null) {
return new ClientInformation(agent, ip, country);
}
}
return null;
}
private Agent determineAgent(WebProjectRequest request) {
String userAgent = (String) request.getParameters().get("user-agent");
if (StringUtils.hasText(userAgent)) {
return Agent.fromUserAgent(userAgent);
}
return null;
}
private String determineIp(WebProjectRequest request) {
String candidate = (String) request.getParameters().get("cf-connecting-ip");
return (StringUtils.hasText(candidate)) ? candidate : (String) request.getParameters().get("x-forwarded-for");
}
private String determineCountry(WebProjectRequest request) {
String candidate = (String) request.getParameters().get("cf-ipcountry");
if (StringUtils.hasText(candidate) && !"xx".equalsIgnoreCase(candidate)) {
return candidate;
}
return null;
}
}
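/*
 * Behaviour sketch for determineBuildSystem (illustrative; the type values shown are
 * assumptions, not taken from this file): a request type of "maven-project" yields
 * "maven" and "gradle-build" yields "gradle", while any type that does not split into
 * exactly two '-'-separated elements yields null.
 */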
| apache-2.0 |
miguelpalacio/sl4a | docs/UsefulLinks.md | 164 | # Useful Links #
## External Editors ##
Touchqode has syntax highlighting and integrates nicely with sl4a.
https://market.android.com/details?id=com.touchqode.editor | apache-2.0 |
patrickfav/tuwien | master/swt workspace/HTMLParser/src/org/htmlparser/util/ParserException.java | 1512 | // HTMLParser Library - A java-based parser for HTML
// http://htmlparser.org
// Copyright (C) 2006 Claude Duguay
//
// Revision Control Information
//
// $URL: https://svn.sourceforge.net/svnroot/htmlparser/trunk/lexer/src/main/java/org/htmlparser/util/ParserException.java $
// $Author: derrickoswald $
// $Date: 2006-09-16 10:44:17 -0400 (Sat, 16 Sep 2006) $
// $Revision: 4 $
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the Common Public License; either
// version 1.0 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// Common Public License for more details.
//
// You should have received a copy of the Common Public License
// along with this library; if not, the license is available from
// the Open Source Initiative (OSI) website:
// http://opensource.org/licenses/cpl1.0.php
package org.htmlparser.util;
/**
* Library-specific support for chained exceptions.
*
* @see ChainedException
**/
public class ParserException
extends ChainedException
{
public ParserException() {}
public ParserException(String message)
{
super(message);
}
public ParserException(Throwable throwable)
{
super(throwable);
}
public ParserException(String message, Throwable throwable)
{
super(message, throwable);
}
}
| apache-2.0 |
OnurKirkizoglu/master_thesis | at.jku.sea.cloud/src/main/java/at/jku/sea/cloud/exceptions/ArtifactNotPushOrPullableException.java | 328 | package at.jku.sea.cloud.exceptions;
public class ArtifactNotPushOrPullableException extends RuntimeException {
private static final long serialVersionUID = 1L;
public ArtifactNotPushOrPullableException(final long aid) {
super("artifact (id=" + aid + ") references (type, package, project) existing only in WS");
}
} | apache-2.0 |
tecknowledgeable/hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java | 16024 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.security.PrivilegedAction;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
import org.apache.hadoop.hdfs.server.namenode.FSImage;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.namenode.NNUpgradeUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.tools.DFSHAAdmin;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
/**
* Tool which allows the standby node's storage directories to be bootstrapped
* by copying the latest namespace snapshot from the active namenode. This is
* used when first configuring an HA cluster.
*/
@InterfaceAudience.Private
public class BootstrapStandby implements Tool, Configurable {
private static final Log LOG = LogFactory.getLog(BootstrapStandby.class);
private String nsId;
private String nnId;
private String otherNNId;
private URL otherHttpAddr;
private InetSocketAddress otherIpcAddr;
private Collection<URI> dirsToFormat;
private List<URI> editUrisToFormat;
private List<URI> sharedEditsUris;
private Configuration conf;
private boolean force = false;
private boolean interactive = true;
private boolean skipSharedEditsCheck = false;
// Exit/return codes.
static final int ERR_CODE_FAILED_CONNECT = 2;
static final int ERR_CODE_INVALID_VERSION = 3;
// Skip 4 - was used in previous versions, but no longer returned.
static final int ERR_CODE_ALREADY_FORMATTED = 5;
static final int ERR_CODE_LOGS_UNAVAILABLE = 6;
@Override
public int run(String[] args) throws Exception {
parseArgs(args);
parseConfAndFindOtherNN();
NameNode.checkAllowFormat(conf);
InetSocketAddress myAddr = NameNode.getAddress(conf);
SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, myAddr.getHostName());
return SecurityUtil.doAsLoginUserOrFatal(new PrivilegedAction<Integer>() {
@Override
public Integer run() {
try {
return doRun();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
});
}
private void parseArgs(String[] args) {
for (String arg : args) {
if ("-force".equals(arg)) {
force = true;
} else if ("-nonInteractive".equals(arg)) {
interactive = false;
} else if ("-skipSharedEditsCheck".equals(arg)) {
skipSharedEditsCheck = true;
} else {
printUsage();
throw new HadoopIllegalArgumentException(
"Illegal argument: " + arg);
}
}
}
private void printUsage() {
System.err.println("Usage: " + this.getClass().getSimpleName() +
" [-force] [-nonInteractive] [-skipSharedEditsCheck]");
}
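  // Illustrative invocation (an assumption for clarity, not generated output:
  // it presumes the standard "hdfs namenode" entry point dispatches to this
  // tool; the flags correspond to parseArgs() above):
  //
  //   hdfs namenode -bootstrapStandby -nonInteractive
  //   hdfs namenode -bootstrapStandby -force -skipSharedEditsCheck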
private NamenodeProtocol createNNProtocolProxy()
throws IOException {
return NameNodeProxies.createNonHAProxy(getConf(),
otherIpcAddr, NamenodeProtocol.class,
UserGroupInformation.getLoginUser(), true)
.getProxy();
}
private int doRun() throws IOException {
NamenodeProtocol proxy = createNNProtocolProxy();
NamespaceInfo nsInfo;
boolean isUpgradeFinalized;
try {
nsInfo = proxy.versionRequest();
isUpgradeFinalized = proxy.isUpgradeFinalized();
} catch (IOException ioe) {
LOG.fatal("Unable to fetch namespace information from active NN at " +
otherIpcAddr + ": " + ioe.getMessage());
if (LOG.isDebugEnabled()) {
LOG.debug("Full exception trace", ioe);
}
return ERR_CODE_FAILED_CONNECT;
}
if (!checkLayoutVersion(nsInfo)) {
LOG.fatal("Layout version on remote node (" + nsInfo.getLayoutVersion()
+ ") does not match " + "this node's layout version ("
+ HdfsConstants.NAMENODE_LAYOUT_VERSION + ")");
return ERR_CODE_INVALID_VERSION;
}
System.out.println(
"=====================================================\n" +
"About to bootstrap Standby ID " + nnId + " from:\n" +
" Nameservice ID: " + nsId + "\n" +
" Other Namenode ID: " + otherNNId + "\n" +
" Other NN's HTTP address: " + otherHttpAddr + "\n" +
" Other NN's IPC address: " + otherIpcAddr + "\n" +
" Namespace ID: " + nsInfo.getNamespaceID() + "\n" +
" Block pool ID: " + nsInfo.getBlockPoolID() + "\n" +
" Cluster ID: " + nsInfo.getClusterID() + "\n" +
" Layout version: " + nsInfo.getLayoutVersion() + "\n" +
" isUpgradeFinalized: " + isUpgradeFinalized + "\n" +
"=====================================================");
NNStorage storage = new NNStorage(conf, dirsToFormat, editUrisToFormat);
if (!isUpgradeFinalized) {
      // the remote NameNode is in the upgrade state, so this NameNode should
      // also create the previous directory. First prepare the upgrade and
      // rename the current dir to previous.tmp.
LOG.info("The active NameNode is in Upgrade. " +
"Prepare the upgrade for the standby NameNode as well.");
if (!doPreUpgrade(storage, nsInfo)) {
return ERR_CODE_ALREADY_FORMATTED;
}
} else if (!format(storage, nsInfo)) { // prompt the user to format storage
return ERR_CODE_ALREADY_FORMATTED;
}
// download the fsimage from active namenode
int download = downloadImage(storage, proxy);
if (download != 0) {
return download;
}
// finish the upgrade: rename previous.tmp to previous
if (!isUpgradeFinalized) {
doUpgrade(storage);
}
return 0;
}
/**
   * Iterate over all the storage directories, checking whether each one
   * should be formatted. Format the storage if necessary and allowed by the
   * user.
   * @return true if the storage was formatted, false if the user declined
*/
private boolean format(NNStorage storage, NamespaceInfo nsInfo)
throws IOException {
// Check with the user before blowing away data.
if (!Storage.confirmFormat(storage.dirIterable(null), force, interactive)) {
storage.close();
return false;
} else {
// Format the storage (writes VERSION file)
storage.format(nsInfo);
return true;
}
}
/**
* This is called when using bootstrapStandby for HA upgrade. The SBN should
   * also create the previous directory so that, when it later starts, it understands
* that the cluster is in the upgrade state. This function renames the old
* current directory to previous.tmp.
*/
private boolean doPreUpgrade(NNStorage storage, NamespaceInfo nsInfo)
throws IOException {
boolean isFormatted = false;
Map<StorageDirectory, StorageState> dataDirStates =
new HashMap<StorageDirectory, StorageState>();
try {
isFormatted = FSImage.recoverStorageDirs(StartupOption.UPGRADE, storage,
dataDirStates);
if (dataDirStates.values().contains(StorageState.NOT_FORMATTED)) {
        // recoverStorageDirs returns true if any directory is formatted; treat
        // the storage as unformatted when any directory is not formatted
isFormatted = false;
System.err.println("The original storage directory is not formatted.");
}
} catch (InconsistentFSStateException e) {
      // the storage is in a bad state; log it and fall through so that it can
      // be (re)formatted below
LOG.warn("The storage directory is in an inconsistent state", e);
} finally {
storage.unlockAll();
}
    // if an InconsistentFSStateException was thrown or the storage is not
    // formatted, format the storage. Although this formatting is done through
    // the new software, we should still be fine, since in an HA setup the SBN
    // is rolled back through "-bootstrapStandby".
if (!isFormatted && !format(storage, nsInfo)) {
return false;
}
// make sure there is no previous directory
FSImage.checkUpgrade(storage);
// Do preUpgrade for each directory
for (Iterator<StorageDirectory> it = storage.dirIterator(false);
it.hasNext();) {
StorageDirectory sd = it.next();
try {
NNUpgradeUtil.renameCurToTmp(sd);
} catch (IOException e) {
LOG.error("Failed to move aside pre-upgrade storage " +
"in image directory " + sd.getRoot(), e);
throw e;
}
}
storage.setStorageInfo(nsInfo);
storage.setBlockPoolID(nsInfo.getBlockPoolID());
return true;
}
private void doUpgrade(NNStorage storage) throws IOException {
for (Iterator<StorageDirectory> it = storage.dirIterator(false);
it.hasNext();) {
StorageDirectory sd = it.next();
NNUpgradeUtil.doUpgrade(sd, storage);
}
}
private int downloadImage(NNStorage storage, NamenodeProtocol proxy)
throws IOException {
// Load the newly formatted image, using all of the directories
// (including shared edits)
final long imageTxId = proxy.getMostRecentCheckpointTxId();
final long curTxId = proxy.getTransactionID();
FSImage image = new FSImage(conf);
try {
image.getStorage().setStorageInfo(storage);
image.initEditLog(StartupOption.REGULAR);
assert image.getEditLog().isOpenForRead() :
"Expected edit log to be open for read";
// Ensure that we have enough edits already in the shared directory to
// start up from the last checkpoint on the active.
if (!skipSharedEditsCheck &&
!checkLogsAvailableForRead(image, imageTxId, curTxId)) {
return ERR_CODE_LOGS_UNAVAILABLE;
}
image.getStorage().writeTransactionIdFileToStorage(curTxId);
// Download that checkpoint into our storage directories.
MD5Hash hash = TransferFsImage.downloadImageToStorage(
otherHttpAddr, imageTxId, storage, true);
image.saveDigestAndRenameCheckpointImage(NameNodeFile.IMAGE, imageTxId,
hash);
} catch (IOException ioe) {
image.close();
throw ioe;
}
return 0;
}
private boolean checkLogsAvailableForRead(FSImage image, long imageTxId,
long curTxIdOnOtherNode) {
if (imageTxId == curTxIdOnOtherNode) {
// The other node hasn't written any logs since the last checkpoint.
// This can be the case if the NN was freshly formatted as HA, and
// then started in standby mode, so it has no edit logs at all.
return true;
}
long firstTxIdInLogs = imageTxId + 1;
assert curTxIdOnOtherNode >= firstTxIdInLogs :
"first=" + firstTxIdInLogs + " onOtherNode=" + curTxIdOnOtherNode;
try {
Collection<EditLogInputStream> streams =
image.getEditLog().selectInputStreams(
firstTxIdInLogs, curTxIdOnOtherNode, null, true);
for (EditLogInputStream stream : streams) {
IOUtils.closeStream(stream);
}
return true;
} catch (IOException e) {
String msg = "Unable to read transaction ids " +
firstTxIdInLogs + "-" + curTxIdOnOtherNode +
" from the configured shared edits storage " +
Joiner.on(",").join(sharedEditsUris) + ". " +
"Please copy these logs into the shared edits storage " +
"or call saveNamespace on the active node.\n" +
"Error: " + e.getLocalizedMessage();
if (LOG.isDebugEnabled()) {
LOG.fatal(msg, e);
} else {
LOG.fatal(msg);
}
return false;
}
}
private boolean checkLayoutVersion(NamespaceInfo nsInfo) throws IOException {
return (nsInfo.getLayoutVersion() == HdfsConstants.NAMENODE_LAYOUT_VERSION);
}
private void parseConfAndFindOtherNN() throws IOException {
Configuration conf = getConf();
nsId = DFSUtil.getNamenodeNameServiceId(conf);
if (!HAUtil.isHAEnabled(conf, nsId)) {
throw new HadoopIllegalArgumentException(
"HA is not enabled for this namenode.");
}
nnId = HAUtil.getNameNodeId(conf, nsId);
NameNode.initializeGenericKeys(conf, nsId, nnId);
if (!HAUtil.usesSharedEditsDir(conf)) {
throw new HadoopIllegalArgumentException(
"Shared edits storage is not enabled for this namenode.");
}
Configuration otherNode = HAUtil.getConfForOtherNode(conf);
otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
otherIpcAddr = NameNode.getServiceAddress(otherNode, true);
Preconditions.checkArgument(otherIpcAddr.getPort() != 0 &&
!otherIpcAddr.getAddress().isAnyLocalAddress(),
"Could not determine valid IPC address for other NameNode (%s)" +
", got: %s", otherNNId, otherIpcAddr);
final String scheme = DFSUtil.getHttpClientScheme(conf);
otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(
otherIpcAddr.getHostName(), otherNode, scheme).toURL();
dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
conf, false);
sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
}
@Override
public void setConf(Configuration conf) {
this.conf = DFSHAAdmin.addSecurityConfiguration(conf);
}
@Override
public Configuration getConf() {
return conf;
}
public static int run(String[] argv, Configuration conf) throws IOException {
BootstrapStandby bs = new BootstrapStandby();
bs.setConf(conf);
try {
return ToolRunner.run(bs, argv);
} catch (Exception e) {
if (e instanceof IOException) {
throw (IOException)e;
} else {
throw new IOException(e);
}
}
}
}
| apache-2.0 |
vishnu-kumar/ec2-api | ec2api/tests/functional/api/test_snapshots.py | 10696 | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from ec2api.tests.functional import base
from ec2api.tests.functional import config
CONF = config.CONF
class SnapshotTest(base.EC2TestCase):
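    """Happy-path tests for the EC2 snapshot API: creating, describing and
    deleting snapshots, and creating volumes from existing snapshots."""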
def test_create_delete_snapshot(self):
kwargs = {
'Size': 1,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id = data['VolumeId']
clean_vol = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id)
self.get_volume_waiter().wait_available(volume_id)
desc = 'test snapshot'
kwargs = {
'VolumeId': volume_id,
'Description': desc
}
data = self.client.create_snapshot(*[], **kwargs)
snapshot_id = data['SnapshotId']
res_clean = self.addResourceCleanUp(self.client.delete_snapshot,
SnapshotId=snapshot_id)
self.get_snapshot_waiter().wait_available(snapshot_id,
final_set=('completed'))
self.assertEqual(desc, data['Description'])
self.assertEqual(volume_id, data['VolumeId'])
self.assertEqual(1, data['VolumeSize'])
self.assertNotEmpty(data.get('State', ''))
if 'Encrypted' in data:
self.assertFalse(data['Encrypted'])
self.assertIsNotNone(data['StartTime'])
data = self.client.delete_snapshot(SnapshotId=snapshot_id)
self.cancelResourceCleanUp(res_clean)
self.get_snapshot_waiter().wait_delete(snapshot_id)
data = self.client.delete_volume(VolumeId=volume_id)
self.cancelResourceCleanUp(clean_vol)
self.get_volume_waiter().wait_delete(volume_id)
def test_describe_snapshots(self):
kwargs = {
'Size': 1,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id = data['VolumeId']
clean_vol = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id)
self.get_volume_waiter().wait_available(volume_id)
desc = 'test snapshot'
kwargs = {
'VolumeId': volume_id,
'Description': desc
}
data = self.client.create_snapshot(*[], **kwargs)
snapshot_id = data['SnapshotId']
ownerId = data['OwnerId']
res_clean = self.addResourceCleanUp(self.client.delete_snapshot,
SnapshotId=snapshot_id)
self.get_snapshot_waiter().wait_available(snapshot_id,
final_set=('completed'))
self.assertEqual(desc, data['Description'])
self.assertEqual(volume_id, data['VolumeId'])
self.assertEqual(1, data['VolumeSize'])
self.assertNotEmpty(data.get('State', ''))
if 'Encrypted' in data:
self.assertFalse(data['Encrypted'])
self.assertIsNotNone(data['StartTime'])
data = self.client.describe_snapshots(SnapshotIds=[snapshot_id])
self.assertEqual(1, len(data['Snapshots']))
data = data['Snapshots'][0]
self.assertEqual(snapshot_id, data['SnapshotId'])
self.assertEqual(desc, data['Description'])
self.assertEqual(volume_id, data['VolumeId'])
self.assertEqual(1, data['VolumeSize'])
self.assertNotEmpty(data.get('State', ''))
if 'Encrypted' in data:
self.assertFalse(data['Encrypted'])
self.assertIsNotNone(data['StartTime'])
data = self.client.describe_snapshots(OwnerIds=[ownerId])
data = [s for s in data['Snapshots'] if s['SnapshotId'] == snapshot_id]
self.assertEqual(1, len(data))
data = self.client.delete_snapshot(SnapshotId=snapshot_id)
self.cancelResourceCleanUp(res_clean)
self.get_snapshot_waiter().wait_delete(snapshot_id)
self.assertRaises('InvalidSnapshot.NotFound',
self.client.describe_snapshots,
SnapshotIds=[snapshot_id])
data = self.client.delete_volume(VolumeId=volume_id)
self.cancelResourceCleanUp(clean_vol)
self.get_volume_waiter().wait_delete(volume_id)
def test_create_volume_from_snapshot(self):
kwargs = {
'Size': 1,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id = data['VolumeId']
clean_vol = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id)
self.get_volume_waiter().wait_available(volume_id)
vol1 = data
desc = 'test snapshot'
kwargs = {
'VolumeId': volume_id,
'Description': desc
}
data = self.client.create_snapshot(*[], **kwargs)
snapshot_id = data['SnapshotId']
res_clean = self.addResourceCleanUp(self.client.delete_snapshot,
SnapshotId=snapshot_id)
self.get_snapshot_waiter().wait_available(snapshot_id,
final_set=('completed'))
kwargs = {
'SnapshotId': snapshot_id,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id2 = data['VolumeId']
clean_vol2 = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id2)
self.get_volume_waiter().wait_available(volume_id2)
self.assertNotEqual(volume_id, volume_id2)
self.assertEqual(vol1['Size'], data['Size'])
self.assertEqual(snapshot_id, data['SnapshotId'])
data = self.client.describe_volumes(
Filters=[{'Name': 'snapshot-id', 'Values': [snapshot_id]}])
self.assertEqual(1, len(data['Volumes']))
self.assertEqual(volume_id2, data['Volumes'][0]['VolumeId'])
data = self.client.delete_snapshot(SnapshotId=snapshot_id)
self.cancelResourceCleanUp(res_clean)
self.get_snapshot_waiter().wait_delete(snapshot_id)
data = self.client.delete_volume(VolumeId=volume_id)
self.cancelResourceCleanUp(clean_vol)
self.get_volume_waiter().wait_delete(volume_id)
data = self.client.delete_volume(VolumeId=volume_id2)
self.cancelResourceCleanUp(clean_vol2)
self.get_volume_waiter().wait_delete(volume_id2)
def test_create_increased_volume_from_snapshot(self):
kwargs = {
'Size': 1,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id = data['VolumeId']
clean_vol = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id)
self.get_volume_waiter().wait_available(volume_id)
desc = 'test snapshot'
kwargs = {
'VolumeId': volume_id,
'Description': desc
}
data = self.client.create_snapshot(*[], **kwargs)
snapshot_id = data['SnapshotId']
res_clean = self.addResourceCleanUp(self.client.delete_snapshot,
SnapshotId=snapshot_id)
self.get_snapshot_waiter().wait_available(snapshot_id,
final_set=('completed'))
kwargs = {
'Size': 2,
'SnapshotId': snapshot_id,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id2 = data['VolumeId']
clean_vol2 = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id2)
self.get_volume_waiter().wait_available(volume_id2)
self.assertNotEqual(volume_id, volume_id2)
self.assertEqual(2, data['Size'])
self.assertEqual(snapshot_id, data['SnapshotId'])
data = self.client.delete_snapshot(SnapshotId=snapshot_id)
self.cancelResourceCleanUp(res_clean)
self.get_snapshot_waiter().wait_delete(snapshot_id)
data = self.client.delete_volume(VolumeId=volume_id)
self.cancelResourceCleanUp(clean_vol)
self.get_volume_waiter().wait_delete(volume_id)
data = self.client.delete_volume(VolumeId=volume_id2)
self.cancelResourceCleanUp(clean_vol2)
self.get_volume_waiter().wait_delete(volume_id2)
@testtools.skipUnless(CONF.aws.run_incompatible_tests,
"Openstack can't delete volume with snapshots")
def test_delete_volume_with_snapshots(self):
kwargs = {
'Size': 1,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id = data['VolumeId']
clean_vol = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id)
self.get_volume_waiter().wait_available(volume_id)
desc = 'test snapshot'
kwargs = {
'VolumeId': volume_id,
'Description': desc
}
data = self.client.create_snapshot(*[], **kwargs)
snapshot_id = data['SnapshotId']
res_clean = self.addResourceCleanUp(self.client.delete_snapshot,
SnapshotId=snapshot_id)
self.get_snapshot_waiter().wait_available(snapshot_id,
final_set=('completed'))
data = self.client.delete_volume(VolumeId=volume_id)
self.cancelResourceCleanUp(clean_vol)
self.get_volume_waiter().wait_delete(volume_id)
data = self.client.delete_snapshot(SnapshotId=snapshot_id)
self.cancelResourceCleanUp(res_clean)
self.get_snapshot_waiter().wait_delete(snapshot_id)
| apache-2.0 |
zcy421593/aliyun-openapi-cpp-sdk | aliyun-api-rds/2014-08-15/src/ali_rds_release_instance_public_connection.cc | 3520 | #include <stdio.h>
#include "ali_api_core.h"
#include "ali_string_utils.h"
#include "ali_rds.h"
#include "json/value.h"
#include "json/reader.h"
using namespace aliyun;
namespace {
void Json2Type(const Json::Value& value, std::string* item);
void Json2Type(const Json::Value& value, RdsReleaseInstancePublicConnectionResponseType* item);
template<typename T>
class Json2Array {
public:
Json2Array(const Json::Value& value, std::vector<T>* vec) {
if(!value.isArray()) {
return;
}
for(int i = 0; i < value.size(); i++) {
T val;
Json2Type(value[i], &val);
vec->push_back(val);
}
}
};
void Json2Type(const Json::Value& value, std::string* item) {
*item = value.asString();
}
void Json2Type(const Json::Value& value, RdsReleaseInstancePublicConnectionResponseType* item) {
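  // Intentionally left empty: this response has no payload fields to map;
  // RequestId and error fields are handled generically by the caller.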
}
}
int Rds::ReleaseInstancePublicConnection(const RdsReleaseInstancePublicConnectionRequestType& req,
RdsReleaseInstancePublicConnectionResponseType* response,
RdsErrorInfo* error_info) {
std::string str_response;
int status_code;
int ret = 0;
bool parse_success = false;
  std::string scheme = this->use_tls_ ? "https" : "http";
  AliRpcRequest* req_rpc = new AliRpcRequest(version_,
                                             appid_,
                                             secret_,
                                             scheme + "://" + host_);
if((!this->use_tls_) && this->proxy_host_ && this->proxy_host_[0]) {
req_rpc->SetHttpProxy( this->proxy_host_);
}
Json::Value val;
Json::Reader reader;
req_rpc->AddRequestQuery("Action","ReleaseInstancePublicConnection");
if(!req.owner_id.empty()) {
req_rpc->AddRequestQuery("OwnerId", req.owner_id);
}
if(!req.resource_owner_account.empty()) {
req_rpc->AddRequestQuery("ResourceOwnerAccount", req.resource_owner_account);
}
if(!req.resource_owner_id.empty()) {
req_rpc->AddRequestQuery("ResourceOwnerId", req.resource_owner_id);
}
if(!req.db_instance_id.empty()) {
req_rpc->AddRequestQuery("DBInstanceId", req.db_instance_id);
}
if(!req.current_connection_string.empty()) {
req_rpc->AddRequestQuery("CurrentConnectionString", req.current_connection_string);
}
if(!req.owner_account.empty()) {
req_rpc->AddRequestQuery("OwnerAccount", req.owner_account);
}
if(this->region_id_ && this->region_id_[0]) {
req_rpc->AddRequestQuery("RegionId", this->region_id_);
}
if(req_rpc->CommitRequest() != 0) {
if(error_info) {
error_info->code = "connect to host failed";
}
ret = -1;
goto out;
}
status_code = req_rpc->WaitResponseHeaderComplete();
req_rpc->ReadResponseBody(str_response);
if(status_code > 0 && !str_response.empty()){
parse_success = reader.parse(str_response, val);
}
if(!parse_success) {
if(error_info) {
error_info->code = "parse response failed";
}
ret = -1;
goto out;
}
if(status_code!= 200 && error_info && parse_success) {
error_info->request_id = val.isMember("RequestId") ? val["RequestId"].asString(): "";
error_info->code = val.isMember("Code") ? val["Code"].asString(): "";
error_info->host_id = val.isMember("HostId") ? val["HostId"].asString(): "";
error_info->message = val.isMember("Message") ? val["Message"].asString(): "";
}
if(status_code== 200 && response) {
Json2Type(val, response);
}
ret = status_code;
out:
delete req_rpc;
return ret;
}
| apache-2.0 |
sensorstorm/SensorStorm | SensorStorm/src/nl/tno/sensorstorm/particlemapper/package-info.java | 128 | /**
* This package contains classes for mapping between Particles and Tuples.
*/
package nl.tno.sensorstorm.particlemapper; | apache-2.0 |
msbeta/apollo | modules/localization/msf/common/io/velodyne_utility.cc | 6790 | /******************************************************************************
* Copyright 2017 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "modules/localization/msf/common/io/velodyne_utility.h"
#include <pcl/io/pcd_io.h>
#include <yaml-cpp/yaml.h>
#include "cyber/common/log.h"
#include "modules/localization/msf/common/io/pcl_point_types.h"
namespace apollo {
namespace localization {
namespace msf {
namespace velodyne {
void LoadPcds(const std::string& file_path, const unsigned int frame_index,
const Eigen::Affine3d& pose, VelodyneFrame* velodyne_frame,
bool is_global) {
velodyne_frame->frame_index = frame_index;
velodyne_frame->pose = pose;
LoadPcds(file_path, frame_index, pose, &velodyne_frame->pt3ds,
&velodyne_frame->intensities, is_global);
}
void LoadPcds(const std::string& file_path, const unsigned int frame_index,
const Eigen::Affine3d& pose, std::vector<Eigen::Vector3d>* pt3ds,
std::vector<unsigned char>* intensities, bool is_global) {
Eigen::Affine3d pose_inv = pose.inverse();
pcl::PointCloud<PointXYZIT>::Ptr cloud(new pcl::PointCloud<PointXYZIT>);
if (pcl::io::loadPCDFile(file_path, *cloud) >= 0) {
if (cloud->height == 1 || cloud->width == 1) {
AERROR << "Un-organized-point-cloud";
for (unsigned int i = 0; i < cloud->size(); ++i) {
Eigen::Vector3d pt3d;
pt3d[0] = (*cloud)[i].x;
pt3d[1] = (*cloud)[i].y;
pt3d[2] = (*cloud)[i].z;
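        // NaN never compares equal to itself, so this check silently drops
        // points with invalid coordinates.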
if (pt3d[0] == pt3d[0] && pt3d[1] == pt3d[1] && pt3d[2] == pt3d[2]) {
Eigen::Vector3d pt3d_local;
if (is_global) {
pt3d_local = pose_inv * pt3d;
} else {
pt3d_local = pt3d;
}
unsigned char intensity =
static_cast<unsigned char>((*cloud)[i].intensity);
pt3ds->push_back(pt3d_local);
intensities->push_back(intensity);
}
}
} else {
for (unsigned int h = 0; h < cloud->height; ++h) {
for (unsigned int w = 0; w < cloud->width; ++w) {
double x = cloud->at(w, h).x;
double y = cloud->at(w, h).y;
double z = cloud->at(w, h).z;
Eigen::Vector3d pt3d(x, y, z);
if (pt3d[0] == pt3d[0] && pt3d[1] == pt3d[1] && pt3d[2] == pt3d[2]) {
Eigen::Vector3d pt3d_local;
if (is_global) {
pt3d_local = pose_inv * pt3d;
} else {
pt3d_local = pt3d;
}
unsigned char intensity =
static_cast<unsigned char>(cloud->at(w, h).intensity);
pt3ds->push_back(pt3d_local);
intensities->push_back(intensity);
}
}
}
}
} else {
AERROR << "Failed to load PCD file: " << file_path;
}
}
void LoadPcdPoses(const std::string& file_path,
std::vector<Eigen::Affine3d>* poses,
std::vector<double>* timestamps) {
std::vector<unsigned int> pcd_indices;
LoadPcdPoses(file_path, poses, timestamps, &pcd_indices);
}
void LoadPcdPoses(const std::string& file_path,
std::vector<Eigen::Affine3d>* poses,
std::vector<double>* timestamps,
std::vector<unsigned int>* pcd_indices) {
poses->clear();
timestamps->clear();
pcd_indices->clear();
FILE* file = fopen(file_path.c_str(), "r");
if (file) {
unsigned int index;
double timestamp;
double x, y, z;
double qx, qy, qz, qr;
constexpr int kSize = 9;
while (fscanf(file, "%u %lf %lf %lf %lf %lf %lf %lf %lf\n", &index,
×tamp, &x, &y, &z, &qx, &qy, &qz, &qr) == kSize) {
Eigen::Translation3d trans(Eigen::Vector3d(x, y, z));
Eigen::Quaterniond quat(qr, qx, qy, qz);
poses->push_back(trans * quat);
timestamps->push_back(timestamp);
pcd_indices->push_back(index);
}
fclose(file);
} else {
AERROR << "Can't open file to read: " << file_path;
}
}
void LoadPosesAndStds(const std::string& file_path,
std::vector<Eigen::Affine3d>* poses,
std::vector<Eigen::Vector3d>* stds,
std::vector<double>* timestamps) {
poses->clear();
stds->clear();
timestamps->clear();
FILE* file = fopen(file_path.c_str(), "r");
if (file) {
unsigned int index;
double timestamp;
double x, y, z;
double qx, qy, qz, qr;
double std_x, std_y, std_z;
constexpr int kSize = 12;
while (fscanf(file, "%u %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n",
&index, ×tamp, &x, &y, &z, &qx, &qy, &qz, &qr, &std_x,
&std_y, &std_z) == kSize) {
Eigen::Translation3d trans(Eigen::Vector3d(x, y, z));
Eigen::Quaterniond quat(qr, qx, qy, qz);
poses->push_back(trans * quat);
timestamps->push_back(timestamp);
Eigen::Vector3d std;
std << std_x, std_y, std_z;
stds->push_back(std);
}
fclose(file);
} else {
AERROR << "Can't open file to read: " << file_path;
}
}
bool LoadExtrinsic(const std::string& file_path, Eigen::Affine3d* extrinsic) {
YAML::Node config = YAML::LoadFile(file_path);
if (config["transform"]) {
if (config["transform"]["translation"]) {
extrinsic->translation()(0) =
config["transform"]["translation"]["x"].as<double>();
extrinsic->translation()(1) =
config["transform"]["translation"]["y"].as<double>();
extrinsic->translation()(2) =
config["transform"]["translation"]["z"].as<double>();
if (config["transform"]["rotation"]) {
double qx = config["transform"]["rotation"]["x"].as<double>();
double qy = config["transform"]["rotation"]["y"].as<double>();
double qz = config["transform"]["rotation"]["z"].as<double>();
double qw = config["transform"]["rotation"]["w"].as<double>();
extrinsic->linear() =
Eigen::Quaterniond(qw, qx, qy, qz).toRotationMatrix();
return true;
}
}
}
return false;
}
} // namespace velodyne
} // namespace msf
} // namespace localization
} // namespace apollo
| apache-2.0 |
zaeleus/rust | src/test/run-pass/reexported-static-methods-cross-crate.rs | 906 | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:reexported_static_methods.rs
// pretty-expanded FIXME #23616
extern crate reexported_static_methods;
use reexported_static_methods::Foo;
use reexported_static_methods::Baz;
use reexported_static_methods::Boz;
use reexported_static_methods::Bort;
pub fn main() {
assert_eq!(42_isize, Foo::foo());
assert_eq!(84_isize, Baz::bar());
assert!(Boz::boz(1));
assert_eq!("bort()".to_string(), Bort::bort());
}
| apache-2.0 |
WANdisco/amplab-hive | ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFAddMonths.java | 7518 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.ql.udf.generic;
import java.sql.Timestamp;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import org.apache.hadoop.hive.ql.exec.Description;
import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.hadoop.hive.serde2.io.TimestampWritable;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorConverter.TimestampConverter;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
/**
* GenericUDFAddMonths.
*
* Add a number of months to the date. The time part of the string will be
* ignored.
*
*/
@Description(name = "add_months",
value = "_FUNC_(start_date, num_months) - Returns the date that is num_months after start_date.",
extended = "start_date is a string in the format 'yyyy-MM-dd HH:mm:ss' or"
+ " 'yyyy-MM-dd'. num_months is a number. The time part of start_date is "
+ "ignored.\n"
+ "Example:\n " + " > SELECT _FUNC_('2009-08-31', 1) FROM src LIMIT 1;\n" + " '2009-09-30'")
public class GenericUDFAddMonths extends GenericUDF {
private transient SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd");
private transient TimestampConverter timestampConverter;
private transient Converter textConverter;
private transient Converter dateWritableConverter;
private transient Converter intWritableConverter;
private transient PrimitiveCategory inputType1;
private transient PrimitiveCategory inputType2;
private final Calendar calendar = Calendar.getInstance();
private final Text output = new Text();
@Override
public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
if (arguments.length != 2) {
throw new UDFArgumentLengthException("add_months() requires 2 argument, got "
+ arguments.length);
}
if (arguments[0].getCategory() != ObjectInspector.Category.PRIMITIVE) {
throw new UDFArgumentTypeException(0, "Only primitive type arguments are accepted but "
+ arguments[0].getTypeName() + " is passed as first arguments");
}
if (arguments[1].getCategory() != ObjectInspector.Category.PRIMITIVE) {
throw new UDFArgumentTypeException(1, "Only primitive type arguments are accepted but "
+ arguments[1].getTypeName() + " is passed as second arguments");
}
inputType1 = ((PrimitiveObjectInspector) arguments[0]).getPrimitiveCategory();
ObjectInspector outputOI = PrimitiveObjectInspectorFactory.writableStringObjectInspector;
switch (inputType1) {
case STRING:
case VARCHAR:
case CHAR:
inputType1 = PrimitiveCategory.STRING;
textConverter = ObjectInspectorConverters.getConverter(
(PrimitiveObjectInspector) arguments[0],
PrimitiveObjectInspectorFactory.writableStringObjectInspector);
break;
case TIMESTAMP:
timestampConverter = new TimestampConverter((PrimitiveObjectInspector) arguments[0],
PrimitiveObjectInspectorFactory.writableTimestampObjectInspector);
break;
case DATE:
dateWritableConverter = ObjectInspectorConverters.getConverter(
(PrimitiveObjectInspector) arguments[0],
PrimitiveObjectInspectorFactory.writableDateObjectInspector);
break;
default:
throw new UDFArgumentTypeException(0,
"ADD_MONTHS() only takes STRING/TIMESTAMP/DATEWRITABLE types as first argument, got "
+ inputType1);
}
inputType2 = ((PrimitiveObjectInspector) arguments[1]).getPrimitiveCategory();
if (inputType2 != PrimitiveCategory.INT) {
throw new UDFArgumentTypeException(1,
"ADD_MONTHS() only takes INT types as second argument, got " + inputType2);
}
intWritableConverter = ObjectInspectorConverters.getConverter(
(PrimitiveObjectInspector) arguments[1],
PrimitiveObjectInspectorFactory.writableIntObjectInspector);
return outputOI;
}
@Override
public Object evaluate(DeferredObject[] arguments) throws HiveException {
if (arguments[0].get() == null) {
return null;
}
IntWritable toBeAdded = (IntWritable) intWritableConverter.convert(arguments[1].get());
if (toBeAdded == null) {
return null;
}
Date date;
switch (inputType1) {
case STRING:
String dateString = textConverter.convert(arguments[0].get()).toString();
try {
date = formatter.parse(dateString.toString());
} catch (ParseException e) {
return null;
}
break;
case TIMESTAMP:
Timestamp ts = ((TimestampWritable) timestampConverter.convert(arguments[0].get()))
.getTimestamp();
date = ts;
break;
case DATE:
DateWritable dw = (DateWritable) dateWritableConverter.convert(arguments[0].get());
date = dw.get();
break;
default:
throw new UDFArgumentTypeException(0,
"ADD_MONTHS() only takes STRING/TIMESTAMP/DATEWRITABLE types, got " + inputType1);
}
int numMonth = toBeAdded.get();
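    // addMonth() mutates the shared 'calendar' field; the adjusted date is
    // read back from it below.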
addMonth(date, numMonth);
Date newDate = calendar.getTime();
output.set(formatter.format(newDate));
return output;
}
@Override
public String getDisplayString(String[] children) {
return getStandardDisplayString("add_months", children);
}
protected Calendar addMonth(Date d, int numMonths) {
calendar.setTime(d);
boolean lastDatOfMonth = isLastDayOfMonth(calendar);
calendar.add(Calendar.MONTH, numMonths);
if (lastDatOfMonth) {
int maxDd = calendar.getActualMaximum(Calendar.DAY_OF_MONTH);
calendar.set(Calendar.DAY_OF_MONTH, maxDd);
}
return calendar;
}
protected boolean isLastDayOfMonth(Calendar cal) {
int maxDd = cal.getActualMaximum(Calendar.DAY_OF_MONTH);
int dd = cal.get(Calendar.DAY_OF_MONTH);
return dd == maxDd;
}
}
| apache-2.0 |
maingi4/ProtoBuf.Services | ProtoBuf.Services.WebAPI/IProtoMetaProvider.cs | 227 | using System;
using ProtoBuf.Services.Serialization;
namespace ProtoBuf.Services.WebAPI
{
public interface IProtoMetaProvider
{
string GetMetaData(Type type);
TypeMetaData FromJson(byte[] json);
}
} | apache-2.0 |
camp925/camp925.github.io | _includes/callout.html | 134 | <aside class="callout">
<div class="text-vertical-center">
<h1>A HANDS-ON SUMMER CAMP FOR KIDS AGES 6-14</h1>
</div>
</aside>
| apache-2.0 |
awslabs/aws-sdk-cpp | aws-cpp-sdk-rds/include/aws/rds/model/VpnDetails.h | 10611 | /**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rds/RDS_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSStreamFwd.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Xml
{
class XmlNode;
} // namespace Xml
} // namespace Utils
namespace RDS
{
namespace Model
{
/**
* <p>Information about the virtual private network (VPN) between the VMware
* vSphere cluster and the Amazon Web Services website.</p> <p>For more information
* about RDS on VMware, see the <a
* href="https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html">
* RDS on VMware User Guide.</a> </p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/VpnDetails">AWS API
* Reference</a></p>
*/
class AWS_RDS_API VpnDetails
{
public:
VpnDetails();
VpnDetails(const Aws::Utils::Xml::XmlNode& xmlNode);
VpnDetails& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
void OutputToStream(Aws::OStream& ostream, const char* location, unsigned index, const char* locationValue) const;
void OutputToStream(Aws::OStream& oStream, const char* location) const;
/**
* <p>The ID of the VPN.</p>
*/
inline const Aws::String& GetVpnId() const{ return m_vpnId; }
/**
* <p>The ID of the VPN.</p>
*/
inline bool VpnIdHasBeenSet() const { return m_vpnIdHasBeenSet; }
/**
* <p>The ID of the VPN.</p>
*/
inline void SetVpnId(const Aws::String& value) { m_vpnIdHasBeenSet = true; m_vpnId = value; }
/**
* <p>The ID of the VPN.</p>
*/
inline void SetVpnId(Aws::String&& value) { m_vpnIdHasBeenSet = true; m_vpnId = std::move(value); }
/**
* <p>The ID of the VPN.</p>
*/
inline void SetVpnId(const char* value) { m_vpnIdHasBeenSet = true; m_vpnId.assign(value); }
/**
* <p>The ID of the VPN.</p>
*/
inline VpnDetails& WithVpnId(const Aws::String& value) { SetVpnId(value); return *this;}
/**
* <p>The ID of the VPN.</p>
*/
inline VpnDetails& WithVpnId(Aws::String&& value) { SetVpnId(std::move(value)); return *this;}
/**
* <p>The ID of the VPN.</p>
*/
inline VpnDetails& WithVpnId(const char* value) { SetVpnId(value); return *this;}
/**
* <p>The IP address of network traffic from your on-premises data center. A custom
* AZ receives the network traffic.</p>
*/
inline const Aws::String& GetVpnTunnelOriginatorIP() const{ return m_vpnTunnelOriginatorIP; }
/**
* <p>The IP address of network traffic from your on-premises data center. A custom
* AZ receives the network traffic.</p>
*/
inline bool VpnTunnelOriginatorIPHasBeenSet() const { return m_vpnTunnelOriginatorIPHasBeenSet; }
/**
* <p>The IP address of network traffic from your on-premises data center. A custom
* AZ receives the network traffic.</p>
*/
inline void SetVpnTunnelOriginatorIP(const Aws::String& value) { m_vpnTunnelOriginatorIPHasBeenSet = true; m_vpnTunnelOriginatorIP = value; }
/**
* <p>The IP address of network traffic from your on-premises data center. A custom
* AZ receives the network traffic.</p>
*/
inline void SetVpnTunnelOriginatorIP(Aws::String&& value) { m_vpnTunnelOriginatorIPHasBeenSet = true; m_vpnTunnelOriginatorIP = std::move(value); }
/**
* <p>The IP address of network traffic from your on-premises data center. A custom
* AZ receives the network traffic.</p>
*/
inline void SetVpnTunnelOriginatorIP(const char* value) { m_vpnTunnelOriginatorIPHasBeenSet = true; m_vpnTunnelOriginatorIP.assign(value); }
/**
* <p>The IP address of network traffic from your on-premises data center. A custom
* AZ receives the network traffic.</p>
*/
inline VpnDetails& WithVpnTunnelOriginatorIP(const Aws::String& value) { SetVpnTunnelOriginatorIP(value); return *this;}
/**
* <p>The IP address of network traffic from your on-premises data center. A custom
* AZ receives the network traffic.</p>
*/
inline VpnDetails& WithVpnTunnelOriginatorIP(Aws::String&& value) { SetVpnTunnelOriginatorIP(std::move(value)); return *this;}
/**
* <p>The IP address of network traffic from your on-premises data center. A custom
* AZ receives the network traffic.</p>
*/
inline VpnDetails& WithVpnTunnelOriginatorIP(const char* value) { SetVpnTunnelOriginatorIP(value); return *this;}
/**
* <p>The IP address of network traffic from Amazon Web Services to your
* on-premises data center.</p>
*/
inline const Aws::String& GetVpnGatewayIp() const{ return m_vpnGatewayIp; }
/**
* <p>The IP address of network traffic from Amazon Web Services to your
* on-premises data center.</p>
*/
inline bool VpnGatewayIpHasBeenSet() const { return m_vpnGatewayIpHasBeenSet; }
/**
* <p>The IP address of network traffic from Amazon Web Services to your
* on-premises data center.</p>
*/
inline void SetVpnGatewayIp(const Aws::String& value) { m_vpnGatewayIpHasBeenSet = true; m_vpnGatewayIp = value; }
/**
* <p>The IP address of network traffic from Amazon Web Services to your
* on-premises data center.</p>
*/
inline void SetVpnGatewayIp(Aws::String&& value) { m_vpnGatewayIpHasBeenSet = true; m_vpnGatewayIp = std::move(value); }
/**
* <p>The IP address of network traffic from Amazon Web Services to your
* on-premises data center.</p>
*/
inline void SetVpnGatewayIp(const char* value) { m_vpnGatewayIpHasBeenSet = true; m_vpnGatewayIp.assign(value); }
/**
* <p>The IP address of network traffic from Amazon Web Services to your
* on-premises data center.</p>
*/
inline VpnDetails& WithVpnGatewayIp(const Aws::String& value) { SetVpnGatewayIp(value); return *this;}
/**
* <p>The IP address of network traffic from Amazon Web Services to your
* on-premises data center.</p>
*/
inline VpnDetails& WithVpnGatewayIp(Aws::String&& value) { SetVpnGatewayIp(std::move(value)); return *this;}
/**
* <p>The IP address of network traffic from Amazon Web Services to your
* on-premises data center.</p>
*/
inline VpnDetails& WithVpnGatewayIp(const char* value) { SetVpnGatewayIp(value); return *this;}
/**
* <p>The preshared key (PSK) for the VPN.</p>
*/
inline const Aws::String& GetVpnPSK() const{ return m_vpnPSK; }
/**
* <p>The preshared key (PSK) for the VPN.</p>
*/
inline bool VpnPSKHasBeenSet() const { return m_vpnPSKHasBeenSet; }
/**
* <p>The preshared key (PSK) for the VPN.</p>
*/
inline void SetVpnPSK(const Aws::String& value) { m_vpnPSKHasBeenSet = true; m_vpnPSK = value; }
/**
* <p>The preshared key (PSK) for the VPN.</p>
*/
inline void SetVpnPSK(Aws::String&& value) { m_vpnPSKHasBeenSet = true; m_vpnPSK = std::move(value); }
/**
* <p>The preshared key (PSK) for the VPN.</p>
*/
inline void SetVpnPSK(const char* value) { m_vpnPSKHasBeenSet = true; m_vpnPSK.assign(value); }
/**
* <p>The preshared key (PSK) for the VPN.</p>
*/
inline VpnDetails& WithVpnPSK(const Aws::String& value) { SetVpnPSK(value); return *this;}
/**
* <p>The preshared key (PSK) for the VPN.</p>
*/
inline VpnDetails& WithVpnPSK(Aws::String&& value) { SetVpnPSK(std::move(value)); return *this;}
/**
* <p>The preshared key (PSK) for the VPN.</p>
*/
inline VpnDetails& WithVpnPSK(const char* value) { SetVpnPSK(value); return *this;}
/**
* <p>The name of the VPN.</p>
*/
inline const Aws::String& GetVpnName() const{ return m_vpnName; }
/**
* <p>The name of the VPN.</p>
*/
inline bool VpnNameHasBeenSet() const { return m_vpnNameHasBeenSet; }
/**
* <p>The name of the VPN.</p>
*/
inline void SetVpnName(const Aws::String& value) { m_vpnNameHasBeenSet = true; m_vpnName = value; }
/**
* <p>The name of the VPN.</p>
*/
inline void SetVpnName(Aws::String&& value) { m_vpnNameHasBeenSet = true; m_vpnName = std::move(value); }
/**
* <p>The name of the VPN.</p>
*/
inline void SetVpnName(const char* value) { m_vpnNameHasBeenSet = true; m_vpnName.assign(value); }
/**
* <p>The name of the VPN.</p>
*/
inline VpnDetails& WithVpnName(const Aws::String& value) { SetVpnName(value); return *this;}
/**
* <p>The name of the VPN.</p>
*/
inline VpnDetails& WithVpnName(Aws::String&& value) { SetVpnName(std::move(value)); return *this;}
/**
* <p>The name of the VPN.</p>
*/
inline VpnDetails& WithVpnName(const char* value) { SetVpnName(value); return *this;}
/**
* <p>The state of the VPN.</p>
*/
inline const Aws::String& GetVpnState() const{ return m_vpnState; }
/**
* <p>The state of the VPN.</p>
*/
inline bool VpnStateHasBeenSet() const { return m_vpnStateHasBeenSet; }
/**
* <p>The state of the VPN.</p>
*/
inline void SetVpnState(const Aws::String& value) { m_vpnStateHasBeenSet = true; m_vpnState = value; }
/**
* <p>The state of the VPN.</p>
*/
inline void SetVpnState(Aws::String&& value) { m_vpnStateHasBeenSet = true; m_vpnState = std::move(value); }
/**
* <p>The state of the VPN.</p>
*/
inline void SetVpnState(const char* value) { m_vpnStateHasBeenSet = true; m_vpnState.assign(value); }
/**
* <p>The state of the VPN.</p>
*/
inline VpnDetails& WithVpnState(const Aws::String& value) { SetVpnState(value); return *this;}
/**
* <p>The state of the VPN.</p>
*/
inline VpnDetails& WithVpnState(Aws::String&& value) { SetVpnState(std::move(value)); return *this;}
/**
* <p>The state of the VPN.</p>
*/
inline VpnDetails& WithVpnState(const char* value) { SetVpnState(value); return *this;}
private:
Aws::String m_vpnId;
bool m_vpnIdHasBeenSet;
Aws::String m_vpnTunnelOriginatorIP;
bool m_vpnTunnelOriginatorIPHasBeenSet;
Aws::String m_vpnGatewayIp;
bool m_vpnGatewayIpHasBeenSet;
Aws::String m_vpnPSK;
bool m_vpnPSKHasBeenSet;
Aws::String m_vpnName;
bool m_vpnNameHasBeenSet;
Aws::String m_vpnState;
bool m_vpnStateHasBeenSet;
};
} // namespace Model
} // namespace RDS
} // namespace Aws
| apache-2.0 |
bcbroussard/kubernetes | pkg/api/v1beta3/conversion.go | 10116 | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta3
import (
"fmt"
"reflect"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
)
func addConversionFuncs() {
// Add non-generated conversion functions
err := api.Scheme.AddConversionFuncs(
convert_v1beta3_Container_To_api_Container,
convert_api_Container_To_v1beta3_Container,
)
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
// Add field conversion funcs.
err = api.Scheme.AddFieldLabelConversionFunc("v1beta3", "Pod",
func(label, value string) (string, string, error) {
switch label {
case "metadata.name",
"metadata.namespace",
"status.phase",
"spec.host":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
err = api.Scheme.AddFieldLabelConversionFunc("v1beta3", "Node",
func(label, value string) (string, string, error) {
switch label {
case "metadata.name":
return label, value, nil
case "spec.unschedulable":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
err = api.Scheme.AddFieldLabelConversionFunc("v1beta3", "ReplicationController",
func(label, value string) (string, string, error) {
switch label {
case "metadata.name",
"status.replicas":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
err = api.Scheme.AddFieldLabelConversionFunc("v1beta3", "Event",
func(label, value string) (string, string, error) {
switch label {
case "involvedObject.kind",
"involvedObject.namespace",
"involvedObject.name",
"involvedObject.uid",
"involvedObject.apiVersion",
"involvedObject.resourceVersion",
"involvedObject.fieldPath",
"reason",
"source":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
err = api.Scheme.AddFieldLabelConversionFunc("v1beta3", "Namespace",
func(label, value string) (string, string, error) {
switch label {
case "status.phase":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
err = api.Scheme.AddFieldLabelConversionFunc("v1beta3", "Secret",
func(label, value string) (string, string, error) {
switch label {
case "type":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
err = api.Scheme.AddFieldLabelConversionFunc("v1beta3", "ServiceAccount",
func(label, value string) (string, string, error) {
switch label {
case "metadata.name":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
}
func convert_v1beta3_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*Container))(in)
}
out.Name = in.Name
out.Image = in.Image
if in.Command != nil {
out.Command = make([]string, len(in.Command))
for i := range in.Command {
out.Command[i] = in.Command[i]
}
}
if in.Args != nil {
out.Args = make([]string, len(in.Args))
for i := range in.Args {
out.Args[i] = in.Args[i]
}
}
out.WorkingDir = in.WorkingDir
if in.Ports != nil {
out.Ports = make([]api.ContainerPort, len(in.Ports))
for i := range in.Ports {
if err := convert_v1beta3_ContainerPort_To_api_ContainerPort(&in.Ports[i], &out.Ports[i], s); err != nil {
return err
}
}
}
if in.Env != nil {
out.Env = make([]api.EnvVar, len(in.Env))
for i := range in.Env {
if err := convert_v1beta3_EnvVar_To_api_EnvVar(&in.Env[i], &out.Env[i], s); err != nil {
return err
}
}
}
if err := s.Convert(&in.Resources, &out.Resources, 0); err != nil {
return err
}
if in.VolumeMounts != nil {
out.VolumeMounts = make([]api.VolumeMount, len(in.VolumeMounts))
for i := range in.VolumeMounts {
if err := convert_v1beta3_VolumeMount_To_api_VolumeMount(&in.VolumeMounts[i], &out.VolumeMounts[i], s); err != nil {
return err
}
}
}
if in.LivenessProbe != nil {
out.LivenessProbe = new(api.Probe)
if err := convert_v1beta3_Probe_To_api_Probe(in.LivenessProbe, out.LivenessProbe, s); err != nil {
return err
}
} else {
out.LivenessProbe = nil
}
if in.ReadinessProbe != nil {
out.ReadinessProbe = new(api.Probe)
if err := convert_v1beta3_Probe_To_api_Probe(in.ReadinessProbe, out.ReadinessProbe, s); err != nil {
return err
}
} else {
out.ReadinessProbe = nil
}
if in.Lifecycle != nil {
out.Lifecycle = new(api.Lifecycle)
if err := convert_v1beta3_Lifecycle_To_api_Lifecycle(in.Lifecycle, out.Lifecycle, s); err != nil {
return err
}
} else {
out.Lifecycle = nil
}
out.TerminationMessagePath = in.TerminationMessagePath
out.ImagePullPolicy = api.PullPolicy(in.ImagePullPolicy)
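	// The legacy top-level Privileged/Capabilities fields duplicate the
	// SecurityContext; refuse to convert if the two disagree.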
if in.SecurityContext != nil {
if in.SecurityContext.Capabilities != nil {
if !reflect.DeepEqual(in.SecurityContext.Capabilities.Add, in.Capabilities.Add) ||
!reflect.DeepEqual(in.SecurityContext.Capabilities.Drop, in.Capabilities.Drop) {
return fmt.Errorf("container capability settings do not match security context settings, cannot convert")
}
}
if in.SecurityContext.Privileged != nil {
if in.Privileged != *in.SecurityContext.Privileged {
return fmt.Errorf("container privileged settings do not match security context settings, cannot convert")
}
}
}
if in.SecurityContext != nil {
out.SecurityContext = new(api.SecurityContext)
if err := convert_v1beta3_SecurityContext_To_api_SecurityContext(in.SecurityContext, out.SecurityContext, s); err != nil {
return err
}
} else {
out.SecurityContext = nil
}
return nil
}
func convert_api_Container_To_v1beta3_Container(in *api.Container, out *Container, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.Container))(in)
}
out.Name = in.Name
out.Image = in.Image
if in.Command != nil {
out.Command = make([]string, len(in.Command))
for i := range in.Command {
out.Command[i] = in.Command[i]
}
}
if in.Args != nil {
out.Args = make([]string, len(in.Args))
for i := range in.Args {
out.Args[i] = in.Args[i]
}
}
out.WorkingDir = in.WorkingDir
if in.Ports != nil {
out.Ports = make([]ContainerPort, len(in.Ports))
for i := range in.Ports {
if err := convert_api_ContainerPort_To_v1beta3_ContainerPort(&in.Ports[i], &out.Ports[i], s); err != nil {
return err
}
}
}
if in.Env != nil {
out.Env = make([]EnvVar, len(in.Env))
for i := range in.Env {
if err := convert_api_EnvVar_To_v1beta3_EnvVar(&in.Env[i], &out.Env[i], s); err != nil {
return err
}
}
}
if err := s.Convert(&in.Resources, &out.Resources, 0); err != nil {
return err
}
if in.VolumeMounts != nil {
out.VolumeMounts = make([]VolumeMount, len(in.VolumeMounts))
for i := range in.VolumeMounts {
if err := convert_api_VolumeMount_To_v1beta3_VolumeMount(&in.VolumeMounts[i], &out.VolumeMounts[i], s); err != nil {
return err
}
}
}
if in.LivenessProbe != nil {
out.LivenessProbe = new(Probe)
if err := convert_api_Probe_To_v1beta3_Probe(in.LivenessProbe, out.LivenessProbe, s); err != nil {
return err
}
} else {
out.LivenessProbe = nil
}
if in.ReadinessProbe != nil {
out.ReadinessProbe = new(Probe)
if err := convert_api_Probe_To_v1beta3_Probe(in.ReadinessProbe, out.ReadinessProbe, s); err != nil {
return err
}
} else {
out.ReadinessProbe = nil
}
if in.Lifecycle != nil {
out.Lifecycle = new(Lifecycle)
if err := convert_api_Lifecycle_To_v1beta3_Lifecycle(in.Lifecycle, out.Lifecycle, s); err != nil {
return err
}
} else {
out.Lifecycle = nil
}
out.TerminationMessagePath = in.TerminationMessagePath
out.ImagePullPolicy = PullPolicy(in.ImagePullPolicy)
if in.SecurityContext != nil {
out.SecurityContext = new(SecurityContext)
if err := convert_api_SecurityContext_To_v1beta3_SecurityContext(in.SecurityContext, out.SecurityContext, s); err != nil {
return err
}
} else {
out.SecurityContext = nil
}
// now that we've converted set the container field from security context
if out.SecurityContext != nil && out.SecurityContext.Privileged != nil {
out.Privileged = *out.SecurityContext.Privileged
}
// now that we've converted set the container field from security context
if out.SecurityContext != nil && out.SecurityContext.Capabilities != nil {
out.Capabilities = *out.SecurityContext.Capabilities
}
return nil
}
| apache-2.0 |
alibaba/weex | android/sdk/src/main/java/org/apache/weex/ui/action/GraphicActionAddElement.java | 7884 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.weex.ui.action;
import android.support.annotation.NonNull;
import android.support.annotation.RestrictTo;
import android.support.annotation.RestrictTo.Scope;
import android.support.annotation.WorkerThread;
import android.support.v4.util.ArrayMap;
import android.text.TextUtils;
import android.util.Log;
import org.apache.weex.BuildConfig;
import org.apache.weex.WXSDKInstance;
import org.apache.weex.WXSDKManager;
import org.apache.weex.common.WXErrorCode;
import org.apache.weex.dom.transition.WXTransition;
import org.apache.weex.performance.WXAnalyzerDataTransfer;
import org.apache.weex.performance.WXStateRecord;
import org.apache.weex.ui.component.WXComponent;
import org.apache.weex.ui.component.WXVContainer;
import org.apache.weex.utils.WXExceptionUtils;
import org.apache.weex.utils.WXLogUtils;
import org.apache.weex.utils.WXUtils;
import java.util.Arrays;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
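/**
 * Graphic action that adds a new element to the component tree. The constructor builds the
 * corresponding {@link WXComponent} from the incoming render data, while {@link #executeAction()}
 * attaches the created child to its parent container at the requested index and triggers layout
 * and data binding on the UI side.
 */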
public class GraphicActionAddElement extends GraphicActionAbstractAddElement {
private WXVContainer parent;
private WXComponent child;
private GraphicPosition layoutPosition;
private GraphicSize layoutSize;
private boolean isLayoutRTL;
public GraphicActionAddElement(@NonNull WXSDKInstance instance, String ref,
String componentType, String parentRef,
int index,
Map<String, String> style,
Map<String, String> attributes,
Set<String> events,
float[] margins,
float[] paddings,
float[] borders) {
super(instance, ref);
this.mComponentType = componentType;
this.mParentRef = parentRef;
this.mIndex = index;
this.mStyle = style;
this.mAttributes = attributes;
this.mEvents = events;
this.mPaddings = paddings;
this.mMargins = margins;
this.mBorders = borders;
if (instance.getContext() == null) {
return;
}
if (WXAnalyzerDataTransfer.isInteractionLogOpen()){
Log.d(WXAnalyzerDataTransfer.INTERACTION_TAG, "[client][addelementStart]"+instance.getInstanceId()+","+componentType+","+ref);
}
try {
parent = (WXVContainer) WXSDKManager.getInstance().getWXRenderManager()
.getWXComponent(getPageId(), mParentRef);
long start = WXUtils.getFixUnixTime();
BasicComponentData basicComponentData = new BasicComponentData(ref, mComponentType,
mParentRef);
child = createComponent(instance, parent, basicComponentData);
child.setTransition(WXTransition.fromMap(child.getStyles(), child));
long diff = WXUtils.getFixUnixTime()-start;
instance.getApmForInstance().componentCreateTime += diff;
if (null != parent && parent.isIgnoreInteraction){
child.isIgnoreInteraction = true;
}
if (!child.isIgnoreInteraction ){
Object flag = null;
if (null != child.getAttrs()){
flag = child.getAttrs().get("ignoreInteraction");
}
if ("false".equals(flag) || "0".equals(flag)){
child.isIgnoreInteraction = false;
}else if ("1".equals(flag) || "true".equals(flag) || child.isFixed()){
child.isIgnoreInteraction = true;
}
}
WXStateRecord.getInstance().recordAction(instance.getInstanceId(),"addElement");
} catch (ClassCastException e) {
Map<String, String> ext = new ArrayMap<>();
WXComponent parent = WXSDKManager.getInstance().getWXRenderManager()
.getWXComponent(getPageId(), mParentRef);
if (mStyle != null && !mStyle.isEmpty()) {
ext.put("child.style", mStyle.toString());
}
if (parent != null && parent.getStyles() != null && !parent.getStyles().isEmpty()) {
ext.put("parent.style", parent.getStyles().toString());
}
if (mAttributes != null && !mAttributes.isEmpty()) {
ext.put("child.attr", mAttributes.toString());
}
if (parent != null && parent.getAttrs() != null && !parent.getAttrs().isEmpty()) {
ext.put("parent.attr", parent.getAttrs().toString());
}
if (mEvents != null && !mEvents.isEmpty()) {
ext.put("child.event", mEvents.toString());
}
if (parent != null && parent.getEvents() != null && !parent.getEvents().isEmpty()) {
ext.put("parent.event", parent.getEvents().toString());
}
if (mMargins != null && mMargins.length > 0) {
ext.put("child.margin", Arrays.toString(mMargins));
}
if (parent != null && parent.getMargin() != null) {
ext.put("parent.margin", parent.getMargin().toString());
}
if (mPaddings != null && mPaddings.length > 0) {
ext.put("child.padding", Arrays.toString(mPaddings));
}
if (parent != null && parent.getPadding() != null) {
ext.put("parent.padding", parent.getPadding().toString());
}
if (mBorders != null && mBorders.length > 0) {
ext.put("child.border", Arrays.toString(mBorders));
}
if (parent != null && parent.getBorder() != null) {
ext.put("parent.border", parent.getBorder().toString());
}
WXExceptionUtils.commitCriticalExceptionRT(instance.getInstanceId(),
WXErrorCode.WX_RENDER_ERR_CONTAINER_TYPE,
"GraphicActionAddElement",
String.format(Locale.ENGLISH,"You are trying to add a %s to a %2$s, which is illegal as %2$s is not a container",
componentType,
WXSDKManager.getInstance().getWXRenderManager().getWXComponent(getPageId(), mParentRef).getComponentType()),
ext);
}
}
@RestrictTo(Scope.LIBRARY)
@WorkerThread
public void setRTL(boolean isRTL){
this.isLayoutRTL = isRTL;
}
@RestrictTo(Scope.LIBRARY)
@WorkerThread
public void setSize(GraphicSize graphicSize){
this.layoutSize = graphicSize;
}
@RestrictTo(Scope.LIBRARY)
@WorkerThread
public void setPosition(GraphicPosition position){
this.layoutPosition = position;
}
@RestrictTo(Scope.LIBRARY)
@WorkerThread
public void setIndex(int index){
mIndex = index;
}
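  /**
   * Attaches the child component created in the constructor to its parent at {@code mIndex},
   * creates the native view, applies layout information and binds the component data,
   * recording the time spent in the instance's APM data.
   */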
@Override
public void executeAction() {
super.executeAction();
try {
if (!TextUtils.equals(mComponentType, "video") && !TextUtils.equals(mComponentType, "videoplus"))
child.mIsAddElementToTree = true;
long start = WXUtils.getFixUnixTime();
parent.addChild(child, mIndex);
parent.createChildViewAt(mIndex);
child.setIsLayoutRTL(isLayoutRTL);
if(layoutPosition !=null && layoutSize != null) {
child.setDemission(layoutSize, layoutPosition);
}
child.applyLayoutAndEvent(child);
child.bindData(child);
long diff = WXUtils.getFixUnixTime() - start;
if (null != getWXSDKIntance()){
getWXSDKIntance().getApmForInstance().viewCreateTime +=diff;
}
} catch (Exception e) {
WXLogUtils.e("add component failed.", e);
}
}
}
| apache-2.0 |
googleapis/google-cloud-dotnet | apis/Google.Cloud.ResourceManager.V3/Google.Cloud.ResourceManager.V3.GeneratedSnippets/TagKeysClient.TestIamPermissionsRequestObjectSnippet.g.cs | 1856 | // Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Generated code. DO NOT EDIT!
namespace Google.Cloud.ResourceManager.V3.Snippets
{
// [START cloudresourcemanager_v3_generated_TagKeys_TestIamPermissions_sync]
using Google.Api.Gax;
using Google.Cloud.Iam.V1;
using Google.Cloud.ResourceManager.V3;
public sealed partial class GeneratedTagKeysClientSnippets
{
/// <summary>Snippet for TestIamPermissions</summary>
/// <remarks>
/// This snippet has been automatically generated for illustrative purposes only.
/// It may require modifications to work in your environment.
/// </remarks>
public void TestIamPermissionsRequestObject()
{
// Create client
TagKeysClient tagKeysClient = TagKeysClient.Create();
// Initialize request argument(s)
TestIamPermissionsRequest request = new TestIamPermissionsRequest
{
ResourceAsResourceName = new UnparsedResourceName("a/wildcard/resource"),
Permissions = { "", },
};
// Make the request
TestIamPermissionsResponse response = tagKeysClient.TestIamPermissions(request);
}
}
// [END cloudresourcemanager_v3_generated_TagKeys_TestIamPermissions_sync]
}
| apache-2.0 |
wyg1990/Mallet | src/main/java/com/intel/mallet/Conf.java | 5783 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.mallet;
import java.util.logging.Logger;
import java.util.Properties;
import java.io.*;
// Configuration parameters.
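// Values are read once from <baseDirectory>/conf/conf.properties when the Conf singleton is
// created; command-line flags (--quickrun, --powertest, --query <n>) further adjust the run mode.
// Typical use (illustrative sketch): Conf conf = Conf.getConf(); conf.parseCommandLine(args);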
public class Conf {
private static final Logger logger = Logger.getLogger(Conf.class.getName());
private static Conf conf = new Conf();
private final String baseDirectory; //Base directory of this benchmark
private final String hiveServerHost;
private final String hiveServerPort;
private final int numberOfStreams;
private final String tpcDsToolDirectory;
private final String tempDirectory;
private final String malletDbDir;
private final int scale;
private final String user;
private boolean quickRunMode = false;
private boolean powerTestOnly = false;
private boolean singleQueryMode = false;
private int queryId;
private String dbSettings;
private String getProperty(Properties prop, String key) {
String value = prop.getProperty(key);
if (value == null) {
throw new ExceptionInInitializerError(key + " in conf file not found!");
}
return value;
}
private Conf() {
baseDirectory = System.getProperty("user.dir");
tempDirectory = System.getProperty("java.io.tmpdir");
tpcDsToolDirectory = baseDirectory + "/tools";
String confFile = baseDirectory + "/conf/conf.properties";
Properties prop = new Properties();
try {
FileInputStream in = new FileInputStream(confFile);
prop.load(in);
} catch (FileNotFoundException e) {
throw new ExceptionInInitializerError(e);
} catch (IOException e) {
throw new ExceptionInInitializerError(e);
}
hiveServerHost = getProperty(prop, "hiveServerHost");
hiveServerPort = getProperty(prop, "hiveServerPort");
numberOfStreams = Integer.parseInt(getProperty(prop, "numberOfStreams"));
    // Multiple query streams are executed concurrently in a Throughput Test.
    // The number of streams must be an even number greater than or equal to 4.
if (!(numberOfStreams >= 4 && ((numberOfStreams % 2) == 0))) {
throw new ExceptionInInitializerError("Number of streams for Throughput Test must be any even number larger or equal to 4.");
}
scale = Integer.parseInt(getProperty(prop, "scaleFactor"));
// Valid scale factors are 1,100,300,1000,3000,10000,30000,100000
int[] scaleFactors = {1, 100, 300, 1000, 3000, 10000, 30000, 100000};
int i;
for (i = 0; i < scaleFactors.length; i++) {
if (scale == scaleFactors[i]) {
break;
}
}
if (i >= scaleFactors.length) {
throw new ExceptionInInitializerError("Invalid scale factor.");
}
user = getProperty(prop, "user");
malletDbDir = getProperty(prop, "malletDbDir") + "/mallet/DATA";
}
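  /**
   * Parses the benchmark command-line flags. Supported arguments are
   * "--quickrun" (quick-run mode), "--powertest" (run the Power Test only) and
   * "--query <id>" (run a single query, 1-99, which implies the Power Test).
   *
   * @throws MalletException if an unknown flag or an invalid query id is given
   */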
public void parseCommandLine(String[] args) throws MalletException {
boolean argError = false;
for (int i = 0; i < args.length; i++) {
String arg = args[i];
if (arg.equalsIgnoreCase("--quickrun")) {
quickRunMode = true;
} else if (arg.equalsIgnoreCase("--powertest")) {
powerTestOnly = true;
} else if (arg.equalsIgnoreCase("--query")) {
powerTestOnly = true;
singleQueryMode = true;
if ((i + 1) >= args.length) {
argError = true;
break;
}
arg = args[i + 1];
try {
queryId = Integer.parseInt(arg);
} catch (NumberFormatException e) {
argError = true;
break;
}
if (queryId < 1 || queryId > 99) {
argError = true;
break;
}
i++;
} else {
argError = true;
break;
}
}
if (argError) {
throw new MalletException("Invalid command line arguments.");
}
}
public static Conf getConf() {
return conf;
}
public String getBaseDirectory() {
return baseDirectory;
}
public String getHiveServerHost() {
return hiveServerHost;
}
public String getHiveServerPort() {
return hiveServerPort;
}
public int getNumberOfStreams() {
return numberOfStreams;
}
public String getTpcDsToolDirectory() {
return tpcDsToolDirectory;
}
public String getTempDirectory() {
return tempDirectory;
}
public String getMalletDbDirectory() {
return malletDbDir;
}
public int getScale() {
return scale;
}
public String getUser() {
return user;
}
public boolean isQuickRunMode() {
return quickRunMode;
}
public boolean isPowerTestOnly() {
return powerTestOnly;
}
public boolean isSingleQueryMode() {
return singleQueryMode;
}
public int getQueryId() {
return queryId;
}
public String getDbSettings() {
if (dbSettings != null) {
return dbSettings;
}
String dbSettingsFile = getBaseDirectory() + "/conf/hive_settings.hql";
try {
dbSettings = Utility.readHqlFile(dbSettingsFile);
return dbSettings;
} catch (MalletException e) {
return null;
}
}
}
| apache-2.0 |
google/paco | Paco/src/com/pacoapp/paco/ui/SplashActivity.java | 14782 | package com.pacoapp.paco.ui;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.android.apps.paco.AccountChooser;
import com.google.android.gms.auth.GooglePlayServicesAvailabilityException;
import com.google.android.gms.auth.UserRecoverableAuthException;
import com.google.android.gms.common.ConnectionResult;
import com.google.android.gms.common.GooglePlayServicesUtil;
import com.pacoapp.paco.R;
import com.pacoapp.paco.UserPreferences;
import com.pacoapp.paco.net.AbstractAuthTokenTask;
import com.pacoapp.paco.net.GetAuthTokenInForeground;
import com.pacoapp.paco.net.NetworkClient;
import android.accounts.Account;
import android.accounts.AccountManager;
import android.accounts.AccountManagerCallback;
import android.accounts.AccountManagerFuture;
import android.accounts.OperationCanceledException;
import android.annotation.SuppressLint;
import android.app.Activity;
import android.app.Dialog;
import android.content.Context;
import android.content.Intent;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;
import android.os.Build;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.Toast;
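/**
 * Login/splash screen. Lets the user pick a Google account (via the system account picker on
 * ICS+ or the legacy {@link AccountChooser} otherwise) and then obtains an OAuth2 access token
 * for that account, handling recoverable Google Play services and authorization errors.
 */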
public class SplashActivity extends Activity implements NetworkClient {
private static Logger Log = LoggerFactory.getLogger(SplashActivity.class);
public static final String EXTRA_ACCOUNTNAME = "extra_accountname";
public static final String EXTRA_CHANGING_EXISTING_ACCOUNT = "extra_changing_existing_account";
public static final int REQUEST_CODE_PICK_ACCOUNT = 1000;
public static final int REQUEST_CODE_RECOVER_FROM_AUTH_ERROR = 1001;
public static final int REQUEST_CODE_RECOVER_FROM_PLAY_SERVICES_ERROR = 1002;
protected static final int ACCOUNT_CHOOSER_REQUEST_CODE = 55;
private UserPreferences userPrefs;
private boolean changingExistingAccount;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.splash_screen);
Log.debug("SplashActivity onCreate()");
changingExistingAccount = getIntent().getBooleanExtra(EXTRA_CHANGING_EXISTING_ACCOUNT, false);
userPrefs = new UserPreferences(getApplicationContext());
Button loginButton = (Button)findViewById(R.id.loginButton);
loginButton.setOnClickListener(new View.OnClickListener() {
@SuppressLint("NewApi")
@Override
public void onClick(View v) {
authenticateUser();
}
});
}
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
if (requestCode == REQUEST_CODE_PICK_ACCOUNT) {
if (resultCode == RESULT_OK) {
userPrefs.saveSelectedAccount(data.getStringExtra(AccountManager.KEY_ACCOUNT_NAME));
changingExistingAccount = false; // unset so that we don't loop in the picker forever
authenticateUser();
} else if (resultCode == RESULT_CANCELED) {
Toast.makeText(this, R.string.you_must_pick_an_account, Toast.LENGTH_SHORT).show();
}
} else if ((requestCode == REQUEST_CODE_RECOVER_FROM_AUTH_ERROR ||
requestCode == REQUEST_CODE_RECOVER_FROM_PLAY_SERVICES_ERROR)
&& resultCode == RESULT_OK) {
handleAuthorizeResult(resultCode, data);
return;
}
super.onActivityResult(requestCode, resultCode, data);
}
private void handleAuthorizeResult(int resultCode, Intent data) {
if (data == null) {
show("Unknown error, click the button again");
return;
}
if (resultCode == RESULT_OK) {
Log.info("Retrying");
getTask(this).execute();
return;
}
if (resultCode == RESULT_CANCELED) {
show("User rejected authorization.");
return;
}
show("Unknown error, click the button again");
}
protected void oldonActivityResult(int requestCode, int resultCode, Intent data) {
super.onActivityResult(requestCode, resultCode, data);
if (requestCode == ACCOUNT_CHOOSER_REQUEST_CODE && resultCode == Activity.RESULT_OK) {
String accountName = null;
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH) {
accountName = data.getStringExtra(AccountManager.KEY_ACCOUNT_NAME);
} else {
accountName = data.getStringExtra(AccountChooser.ACCOUNT_NAME);
}
if (accountName != null) {
userPrefs.saveSelectedAccount(accountName);
getAuthAccessToken(accountName);
// String token = GoogleAuthUtil.getToken(this, accountName, PacoService.AUTH_TOKEN_TYPE_USERINFO_EMAIL);
// finish();
} else {
finish(); // TODO handler errors
}
} else {
Toast.makeText(this, R.string.you_must_pick_an_account, Toast.LENGTH_SHORT).show();
}
}
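  /**
   * Invalidates any cached OAuth2 token for the given account and asynchronously requests a
   * fresh access token from the {@link AccountManager}, storing it in the user preferences
   * and finishing this activity on success.
   */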
private void getAuthAccessToken(final String accountName) {
AccountManager accountManager = AccountManager.get(this);
Account[] accounts = accountManager.getAccountsByType("com.google");
Account account = null;
for (Account currentAccount : accounts) {
if (currentAccount.name.equals(accountName)) {
account = currentAccount;
break;
}
}
String accessToken = getAccessToken();
if (accessToken != null) {
Log.info("Invalidating previous OAuth2 access token: " + accessToken);
accountManager.invalidateAuthToken(account.type, accessToken);
setAccessToken(null);
}
String authTokenType = AbstractAuthTokenTask.AUTH_TOKEN_TYPE_USERINFO_EMAIL;
Log.info("Get access token for " + accountName + " using authTokenType " + authTokenType);
accountManager.getAuthToken(account, authTokenType, null, this,
new AccountManagerCallback<Bundle>() {
@Override
public void run(AccountManagerFuture<Bundle> future) {
try {
String accessToken = future.getResult().getString(AccountManager.KEY_AUTHTOKEN);
Log.info("Got OAuth2 access token: " + accessToken);
setAccessToken(accessToken);
//
// Intent result = new Intent();
// result.putExtra(AccountChooser.ACCOUNT_NAME, accountName);
// SplashActivity.this.setResult(0, result);
SplashActivity.this.finish();
// finish();
} catch (OperationCanceledException e) {
Log.error("The user has denied you access to the API");
} catch (Exception e) {
Log.error(e.getMessage());
Log.error("Exception: ", e);
}
}
}, null);
}
private void setAccessToken(String token) {
userPrefs.setAccessToken(token);
}
private String getAccessToken() {
return userPrefs.getAccessToken();
}
@Override
protected void onResume() {
super.onResume();
//handle case of broken Google Play Services
// TODO remove when we get a build that properly incorporates Google Play Services and resources
// and can build an apk with < 64k methods for Android < 5.0 phones
int resultCode = GooglePlayServicesUtil.isGooglePlayServicesAvailable(getApplicationContext());
if (resultCode != ConnectionResult.SUCCESS) {
try {
        // Paco does not bundle the Google Play services resources. Only show the Play services
        // error dialog if the resource class is present on the device; otherwise fall back to a toast.
SplashActivity.this.getClassLoader().loadClass("com.google.android.gms.common.R$string");
Dialog dialog = GooglePlayServicesUtil.getErrorDialog(resultCode,
SplashActivity.this,
REQUEST_CODE_RECOVER_FROM_PLAY_SERVICES_ERROR);
dialog.show();
} catch (ClassNotFoundException e) {
Toast.makeText(getApplicationContext(),
"GooglePlayServices " + getString(R.string.are_not_available_) + " " + getString(R.string.error) + ":\n" + getGooglePlayConnectionErrorString(resultCode),
Toast.LENGTH_LONG).show();
}
} else {
if (changingExistingAccount) {
authenticateUser();
}
}
}
public void authenticateUser() {
if (userPrefs.getSelectedAccount() == null || changingExistingAccount) {
pickUserAccount();
} else {
if (isDeviceOnline()) {
getTask(this).execute();
} else {
Toast.makeText(this, getString(R.string.network_required), Toast.LENGTH_LONG).show();
}
}
}
private AbstractAuthTokenTask getTask(SplashActivity activity) {
return new GetAuthTokenInForeground(activity);
}
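  /**
   * Opens the account picker: the platform chooser intent on Ice Cream Sandwich and newer,
   * or Paco's own {@link AccountChooser} activity on older devices. The result is delivered
   * to {@link #onActivityResult} with {@code REQUEST_CODE_PICK_ACCOUNT}.
   */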
@SuppressLint("NewApi")
public void pickUserAccount() {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH) {
Account account = null;
if (userPrefs.getSelectedAccount() != null) {
account = getAccountFor(userPrefs.getSelectedAccount());
}
Intent intent = AccountManager.newChooseAccountIntent(account, null,
new String[]{"com.google"},
changingExistingAccount,
null,
AbstractAuthTokenTask.AUTH_TOKEN_TYPE_USERINFO_EMAIL,
null, null);
startActivityForResult(intent, REQUEST_CODE_PICK_ACCOUNT);
} else {
Intent intent = new Intent(SplashActivity.this, AccountChooser.class);
startActivityForResult(intent, REQUEST_CODE_PICK_ACCOUNT);
}
}
private Account getAccountFor(String selectedAccount) {
AccountManager am = AccountManager.get(this);
Account[] accounts = am.getAccountsByType("com.google");
for (Account account : accounts) {
if (account.name.equals(selectedAccount)) {
return account;
}
}
return null;
}
/** Checks whether the device currently has a network connection */
private boolean isDeviceOnline() {
ConnectivityManager connMgr = (ConnectivityManager) getSystemService(Context.CONNECTIVITY_SERVICE);
NetworkInfo networkInfo = connMgr.getActiveNetworkInfo();
if (networkInfo != null && networkInfo.isConnected()) {
return true;
}
return false;
}
public void show(final String message) {
runOnUiThread(new Runnable() {
@Override
public void run() {
        Toast.makeText(SplashActivity.this, message, Toast.LENGTH_LONG).show();
}
});
}
@Override
public void handleException(final Exception e) {
runOnUiThread(new Runnable() {
@Override
public void run() {
if (e instanceof GooglePlayServicesAvailabilityException) {
// The Google Play services APK is old, disabled, or not present.
// Show a dialog created by Google Play services that allows
// the user to update the APK
int statusCode = ((GooglePlayServicesAvailabilityException)e)
.getConnectionStatusCode();
try {
// TODO remove this when we can build Google Play Services in properly
// if the class that Paco doesn't provide is not on the system, don't
// use it to show an error dialog. Instead make a toast or dialog.
SplashActivity.this.getClassLoader().loadClass("com.google.android.gms.common.R$string");
Dialog dialog = GooglePlayServicesUtil.getErrorDialog(statusCode,
SplashActivity.this,
REQUEST_CODE_RECOVER_FROM_PLAY_SERVICES_ERROR);
dialog.show();
} catch (ClassNotFoundException e) {
String gpsError = getGooglePlayConnectionErrorString(statusCode);
Toast.makeText(getApplicationContext(),
getString(R.string.error) + ": " + gpsError,
Toast.LENGTH_LONG).show();
}
} else if (e instanceof UserRecoverableAuthException) {
// Unable to authenticate, such as when the user has not yet granted
// the app access to the account, but the user can fix this.
// Forward the user to an activity in Google Play services.
Intent intent = ((UserRecoverableAuthException)e).getIntent();
startActivityForResult(intent,
REQUEST_CODE_RECOVER_FROM_PLAY_SERVICES_ERROR);
}
}
});
}
public String getGooglePlayConnectionErrorString(int statusCode) {
String gpsError = "unknown";
switch(statusCode) {
case ConnectionResult.API_UNAVAILABLE:
gpsError = "API Unavailable";
break;
case ConnectionResult.CANCELED:
gpsError = "Canceled";
break;
case ConnectionResult.DEVELOPER_ERROR:
gpsError = "Developer Error";
break;
case ConnectionResult.INTERNAL_ERROR:
gpsError = "Internal error";
break;
case ConnectionResult.INTERRUPTED:
gpsError = "Interrupted";
break;
case ConnectionResult.INVALID_ACCOUNT:
gpsError = "Invalid Account";
break;
case ConnectionResult.LICENSE_CHECK_FAILED:
gpsError = "License Check Failed";
break;
case ConnectionResult.NETWORK_ERROR:
gpsError = "Network Error";
break;
case ConnectionResult.RESOLUTION_REQUIRED:
gpsError = "Resolution Required";
break;
case ConnectionResult.SERVICE_DISABLED:
gpsError = "Service Disabled";
break;
case ConnectionResult.SERVICE_INVALID:
gpsError = "Service Invalid";
break;
case ConnectionResult.SERVICE_MISSING:
gpsError = "Service Missing";
break;
case ConnectionResult.SERVICE_VERSION_UPDATE_REQUIRED:
gpsError = "Service version update required";
break;
case ConnectionResult.SIGN_IN_FAILED:
gpsError = "Sign in failed";
break;
case ConnectionResult.SIGN_IN_REQUIRED:
gpsError = "Sign in required";
break;
case ConnectionResult.SUCCESS:
gpsError = "Success";
break;
case ConnectionResult.TIMEOUT:
gpsError = "Timeout";
break;
default:
break;
}
return gpsError;
}
public void showAndFinish(String string) {
show(string);
finish();
}
@Override
public Context getContext() {
return this.getApplicationContext();
}
}
| apache-2.0 |
bptlab/processeditor | src/com/inubit/research/animation/LayoutingAnimator.java | 9066 | /**
*
* Process Editor - Animation Package
*
* (C) 2009, 2010 inubit AG
* (C) 2014 the authors
*
*/
package com.inubit.research.animation;
import java.awt.Point;
import java.util.ArrayList;
import java.util.List;
import net.frapu.code.visualization.Configuration;
import net.frapu.code.visualization.LayoutUtils;
import net.frapu.code.visualization.ProcessEdge;
import net.frapu.code.visualization.ProcessEditor;
import net.frapu.code.visualization.ProcessModel;
import net.frapu.code.visualization.ProcessNode;
import net.frapu.code.visualization.ProcessObject;
import com.inubit.research.layouter.LayoutHelper;
import com.inubit.research.layouter.ProcessLayouter;
import com.inubit.research.layouter.WorkBenchSpecific.WorkbenchHandler;
import com.inubit.research.layouter.adapter.ProcessNodeAdapter;
import com.inubit.research.layouter.interfaces.AbstractModelAdapter;
/**
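 * Runs a {@link ProcessLayouter} on a copy of the editor's model and animates the editor's
 * nodes and edges towards the computed layout rather than repositioning them instantly.
 *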
* @author ff
*
*/
public class LayoutingAnimator implements IAnimationListener {
/**
* Configuration Key values
*/
public static final String CONF_ANIMATION_SPEED = "LayouterAnimationSpeed";
private long start;
private ProcessLayouter f_layouter;
private int f_animationTime = -1;
private Animator animator;
private ProcessEditor f_editor;
private boolean f_layoutEdgesValue;
/**
*
*/
public LayoutingAnimator(ProcessLayouter layouter) {
f_layouter = layouter;
}
public ProcessLayouter getLayouter() {
return f_layouter;
}
/**
* Animates the layout of the model.
* @param model
* @param xstart
* @param ystart
* @param direction
* @throws Exception
*/
public void layoutModelWithAnimation(ProcessEditor editor, List<NodeAnimator> animList, int xstart, int ystart, int direction)
throws Exception {
// Animator orgAnimator = editor.getAnimator().getAnimator();
// if (orgAnimator != null) {
// orgAnimator.setRunning(false);
// }
// animator = new Animator(null, 60);
// animator.start();
// animator.setParent(editor);
f_editor = editor;
animator = editor.getAnimator().getAnimator();
ProcessModel model = editor.getModel();
ProcessModel copy = model.clone();
ProcessNode _selNode = findNode(editor.getSelectionHandler().getLastSelectedNode(), copy);
if (_selNode != null) {
ProcessNodeAdapter selectedNode = new ProcessNodeAdapter(_selNode);
f_layouter.setSelectedNode(selectedNode);
} else {
f_layouter.setSelectedNode(null);
}
// Fix all sizes to final size
if (animList != null) {
for (NodeAnimator a : animList) {
if (a instanceof DefaultNodeAnimator) {
DefaultNodeAnimator defA = (DefaultNodeAnimator) a;
          // Check if the node is part of the model being laid out
if (model.getNodes().contains(defA.getNode())) {
// If found, set target size for layouting
findNode(defA.getNode(), copy).setSize(defA.getNewSize().width, defA.getNewSize().height);
}
}
}
}
Point _offset = determinePartialLayoutingRegion(editor, copy);
AbstractModelAdapter modelI = LayoutUtils.getAdapter(copy);
f_layouter.layoutModel(modelI, xstart, ystart, 0);
WorkbenchHandler.postProcess(f_layouter, copy);
int _animationTime = f_animationTime;
if (_animationTime == -1) {
_animationTime = LayoutHelper.toInt(Configuration.getInstance().getProperty(CONF_ANIMATION_SPEED, "6000"), 6000);
}
    // write the computed coordinates back to the node animation wrappers
ArrayList<NodeAnimator> wrappers = new ArrayList<NodeAnimator>();
for (ProcessNode n : editor.getModel().getNodes()) {
DefaultNodeAnimator w = new DefaultNodeAnimator(n, animator);
w.setAnimationTime(_animationTime);
ProcessNode dup = findNode(n, copy);
if (dup != null) {
Point _pos = applyPartialLayoutingOffsetToNode(_offset, dup);
w.setNewCoords(_pos);
w.setNewSize(dup.getSize());
wrappers.add(w);
}
}
for (ProcessEdge edge : editor.getModel().getEdges()) {
DefaultEdgeAnimator w = new DefaultEdgeAnimator(edge, animator);
w.setAnimationTime(_animationTime);
ProcessEdge _e = (ProcessEdge) copy.getObjectById(edge.getId());
if (copy.getEdges().contains(_e)) {
applyPartialLayoutingOffsetToEdge(_offset, _e);
w.transformTo(_e);
wrappers.add(w);
}
}
// Check if additional animation list @todo Refactor :-)
if (animList != null) {
for (NodeAnimator a : animList) {
if (wrappers.contains(a)) {
//Already contained, modify
NodeAnimator org = wrappers.get(wrappers.indexOf(a));
if (org instanceof DefaultNodeAnimator) {
DefaultNodeAnimator defOrg = (DefaultNodeAnimator) org;
defOrg.setNewSize(((DefaultNodeAnimator) a).getNewSize());
}
}
}
}
if (wrappers.size() > 0) {
wrappers.get(0).addListener(this);
start = System.nanoTime();
}
f_layoutEdgesValue = editor.isLayoutEdges();
editor.setLayoutEdges(false);
animator.setAnimationObjects(wrappers);
}
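  /**
   * Shifts an edge's intermediate routing points by the partial-layouting offset; the first
   * and last routing points (the node attachment points) are discarded before re-adding them.
   */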
private void applyPartialLayoutingOffsetToEdge(Point _offset, ProcessEdge _e) {
if (_offset.x != Integer.MAX_VALUE) {
List<Point> _rps = _e.getRoutingPoints();
if (_rps.size() > 2) {
_rps.remove(0);
_rps.remove(_rps.size() - 1);
for (Point p : _rps) {
p.x += _offset.x;
p.y += _offset.y;
}
//setting new routing points
_e.clearRoutingPoints();
for (int i = 0; i < _rps.size(); i++) {
_e.addRoutingPoint(i, _rps.get(i));
}
}
}
}
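  /**
   * Returns the node's position shifted by the partial-layouting offset, or the unmodified
   * position when no offset was determined (offset.x == Integer.MAX_VALUE).
   */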
private Point applyPartialLayoutingOffsetToNode(Point _offset, ProcessNode dup) {
Point _pos = dup.getPos();
if (_offset.x != Integer.MAX_VALUE) {
_pos.x += _offset.x;
_pos.y += _offset.y;
}
return _pos;
}
/**
   * used for partial layouting (when only some nodes are selected)
* @param editor
* @param copy
* @return
*/
private Point determinePartialLayoutingRegion(ProcessEditor editor,
ProcessModel copy) {
List<ProcessObject> _selectedNodes = editor.getSelectionHandler().getSelection();
Point _offset = new Point(Integer.MAX_VALUE, Integer.MAX_VALUE);
if (_selectedNodes.size() > 1) {
for (ProcessObject o : _selectedNodes) {
if (o instanceof ProcessNode) {
ProcessNode _n = (ProcessNode) o;
_offset.x = Math.min(_offset.x, _n.getPos().x - _n.getSize().width / 2);
_offset.y = Math.min(_offset.y, _n.getPos().y - _n.getSize().height / 2);
}
}
for (ProcessNode n : new ArrayList<ProcessNode>(copy.getNodes())) {
if (!_selectedNodes.contains(n)) {
copy.removeNode(n);
}
}
for (ProcessEdge e : new ArrayList<ProcessEdge>(copy.getEdges())) {
if (!_selectedNodes.contains(e)) {
copy.removeEdge(e);
}
}
}
return _offset;
}
@Override
public void animationFinished(NodeAnimator node) {
node.removeListener(this);
System.out.println("Animation took: " + (System.nanoTime() - start) / 1000000 + " ms");
f_editor.setLayoutEdges(f_layoutEdgesValue);
// Kill Animator thread
//animator.setRunning(false);
}
private ProcessNode findNode(ProcessNode original, ProcessModel copy) {
if (original != null) {
String _id = original.getProperty(ProcessNode.PROP_ID);
for (ProcessNode n : copy.getNodes()) {
if (n.getProperty(ProcessNode.PROP_ID).equals(_id)) {
return n;
}
}
}
return null;
}
/**
   * can be used to override the user-set animation time for special occasions
* @param time
*/
public void setCustomAnimationTime(int time) {
f_animationTime = time;
}
}
| apache-2.0 |
MikhailHubanau/pentaho-kettle | engine/src/org/pentaho/di/trans/Trans.java | 191825 | //CHECKSTYLE:FileLength:OFF
/*! ******************************************************************************
*
* Pentaho Data Integration
*
* Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com
*
*******************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
package org.pentaho.di.trans;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.Deque;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.vfs.FileName;
import org.apache.commons.vfs.FileObject;
import org.pentaho.di.cluster.SlaveServer;
import org.pentaho.di.core.BlockingBatchingRowSet;
import org.pentaho.di.core.BlockingRowSet;
import org.pentaho.di.core.Const;
import org.pentaho.di.core.Counter;
import org.pentaho.di.core.ExecutorInterface;
import org.pentaho.di.core.ExtensionDataInterface;
import org.pentaho.di.core.KettleEnvironment;
import org.pentaho.di.core.QueueRowSet;
import org.pentaho.di.core.Result;
import org.pentaho.di.core.ResultFile;
import org.pentaho.di.core.RowMetaAndData;
import org.pentaho.di.core.RowSet;
import org.pentaho.di.core.SingleRowRowSet;
import org.pentaho.di.core.database.Database;
import org.pentaho.di.core.database.DatabaseMeta;
import org.pentaho.di.core.database.DatabaseTransactionListener;
import org.pentaho.di.core.database.map.DatabaseConnectionMap;
import org.pentaho.di.core.exception.KettleDatabaseException;
import org.pentaho.di.core.exception.KettleException;
import org.pentaho.di.core.exception.KettleFileException;
import org.pentaho.di.core.exception.KettleTransException;
import org.pentaho.di.core.exception.KettleValueException;
import org.pentaho.di.core.extension.ExtensionPointHandler;
import org.pentaho.di.core.extension.KettleExtensionPoint;
import org.pentaho.di.core.logging.ChannelLogTable;
import org.pentaho.di.core.logging.HasLogChannelInterface;
import org.pentaho.di.core.logging.KettleLogStore;
import org.pentaho.di.core.logging.LogChannel;
import org.pentaho.di.core.logging.LogChannelInterface;
import org.pentaho.di.core.logging.LogLevel;
import org.pentaho.di.core.logging.LogStatus;
import org.pentaho.di.core.logging.LoggingHierarchy;
import org.pentaho.di.core.logging.LoggingMetric;
import org.pentaho.di.core.logging.LoggingObjectInterface;
import org.pentaho.di.core.logging.LoggingObjectType;
import org.pentaho.di.core.logging.LoggingRegistry;
import org.pentaho.di.core.logging.Metrics;
import org.pentaho.di.core.logging.MetricsLogTable;
import org.pentaho.di.core.logging.MetricsRegistry;
import org.pentaho.di.core.logging.PerformanceLogTable;
import org.pentaho.di.core.logging.StepLogTable;
import org.pentaho.di.core.logging.TransLogTable;
import org.pentaho.di.core.metrics.MetricsDuration;
import org.pentaho.di.core.metrics.MetricsSnapshotInterface;
import org.pentaho.di.core.metrics.MetricsUtil;
import org.pentaho.di.core.parameters.DuplicateParamException;
import org.pentaho.di.core.parameters.NamedParams;
import org.pentaho.di.core.parameters.NamedParamsDefault;
import org.pentaho.di.core.parameters.UnknownParamException;
import org.pentaho.di.core.row.RowMetaInterface;
import org.pentaho.di.core.row.ValueMeta;
import org.pentaho.di.core.util.EnvUtil;
import org.pentaho.di.core.variables.VariableSpace;
import org.pentaho.di.core.variables.Variables;
import org.pentaho.di.core.vfs.KettleVFS;
import org.pentaho.di.core.xml.XMLHandler;
import org.pentaho.di.i18n.BaseMessages;
import org.pentaho.di.job.DelegationListener;
import org.pentaho.di.job.Job;
import org.pentaho.di.partition.PartitionSchema;
import org.pentaho.di.repository.ObjectId;
import org.pentaho.di.repository.ObjectRevision;
import org.pentaho.di.repository.Repository;
import org.pentaho.di.repository.RepositoryDirectoryInterface;
import org.pentaho.di.resource.ResourceUtil;
import org.pentaho.di.resource.TopLevelResource;
import org.pentaho.di.trans.cluster.TransSplitter;
import org.pentaho.di.trans.performance.StepPerformanceSnapShot;
import org.pentaho.di.trans.step.BaseStep;
import org.pentaho.di.trans.step.BaseStepData.StepExecutionStatus;
import org.pentaho.di.trans.step.RunThread;
import org.pentaho.di.trans.step.StepAdapter;
import org.pentaho.di.trans.step.StepDataInterface;
import org.pentaho.di.trans.step.StepInitThread;
import org.pentaho.di.trans.step.StepInterface;
import org.pentaho.di.trans.step.StepListener;
import org.pentaho.di.trans.step.StepMeta;
import org.pentaho.di.trans.step.StepMetaDataCombi;
import org.pentaho.di.trans.step.StepPartitioningMeta;
import org.pentaho.di.trans.steps.mappinginput.MappingInput;
import org.pentaho.di.trans.steps.mappingoutput.MappingOutput;
import org.pentaho.di.www.AddExportServlet;
import org.pentaho.di.www.AddTransServlet;
import org.pentaho.di.www.PrepareExecutionTransServlet;
import org.pentaho.di.www.SlaveServerTransStatus;
import org.pentaho.di.www.SocketRepository;
import org.pentaho.di.www.StartExecutionTransServlet;
import org.pentaho.di.www.WebResult;
import org.pentaho.metastore.api.IMetaStore;
/**
* This class represents the information and operations associated with the concept of a Transformation. It loads,
* instantiates, initializes, runs, and monitors the execution of the transformation contained in the specified
* TransInfo object.
*
* @author Matt
* @since 07-04-2003
*
*/
public class Trans implements VariableSpace, NamedParams, HasLogChannelInterface, LoggingObjectInterface,
ExecutorInterface, ExtensionDataInterface {
/** The package name, used for internationalization of messages. */
private static Class<?> PKG = Trans.class; // for i18n purposes, needed by Translator2!!
/** The replay date format. */
public static final String REPLAY_DATE_FORMAT = "yyyy/MM/dd HH:mm:ss";
/** The log channel interface. */
protected LogChannelInterface log;
/** The log level. */
protected LogLevel logLevel = LogLevel.BASIC;
/** The container object id. */
protected String containerObjectId;
/** The log commit size. */
protected int logCommitSize = 10;
/** The transformation metadata to execute. */
protected TransMeta transMeta;
/**
* The repository we are referencing.
*/
protected Repository repository;
/**
* The MetaStore to use
*/
protected IMetaStore metaStore;
/**
* The job that's launching this transformation. This gives us access to the whole chain, including the parent
* variables, etc.
*/
private Job parentJob;
/**
* The transformation that is executing this transformation in case of mappings.
*/
private Trans parentTrans;
/** The parent logging object interface (this could be a transformation or a job). */
private LoggingObjectInterface parent;
/** The name of the mapping step that executes this transformation in case this is a mapping. */
private String mappingStepName;
/** Indicates that we want to monitor the running transformation in a GUI. */
private boolean monitored;
/**
* Indicates that we are running in preview mode...
*/
private boolean preview;
/** The date objects for logging information about the transformation such as start and end time, etc. */
private Date startDate, endDate, currentDate, logDate, depDate;
/** The job start and end date. */
private Date jobStartDate, jobEndDate;
/** The batch id. */
private long batchId;
/**
* This is the batch ID that is passed from job to job to transformation, if nothing is passed, it's the
* transformation's batch id.
*/
private long passedBatchId;
/** The variable bindings for the transformation. */
private VariableSpace variables = new Variables();
/** A list of all the row sets. */
private List<RowSet> rowsets;
/** A list of all the steps. */
private List<StepMetaDataCombi> steps;
/** The class number. */
public int class_nr;
/**
* The replayDate indicates that this transformation is a replay transformation for a transformation executed on
* replayDate. If replayDate is null, the transformation is not a replay.
*/
private Date replayDate;
/** Constant indicating a dispatch type of 1-to-1. */
public static final int TYPE_DISP_1_1 = 1;
/** Constant indicating a dispatch type of 1-to-N. */
public static final int TYPE_DISP_1_N = 2;
/** Constant indicating a dispatch type of N-to-1. */
public static final int TYPE_DISP_N_1 = 3;
/** Constant indicating a dispatch type of N-to-N. */
public static final int TYPE_DISP_N_N = 4;
/** Constant indicating a dispatch type of N-to-M. */
public static final int TYPE_DISP_N_M = 5;
/** Constant indicating a transformation status of Finished. */
public static final String STRING_FINISHED = "Finished";
/** Constant indicating a transformation status of Finished (with errors). */
public static final String STRING_FINISHED_WITH_ERRORS = "Finished (with errors)";
/** Constant indicating a transformation status of Running. */
public static final String STRING_RUNNING = "Running";
/** Constant indicating a transformation status of Paused. */
public static final String STRING_PAUSED = "Paused";
/** Constant indicating a transformation status of Preparing for execution. */
public static final String STRING_PREPARING = "Preparing executing";
/** Constant indicating a transformation status of Initializing. */
public static final String STRING_INITIALIZING = "Initializing";
/** Constant indicating a transformation status of Waiting. */
public static final String STRING_WAITING = "Waiting";
/** Constant indicating a transformation status of Stopped. */
public static final String STRING_STOPPED = "Stopped";
/** Constant indicating a transformation status of Halting. */
public static final String STRING_HALTING = "Halting";
/** Constant specifying a filename containing XML to inject into a ZIP file created during resource export. */
public static final String CONFIGURATION_IN_EXPORT_FILENAME = "__job_execution_configuration__.xml";
/** Whether safe mode is enabled. */
private boolean safeModeEnabled;
/** The thread name. */
@Deprecated
private String threadName;
/** The transaction ID */
private String transactionId;
/** Whether the transformation is preparing for execution. */
private volatile boolean preparing;
/** Whether the transformation is initializing. */
private boolean initializing;
/** Whether the transformation is running. */
private boolean running;
/** Whether the transformation is finished. */
private final AtomicBoolean finished;
/** Whether the transformation is paused. */
private AtomicBoolean paused;
/** Whether the transformation is stopped. */
private AtomicBoolean stopped;
/** The number of errors that have occurred during execution of the transformation. */
private AtomicInteger errors;
/** Whether the transformation is ready to start. */
private boolean readyToStart;
/** Step performance snapshots. */
private Map<String, List<StepPerformanceSnapShot>> stepPerformanceSnapShots;
/** The step performance snapshot timer. */
private Timer stepPerformanceSnapShotTimer;
/** A list of listeners attached to the transformation. */
private List<TransListener> transListeners;
/** A list of stop-event listeners attached to the transformation. */
private List<TransStoppedListener> transStoppedListeners;
/** In case this transformation starts to delegate work to a local transformation or job */
private List<DelegationListener> delegationListeners;
/** The number of finished steps. */
private int nrOfFinishedSteps;
/** The number of active steps. */
private int nrOfActiveSteps;
/** The named parameters. */
private NamedParams namedParams = new NamedParamsDefault();
/** The socket repository. */
private SocketRepository socketRepository;
/** The transformation log table database connection. */
private Database transLogTableDatabaseConnection;
/** The step performance snapshot sequence number. */
private AtomicInteger stepPerformanceSnapshotSeqNr;
/** The last written step performance sequence number. */
private int lastWrittenStepPerformanceSequenceNr;
/** The last step performance snapshot sequence number added. */
private int lastStepPerformanceSnapshotSeqNrAdded;
/** The active subtransformations. */
private Map<String, Trans> activeSubtransformations;
/** The active subjobs */
private Map<String, Job> activeSubjobs;
/** The step performance snapshot size limit. */
private int stepPerformanceSnapshotSizeLimit;
/** The servlet print writer. */
private PrintWriter servletPrintWriter;
/** The trans finished blocking queue. */
private ArrayBlockingQueue<Object> transFinishedBlockingQueue;
/** The name of the executing server */
private String executingServer;
/** The name of the executing user */
private String executingUser;
private Result previousResult;
protected List<RowMetaAndData> resultRows;
protected List<ResultFile> resultFiles;
/** The command line arguments for the transformation. */
protected String[] arguments;
/**
* A table of named counters.
*/
protected Hashtable<String, Counter> counters;
private HttpServletResponse servletResponse;
private HttpServletRequest servletRequest;
private Map<String, Object> extensionDataMap;
/**
* Instantiates a new transformation.
*/
public Trans() {
finished = new AtomicBoolean( false );
paused = new AtomicBoolean( false );
stopped = new AtomicBoolean( false );
transListeners = Collections.synchronizedList( new ArrayList<TransListener>() );
transStoppedListeners = Collections.synchronizedList( new ArrayList<TransStoppedListener>() );
delegationListeners = new ArrayList<DelegationListener>();
// Get a valid transactionId in case we run database transactional.
transactionId = calculateTransactionId();
threadName = transactionId; // / backward compatibility but deprecated!
errors = new AtomicInteger( 0 );
stepPerformanceSnapshotSeqNr = new AtomicInteger( 0 );
lastWrittenStepPerformanceSequenceNr = 0;
activeSubtransformations = new HashMap<String, Trans>();
activeSubjobs = new HashMap<String, Job>();
resultRows = new ArrayList<RowMetaAndData>();
resultFiles = new ArrayList<ResultFile>();
counters = new Hashtable<String, Counter>();
extensionDataMap = new HashMap<String, Object>();
}
/**
* Initializes a transformation from transformation meta-data defined in memory.
*
* @param transMeta
* the transformation meta-data to use.
*/
public Trans( TransMeta transMeta ) {
this( transMeta, null );
}
/**
* Initializes a transformation from transformation meta-data defined in memory. Also take into account the parent log
* channel interface (job or transformation) for logging lineage purposes.
*
* @param transMeta
* the transformation meta-data to use.
* @param parent
* the parent job that is executing this transformation
*/
public Trans( TransMeta transMeta, LoggingObjectInterface parent ) {
this();
this.transMeta = transMeta;
setParent( parent );
initializeVariablesFrom( transMeta );
copyParametersFrom( transMeta );
transMeta.activateParameters();
// Get a valid transactionId in case we run database transactional.
transactionId = calculateTransactionId();
threadName = transactionId; // / backward compatibility but deprecated!
}
/**
* Sets the parent logging object.
*
* @param parent
* the new parent
*/
public void setParent( LoggingObjectInterface parent ) {
this.parent = parent;
this.log = new LogChannel( this, parent );
this.logLevel = log.getLogLevel();
this.containerObjectId = log.getContainerObjectId();
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationIsPreloaded" ) );
}
if ( log.isDebug() ) {
log.logDebug( BaseMessages.getString( PKG, "Trans.Log.NumberOfStepsToRun", String.valueOf( transMeta
.nrSteps() ), String.valueOf( transMeta.nrTransHops() ) ) );
}
}
/**
* Sets the default log commit size.
*/
private void setDefaultLogCommitSize() {
String propLogCommitSize = this.getVariable( "pentaho.log.commit.size" );
if ( propLogCommitSize != null ) {
// override the logCommit variable
try {
logCommitSize = Integer.parseInt( propLogCommitSize );
} catch ( Exception ignored ) {
logCommitSize = 10; // ignore parsing error and default to 10
}
}
}
/**
* Gets the log channel interface for the transformation.
*
* @return the log channel
* @see org.pentaho.di.core.logging.HasLogChannelInterface#getLogChannel()
*/
public LogChannelInterface getLogChannel() {
return log;
}
/**
* Sets the log channel interface for the transformation.
*
* @param log
* the new log channel interface
*/
public void setLog( LogChannelInterface log ) {
this.log = log;
}
/**
* Gets the name of the transformation.
*
* @return the transformation name
*/
public String getName() {
if ( transMeta == null ) {
return null;
}
return transMeta.getName();
}
/**
* Instantiates a new transformation using any of the provided parameters including the variable bindings, a
* repository, a name, a repository directory name, and a filename. This is a multi-purpose method that supports
* loading a transformation from a file (if the filename is provided but not a repository object) or from a repository
* (if the repository object, repository directory name, and transformation name are specified).
*
* @param parent
* the parent variable space and named params
* @param rep
* the repository
* @param name
* the name of the transformation
* @param dirname
* the dirname the repository directory name
* @param filename
* the filename containing the transformation definition
* @throws KettleException
* if any error occurs during loading, parsing, or creation of the transformation
*/
public <Parent extends VariableSpace & NamedParams> Trans( Parent parent, Repository rep, String name,
String dirname, String filename ) throws KettleException {
this();
try {
if ( rep != null ) {
RepositoryDirectoryInterface repdir = rep.findDirectory( dirname );
if ( repdir != null ) {
this.transMeta = rep.loadTransformation( name, repdir, null, false, null ); // reads last version
} else {
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Exception.UnableToLoadTransformation", name, dirname ) );
}
} else {
transMeta = new TransMeta( filename, false );
}
this.log = LogChannel.GENERAL;
transMeta.initializeVariablesFrom( parent );
initializeVariablesFrom( parent );
// PDI-3064 do not erase parameters from meta!
// instead of this - copy parameters to actual transformation
this.copyParametersFrom( parent );
this.activateParameters();
this.setDefaultLogCommitSize();
// Get a valid transactionId in case we run database transactional.
transactionId = calculateTransactionId();
threadName = transactionId; // / backward compatibility but deprecated!
} catch ( KettleException e ) {
throw new KettleException(
BaseMessages.getString( PKG, "Trans.Exception.UnableToOpenTransformation", name ), e );
}
}
/**
* Executes the transformation. This method will prepare the transformation for execution and then start all the
* threads associated with the transformation and its steps.
*
* @param arguments
* the arguments
* @throws KettleException
* if the transformation could not be prepared (initialized)
*/
public void execute( String[] arguments ) throws KettleException {
prepareExecution( arguments );
startThreads();
}
/**
* Prepares the transformation for execution. This includes setting the arguments and parameters as well as preparing
* and tracking the steps and hops in the transformation.
*
* @param arguments
* the arguments to use for this transformation
* @throws KettleException
* in case the transformation could not be prepared (initialized)
*/
public void prepareExecution( String[] arguments ) throws KettleException {
preparing = true;
startDate = null;
running = false;
log.snap( Metrics.METRIC_TRANSFORMATION_EXECUTION_START );
log.snap( Metrics.METRIC_TRANSFORMATION_INIT_START );
ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationPrepareExecution.id, this );
checkCompatibility();
// Set the arguments on the transformation...
//
if ( arguments != null ) {
setArguments( arguments );
}
activateParameters();
transMeta.activateParameters();
if ( transMeta.getName() == null ) {
if ( transMeta.getFilename() != null ) {
log.logBasic( BaseMessages.getString( PKG, "Trans.Log.DispacthingStartedForFilename", transMeta
.getFilename() ) );
}
} else {
log.logBasic( BaseMessages.getString( PKG, "Trans.Log.DispacthingStartedForTransformation", transMeta
.getName() ) );
}
if ( getArguments() != null ) {
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.NumberOfArgumentsDetected", String
.valueOf( getArguments().length ) ) );
}
}
if ( isSafeModeEnabled() ) {
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.SafeModeIsEnabled", transMeta.getName() ) );
}
}
if ( getReplayDate() != null ) {
SimpleDateFormat df = new SimpleDateFormat( REPLAY_DATE_FORMAT );
log.logBasic( BaseMessages.getString( PKG, "Trans.Log.ThisIsAReplayTransformation" )
+ df.format( getReplayDate() ) );
} else {
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.ThisIsNotAReplayTransformation" ) );
}
}
// setInternalKettleVariables(this); --> Let's not do this, when running
// without file, for example remote, it spoils the fun
// extra check to see if the servlet print writer has some value in case
// folks want to test it locally...
//
if ( servletPrintWriter == null ) {
String encoding = System.getProperty( "KETTLE_DEFAULT_SERVLET_ENCODING", null );
if ( encoding == null ) {
servletPrintWriter = new PrintWriter( new OutputStreamWriter( System.out ) );
} else {
try {
servletPrintWriter = new PrintWriter( new OutputStreamWriter( System.out, encoding ) );
} catch ( UnsupportedEncodingException ex ) {
servletPrintWriter = new PrintWriter( new OutputStreamWriter( System.out ) );
}
}
}
// Keep track of all the row sets and allocated steps
//
steps = new ArrayList<StepMetaDataCombi>();
rowsets = new ArrayList<RowSet>();
List<StepMeta> hopsteps = transMeta.getTransHopSteps( false );
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.FoundDefferentSteps", String.valueOf( hopsteps
.size() ) ) );
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.AllocatingRowsets" ) );
}
// First allocate all the rowsets required!
// Note that a mapping doesn't receive ANY input or output rowsets...
//
for ( int i = 0; i < hopsteps.size(); i++ ) {
StepMeta thisStep = hopsteps.get( i );
if ( thisStep.isMapping() ) {
continue; // handled and allocated by the mapping step itself.
}
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString(
PKG, "Trans.Log.AllocateingRowsetsForStep", String.valueOf( i ), thisStep.getName() ) );
}
List<StepMeta> nextSteps = transMeta.findNextSteps( thisStep );
int nrTargets = nextSteps.size();
for ( int n = 0; n < nrTargets; n++ ) {
// What's the next step?
StepMeta nextStep = nextSteps.get( n );
if ( nextStep.isMapping() ) {
continue; // handled and allocated by the mapping step itself.
}
// How many times do we start the source step?
int thisCopies = thisStep.getCopies();
if ( thisCopies < 0 ) {
// This can only happen if a variable is used that didn't resolve to a positive integer value
//
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Log.StepCopiesNotCorrectlyDefined", thisStep.getName() ) );
}
// How many times do we start the target step?
int nextCopies = nextStep.getCopies();
// Are we re-partitioning?
boolean repartitioning;
if ( thisStep.isPartitioned() ) {
repartitioning = !thisStep.getStepPartitioningMeta()
.equals( nextStep.getStepPartitioningMeta() );
} else {
repartitioning = nextStep.isPartitioned();
}
int nrCopies;
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString(
PKG, "Trans.Log.copiesInfo", String.valueOf( thisCopies ), String.valueOf( nextCopies ) ) );
}
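// Decide how source copies map onto target copies:
//   1:1 - one source copy to one target copy
//   1:N - one source copy distributing over N target copies
//   N:1 - N source copies feeding a single target copy
//   N:N - equal copy counts without repartitioning: copy i only talks to copy i
//   N:M - anything else (unequal counts or repartitioning): a rowset per source/target copy pair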
int dispatchType;
if ( thisCopies == 1 && nextCopies == 1 ) {
dispatchType = TYPE_DISP_1_1;
nrCopies = 1;
} else if ( thisCopies == 1 && nextCopies > 1 ) {
dispatchType = TYPE_DISP_1_N;
nrCopies = nextCopies;
} else if ( thisCopies > 1 && nextCopies == 1 ) {
dispatchType = TYPE_DISP_N_1;
nrCopies = thisCopies;
} else if ( thisCopies == nextCopies && !repartitioning ) {
dispatchType = TYPE_DISP_N_N;
nrCopies = nextCopies;
} else {
// Remaining cases (typically both sides > 1 with differing counts, or repartitioning): full N x M dispatch
dispatchType = TYPE_DISP_N_M;
nrCopies = nextCopies;
} // Allocate a rowset for each destination step
// Allocate the rowsets
//
if ( dispatchType != TYPE_DISP_N_M ) {
for ( int c = 0; c < nrCopies; c++ ) {
RowSet rowSet;
switch ( transMeta.getTransformationType() ) {
case Normal:
// This is a temporary patch until the batching rowset has proven
// to be working in all situations.
// Currently there are stalling problems when dealing with small
// amounts of rows.
//
Boolean batchingRowSet =
ValueMeta.convertStringToBoolean( System.getProperty( Const.KETTLE_BATCHING_ROWSET ) );
if ( batchingRowSet != null && batchingRowSet.booleanValue() ) {
rowSet = new BlockingBatchingRowSet( transMeta.getSizeRowset() );
} else {
rowSet = new BlockingRowSet( transMeta.getSizeRowset() );
}
break;
case SerialSingleThreaded:
rowSet = new SingleRowRowSet();
break;
case SingleThreaded:
rowSet = new QueueRowSet();
break;
default:
throw new KettleException( "Unhandled transformation type: " + transMeta.getTransformationType() );
}
switch ( dispatchType ) {
case TYPE_DISP_1_1:
rowSet.setThreadNameFromToCopy( thisStep.getName(), 0, nextStep.getName(), 0 );
break;
case TYPE_DISP_1_N:
rowSet.setThreadNameFromToCopy( thisStep.getName(), 0, nextStep.getName(), c );
break;
case TYPE_DISP_N_1:
rowSet.setThreadNameFromToCopy( thisStep.getName(), c, nextStep.getName(), 0 );
break;
case TYPE_DISP_N_N:
rowSet.setThreadNameFromToCopy( thisStep.getName(), c, nextStep.getName(), c );
break;
default:
break;
}
rowsets.add( rowSet );
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.TransformationAllocatedNewRowset", rowSet
.toString() ) );
}
}
} else {
// For each N source steps we have M target steps
//
// From each input step we go to all output steps.
// This allows maximum flexibility for re-partitioning,
// distribution...
for ( int s = 0; s < thisCopies; s++ ) {
for ( int t = 0; t < nextCopies; t++ ) {
BlockingRowSet rowSet = new BlockingRowSet( transMeta.getSizeRowset() );
rowSet.setThreadNameFromToCopy( thisStep.getName(), s, nextStep.getName(), t );
rowsets.add( rowSet );
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.TransformationAllocatedNewRowset", rowSet
.toString() ) );
}
}
}
}
}
log.logDetailed( BaseMessages.getString(
  PKG, "Trans.Log.AllocatedRowsets", String.valueOf( rowsets.size() ), String.valueOf( i ), thisStep
    .getName() ) + " " );
}
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.AllocatingStepsAndStepData" ) );
}
// Allocate the steps & the data...
//
for ( int i = 0; i < hopsteps.size(); i++ ) {
StepMeta stepMeta = hopsteps.get( i );
String stepid = stepMeta.getStepID();
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString(
PKG, "Trans.Log.TransformationIsToAllocateStep", stepMeta.getName(), stepid ) );
}
// How many copies are launched of this step?
int nrCopies = stepMeta.getCopies();
if ( log.isDebug() ) {
log
.logDebug( BaseMessages
.getString( PKG, "Trans.Log.StepHasNumberRowCopies", String.valueOf( nrCopies ) ) );
}
// At least run once...
for ( int c = 0; c < nrCopies; c++ ) {
// Make sure we haven't started it yet!
if ( !hasStepStarted( stepMeta.getName(), c ) ) {
StepMetaDataCombi combi = new StepMetaDataCombi();
combi.stepname = stepMeta.getName();
combi.copy = c;
// The meta-data
combi.stepMeta = stepMeta;
combi.meta = stepMeta.getStepMetaInterface();
// Allocate the step data
StepDataInterface data = combi.meta.getStepData();
combi.data = data;
// Allocate the step
StepInterface step = combi.meta.getStep( stepMeta, data, c, transMeta, this );
// Copy the variables of the transformation to the step...
// don't share. Each copy of the step has its own variables.
//
step.initializeVariablesFrom( this );
step.setUsingThreadPriorityManagment( transMeta.isUsingThreadPriorityManagment() );
// Pass the connected repository & metaStore to the steps runtime
//
step.setRepository( repository );
step.setMetaStore( metaStore );
// If the step is partitioned, set the partitioning ID and some other
// things as well...
if ( stepMeta.isPartitioned() ) {
List<String> partitionIDs = stepMeta.getStepPartitioningMeta().getPartitionSchema().getPartitionIDs();
if ( partitionIDs != null && partitionIDs.size() > 0 ) {
step.setPartitionID( partitionIDs.get( c ) ); // Pass the partition ID
// to the step
}
}
// Save the step too
combi.step = step;
// Pass logging level and metrics gathering down to the step level.
//
if ( combi.step instanceof LoggingObjectInterface ) {
LogChannelInterface logChannel = combi.step.getLogChannel();
logChannel.setLogLevel( logLevel );
logChannel.setGatheringMetrics( log.isGatheringMetrics() );
}
// Add to the bunch...
steps.add( combi );
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationHasAllocatedANewStep", stepMeta
.getName(), String.valueOf( c ) ) );
}
}
}
}
// Now we need to verify which rowsets are meant for error handling...
// Loop over the steps and for every step verify the output rowsets.
// If a rowset is going to a target step listed in the step's error handling
// metadata, mark it as the errorRowSet.
// The input rowsets are already in place, so the next step just accepts the
// rows.
// Metadata-wise we need to do the same trick in TransMeta.
//
for ( int s = 0; s < steps.size(); s++ ) {
StepMetaDataCombi combi = steps.get( s );
if ( combi.stepMeta.isDoingErrorHandling() ) {
combi.step.identifyErrorOutput();
}
}
// Now (optionally) write start log record!
// Make sure we synchronize appropriately to avoid duplicate batch IDs.
//
Object syncObject = this;
if ( parentJob != null ) {
syncObject = parentJob; // parallel execution in a job
}
if ( parentTrans != null ) {
syncObject = parentTrans; // multiple sub-transformations
}
synchronized ( syncObject ) {
calculateBatchIdAndDateRange();
beginProcessing();
}
// Set the partition-to-rowset mapping
//
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepMeta stepMeta = sid.stepMeta;
StepInterface baseStep = sid.step;
baseStep.setPartitioned( stepMeta.isPartitioned() );
// Now let's take a look at the source and target relation
//
// If this source step is not partitioned, and the target step is: it
// means we need to re-partition the incoming data.
// If both steps are partitioned on the same method and schema, we don't
// need to re-partition
// If both steps are partitioned on a different method or schema, we need
// to re-partition as well.
// If both steps are not partitioned, we don't need to re-partition
//
boolean isThisPartitioned = stepMeta.isPartitioned();
PartitionSchema thisPartitionSchema = null;
if ( isThisPartitioned ) {
thisPartitionSchema = stepMeta.getStepPartitioningMeta().getPartitionSchema();
}
boolean isNextPartitioned = false;
StepPartitioningMeta nextStepPartitioningMeta = null;
PartitionSchema nextPartitionSchema = null;
List<StepMeta> nextSteps = transMeta.findNextSteps( stepMeta );
int nrNext = nextSteps.size();
for ( int p = 0; p < nrNext; p++ ) {
StepMeta nextStep = nextSteps.get( p );
if ( nextStep.isPartitioned() ) {
isNextPartitioned = true;
nextStepPartitioningMeta = nextStep.getStepPartitioningMeta();
nextPartitionSchema = nextStepPartitioningMeta.getPartitionSchema();
}
}
baseStep.setRepartitioning( StepPartitioningMeta.PARTITIONING_METHOD_NONE );
// If the next step is partitioned differently, set re-partitioning, when
// running locally.
//
if ( ( !isThisPartitioned && isNextPartitioned )
|| ( isThisPartitioned && isNextPartitioned && !thisPartitionSchema.equals( nextPartitionSchema ) ) ) {
baseStep.setRepartitioning( nextStepPartitioningMeta.getMethodType() );
}
// For partitioning to a set of remote steps (repartitioning from a master
// to a set of remote output steps)
//
StepPartitioningMeta targetStepPartitioningMeta = baseStep.getStepMeta().getTargetStepPartitioningMeta();
if ( targetStepPartitioningMeta != null ) {
baseStep.setRepartitioning( targetStepPartitioningMeta.getMethodType() );
}
}
preparing = false;
initializing = true;
// Do a topology sort... Above 150 step copies this might slow things down too much.
//
if ( isMonitored() && steps.size() < 150 ) {
doTopologySortOfSteps();
}
if ( log.isDetailed() ) {
log
.logDetailed( BaseMessages
.getString( PKG, "Trans.Log.InitialisingSteps", String.valueOf( steps.size() ) ) );
}
StepInitThread[] initThreads = new StepInitThread[steps.size()];
Thread[] threads = new Thread[steps.size()];
// Initialize all the threads...
//
for ( int i = 0; i < steps.size(); i++ ) {
final StepMetaDataCombi sid = steps.get( i );
// Do the init code in the background!
// Init all steps at once, but ALL steps need to finish before we can
// continue properly!
//
initThreads[i] = new StepInitThread( sid, log );
// Put it in a separate thread!
//
threads[i] = new Thread( initThreads[i] );
threads[i].setName( "init of " + sid.stepname + "." + sid.copy + " (" + threads[i].getName() + ")" );
ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepBeforeInitialize.id, initThreads[i] );
threads[i].start();
}
for ( int i = 0; i < threads.length; i++ ) {
try {
threads[i].join();
ExtensionPointHandler
.callExtensionPoint( log, KettleExtensionPoint.StepAfterInitialize.id, initThreads[i] );
} catch ( Exception ex ) {
log.logError( "Error with init thread: " + ex.getMessage(), ex.getMessage() );
log.logError( Const.getStackTracker( ex ) );
}
}
initializing = false;
boolean ok = true;
// All steps are initialized now: see if there was one that didn't do it
// correctly!
//
for ( int i = 0; i < initThreads.length; i++ ) {
StepMetaDataCombi combi = initThreads[i].getCombi();
if ( !initThreads[i].isOk() ) {
log.logError( BaseMessages
.getString( PKG, "Trans.Log.StepFailedToInit", combi.stepname + "." + combi.copy ) );
combi.data.setStatus( StepExecutionStatus.STATUS_STOPPED );
ok = false;
} else {
combi.data.setStatus( StepExecutionStatus.STATUS_IDLE );
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.StepInitialized", combi.stepname
+ "." + combi.copy ) );
}
}
}
if ( !ok ) {
// Halt the other threads as well, signal end-of-the-line to the outside
// world...
// Also explicitly call dispose() to clean up resources opened during
// init().
//
for ( int i = 0; i < initThreads.length; i++ ) {
StepMetaDataCombi combi = initThreads[i].getCombi();
// Dispose will overwrite the status, but we set it back right after
// this.
combi.step.dispose( combi.meta, combi.data );
if ( initThreads[i].isOk() ) {
combi.data.setStatus( StepExecutionStatus.STATUS_HALTED );
} else {
combi.data.setStatus( StepExecutionStatus.STATUS_STOPPED );
}
}
// Just for safety, fire the trans finished listeners...
try {
fireTransFinishedListeners();
} catch ( KettleException e ) {
// a listener produced an error
log.logError( BaseMessages.getString( PKG, "Trans.FinishListeners.Exception" ) );
// we will not pass this exception up to the prepareExecution() entry point.
} finally {
// Flag the transformation as finished even if exception was thrown
setFinished( true );
}
// Pass along the log during preview. Otherwise it becomes hard to see
// what went wrong.
//
if ( preview ) {
String logText = KettleLogStore.getAppender().getBuffer( getLogChannelId(), true ).toString();
throw new KettleException(
BaseMessages.getString( PKG, "Trans.Log.FailToInitializeAtLeastOneStep" ) + Const.CR + logText );
} else {
throw new KettleException(
BaseMessages.getString( PKG, "Trans.Log.FailToInitializeAtLeastOneStep" ) + Const.CR );
}
}
log.snap( Metrics.METRIC_TRANSFORMATION_INIT_STOP );
KettleEnvironment.setExecutionInformation( this, repository );
readyToStart = true;
}
@SuppressWarnings( "deprecation" )
private void checkCompatibility() {
// If we don't have a previous result and transMeta does have one, someone has been using a deprecated method.
//
if ( transMeta.getPreviousResult() != null && getPreviousResult() == null ) {
setPreviousResult( transMeta.getPreviousResult() );
}
// If we don't have arguments set and TransMeta has, someone has been using a deprecated method.
//
if ( transMeta.getArguments() != null && getArguments() == null ) {
setArguments( transMeta.getArguments() );
}
}
/**
* Starts the threads prepared by prepareExecution(). Before you start the threads, you can add RowListeners to them.
*
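* <p>
* A minimal, illustrative call sequence (assuming a prepared {@code Trans} instance named {@code trans} and an
* optional {@code arguments} array):
* </p>
*
* <pre>{@code
* trans.prepareExecution( arguments ); // allocates rowsets and steps, and initializes them
* trans.startThreads();                // starts the step threads (this method)
* trans.waitUntilFinished();           // blocks until all steps have finished
* Result result = trans.getResult();   // aggregated errors and line counts
* }</pre>
*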
* @throws KettleException
* if there is a communication error with a remote output socket.
*/
public void startThreads() throws KettleException {
// Now prepare to start all the threads...
//
nrOfFinishedSteps = 0;
nrOfActiveSteps = 0;
ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationStartThreads.id, this );
fireTransStartedListeners();
for ( int i = 0; i < steps.size(); i++ ) {
final StepMetaDataCombi sid = steps.get( i );
sid.step.markStart();
sid.step.initBeforeStart();
// also attach a Step Listener to detect when we're done...
//
StepListener stepListener = new StepListener() {
public void stepActive( Trans trans, StepMeta stepMeta, StepInterface step ) {
nrOfActiveSteps++;
if ( nrOfActiveSteps == 1 ) {
// Transformation goes from inactive to active...
// PDI-5229 sync added
synchronized ( transListeners ) {
for ( TransListener listener : transListeners ) {
listener.transActive( Trans.this );
}
}
}
}
public void stepFinished( Trans trans, StepMeta stepMeta, StepInterface step ) {
synchronized ( Trans.this ) {
nrOfFinishedSteps++;
if ( nrOfFinishedSteps >= steps.size() ) {
// Set the finished flag
//
setFinished( true );
// Grab the performance statistics one last time (if enabled)
//
addStepPerformanceSnapShot();
try {
fireTransFinishedListeners();
} catch ( Exception e ) {
step.setErrors( step.getErrors() + 1L );
log.logError( getName()
+ " : " + BaseMessages.getString( PKG, "Trans.Log.UnexpectedErrorAtTransformationEnd" ), e );
}
}
// If a step fails with an error, we want to kill/stop the others
// too...
//
if ( step.getErrors() > 0 ) {
log.logMinimal( BaseMessages.getString( PKG, "Trans.Log.TransformationDetectedErrors" ) );
log.logMinimal( BaseMessages.getString(
PKG, "Trans.Log.TransformationIsKillingTheOtherSteps" ) );
killAllNoWait();
}
}
}
};
// Make sure this is called first!
//
if ( sid.step instanceof BaseStep ) {
( (BaseStep) sid.step ).getStepListeners().add( 0, stepListener );
} else {
sid.step.addStepListener( stepListener );
}
}
if ( transMeta.isCapturingStepPerformanceSnapShots() ) {
stepPerformanceSnapshotSeqNr = new AtomicInteger( 0 );
stepPerformanceSnapShots = new ConcurrentHashMap<String, List<StepPerformanceSnapShot>>();
// Calculate the maximum number of snapshots to be kept in memory
//
String limitString = environmentSubstitute( transMeta.getStepPerformanceCapturingSizeLimit() );
if ( Const.isEmpty( limitString ) ) {
limitString = EnvUtil.getSystemProperty( Const.KETTLE_STEP_PERFORMANCE_SNAPSHOT_LIMIT );
}
stepPerformanceSnapshotSizeLimit = Const.toInt( limitString, 0 );
// Set a timer to collect the performance data from the running threads...
//
stepPerformanceSnapShotTimer = new Timer( "stepPerformanceSnapShot Timer: " + transMeta.getName() );
TimerTask timerTask = new TimerTask() {
public void run() {
if ( !isFinished() ) {
addStepPerformanceSnapShot();
}
}
};
stepPerformanceSnapShotTimer.schedule( timerTask, 100, transMeta.getStepPerformanceCapturingDelay() );
}
// Now start a thread to monitor the running transformation...
//
setFinished( false );
paused.set( false );
stopped.set( false );
transFinishedBlockingQueue = new ArrayBlockingQueue<Object>( 10 );
TransListener transListener = new TransAdapter() {
public void transFinished( Trans trans ) {
try {
ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationFinish.id, trans );
} catch ( KettleException e ) {
throw new RuntimeException( "Error calling extension point at end of transformation", e );
}
// First of all, stop the performance snapshot timer if there is
// one...
//
if ( transMeta.isCapturingStepPerformanceSnapShots() && stepPerformanceSnapShotTimer != null ) {
stepPerformanceSnapShotTimer.cancel();
}
setFinished( true );
running = false; // no longer running
log.snap( Metrics.METRIC_TRANSFORMATION_EXECUTION_STOP );
// If the user ran with metrics gathering enabled and a metrics logging table is configured, add another
// listener...
//
MetricsLogTable metricsLogTable = transMeta.getMetricsLogTable();
if ( metricsLogTable.isDefined() ) {
try {
writeMetricsInformation();
} catch ( Exception e ) {
log.logError( "Error writing metrics information", e );
errors.incrementAndGet();
}
}
// Close the unique connections when running database transactionally.
// This will commit or roll back the transaction based on the result of this transformation.
//
if ( transMeta.isUsingUniqueConnections() ) {
trans.closeUniqueDatabaseConnections( getResult() );
}
}
};
// This listener should always be added first so that the other listeners start from a clean state (setFinished and
// so on)
//
transListeners.add( 0, transListener );
running = true;
switch ( transMeta.getTransformationType() ) {
case Normal:
// Now start all the threads...
//
for ( int i = 0; i < steps.size(); i++ ) {
final StepMetaDataCombi combi = steps.get( i );
RunThread runThread = new RunThread( combi );
Thread thread = new Thread( runThread );
thread.setName( getName() + " - " + combi.stepname );
ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepBeforeStart.id, combi );
// Call an extension point at the end of the step
//
combi.step.addStepListener( new StepAdapter() {
@Override
public void stepFinished( Trans trans, StepMeta stepMeta, StepInterface step ) {
try {
ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepFinished.id, combi );
} catch ( KettleException e ) {
throw new RuntimeException( "Unexpected error in calling extension point upon step finish", e );
}
}
} );
thread.start();
}
break;
case SerialSingleThreaded:
new Thread( new Runnable() {
public void run() {
try {
// Always disable thread priority management, it will always slow us
// down...
//
for ( StepMetaDataCombi combi : steps ) {
combi.step.setUsingThreadPriorityManagment( false );
}
//
// This is a single threaded version...
//
// Sort the steps from start to finish...
//
Collections.sort( steps, new Comparator<StepMetaDataCombi>() {
public int compare( StepMetaDataCombi c1, StepMetaDataCombi c2 ) {
boolean c1BeforeC2 = transMeta.findPrevious( c2.stepMeta, c1.stepMeta );
if ( c1BeforeC2 ) {
return -1;
} else {
return 1;
}
}
} );
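// Round-robin over the sorted steps: keep calling processRow() on each step
// until it reports no more work, until every step is done or the
// transformation is stopped.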
boolean[] stepDone = new boolean[steps.size()];
int nrDone = 0;
while ( nrDone < steps.size() && !isStopped() ) {
for ( int i = 0; i < steps.size() && !isStopped(); i++ ) {
StepMetaDataCombi combi = steps.get( i );
if ( !stepDone[i] ) {
// if (combi.step.canProcessOneRow() ||
// !combi.step.isRunning()) {
boolean cont = combi.step.processRow( combi.meta, combi.data );
if ( !cont ) {
stepDone[i] = true;
nrDone++;
}
// }
}
}
}
} catch ( Exception e ) {
errors.addAndGet( 1 );
log.logError( "Error executing single threaded", e );
} finally {
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi combi = steps.get( i );
combi.step.dispose( combi.meta, combi.data );
combi.step.markStop();
}
}
}
} ).start();
break;
case SingleThreaded:
// Don't do anything, this needs to be handled by the transformation
// executor!
//
break;
default:
break;
}
ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationStart.id, this );
if ( log.isDetailed() ) {
log
.logDetailed( BaseMessages
.getString(
PKG,
"Trans.Log.TransformationHasAllocated", String.valueOf( steps.size() ), String
.valueOf( rowsets.size() ) ) );
}
}
/**
* Make attempt to fire all registered listeners if possible.
*
* @throws KettleException
* if any errors occur during notification
*/
protected void fireTransFinishedListeners() throws KettleException {
// PDI-5229 sync added
synchronized ( transListeners ) {
if ( transListeners.size() == 0 ) {
return;
}
// prevent an exception from one listener from blocking the execution of the others
List<KettleException> badGuys = new ArrayList<KettleException>( transListeners.size() );
for ( TransListener transListener : transListeners ) {
try {
transListener.transFinished( this );
} catch ( KettleException e ) {
badGuys.add( e );
}
}
// Signal the waitUntilFinished blocker...
transFinishedBlockingQueue.add( new Object() );
if ( !badGuys.isEmpty() ) {
//FIFO
throw new KettleException( badGuys.get( 0 ) );
}
}
}
/**
* Fires the start-event listeners (if any are registered).
*
* @throws KettleException
* if any errors occur during notification
*/
protected void fireTransStartedListeners() throws KettleException {
// PDI-5229 sync added
synchronized ( transListeners ) {
for ( TransListener transListener : transListeners ) {
transListener.transStarted( this );
}
}
}
/**
* Adds a step performance snapshot.
*/
protected void addStepPerformanceSnapShot() {
if ( stepPerformanceSnapShots == null ) {
return; // Race condition somewhere?
}
boolean pausedAndNotEmpty = isPaused() && !stepPerformanceSnapShots.isEmpty();
boolean stoppedAndNotEmpty = isStopped() && !stepPerformanceSnapShots.isEmpty();
if ( transMeta.isCapturingStepPerformanceSnapShots() && !pausedAndNotEmpty && !stoppedAndNotEmpty ) {
// get the statistics from the steps and keep them...
//
int seqNr = stepPerformanceSnapshotSeqNr.incrementAndGet();
for ( int i = 0; i < steps.size(); i++ ) {
StepMeta stepMeta = steps.get( i ).stepMeta;
StepInterface step = steps.get( i ).step;
StepPerformanceSnapShot snapShot =
new StepPerformanceSnapShot( seqNr, getBatchId(), new Date(), getName(), stepMeta.getName(), step
.getCopy(), step.getLinesRead(), step.getLinesWritten(), step.getLinesInput(), step
.getLinesOutput(), step.getLinesUpdated(), step.getLinesRejected(), step.getErrors() );
List<StepPerformanceSnapShot> snapShotList = stepPerformanceSnapShots.get( step.toString() );
StepPerformanceSnapShot previous;
if ( snapShotList == null ) {
snapShotList = new ArrayList<StepPerformanceSnapShot>();
stepPerformanceSnapShots.put( step.toString(), snapShotList );
previous = null;
} else {
previous = snapShotList.get( snapShotList.size() - 1 ); // the last one...
}
// Make the difference...
//
snapShot.diff( previous, step.rowsetInputSize(), step.rowsetOutputSize() );
synchronized ( stepPerformanceSnapShots ) {
snapShotList.add( snapShot );
if ( stepPerformanceSnapshotSizeLimit > 0 && snapShotList.size() > stepPerformanceSnapshotSizeLimit ) {
snapShotList.remove( 0 );
}
}
}
lastStepPerformanceSnapshotSeqNrAdded = stepPerformanceSnapshotSeqNr.get();
}
}
/**
* This method performs any cleanup operations, typically called after the transformation has finished. Specifically,
* after ALL the slave transformations in a clustered run have finished.
*/
public void cleanup() {
// Close all open server sockets.
// We can only close these after all processing has been confirmed to be finished.
//
if ( steps == null ) {
return;
}
for ( StepMetaDataCombi combi : steps ) {
combi.step.cleanup();
}
}
/**
* Logs a summary message for the specified step.
*
* @param si
* the step interface
*/
public void logSummary( StepInterface si ) {
log
.logBasic(
si.getStepname(),
BaseMessages
.getString(
PKG,
"Trans.Log.FinishedProcessing", String.valueOf( si.getLinesInput() ), String.valueOf( si
.getLinesOutput() ), String.valueOf( si.getLinesRead() ) )
+ BaseMessages.getString(
PKG, "Trans.Log.FinishedProcessing2", String.valueOf( si.getLinesWritten() ), String.valueOf( si
.getLinesUpdated() ), String.valueOf( si.getErrors() ) ) );
}
/**
* Waits until all RunThreads have finished.
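* <p>
* Returns immediately if the transformation was never started (no blocking queue has been allocated yet).
* </p>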
*/
public void waitUntilFinished() {
try {
if ( transFinishedBlockingQueue == null ) {
return;
}
boolean wait = true;
while ( wait ) {
wait = transFinishedBlockingQueue.poll( 1, TimeUnit.DAYS ) == null;
}
} catch ( InterruptedException e ) {
throw new RuntimeException( "Waiting for transformation to be finished interrupted!", e );
}
}
/**
* Gets the number of errors that have occurred during execution of the transformation.
*
* @return the number of errors
*/
public int getErrors() {
int nrErrors = errors.get();
if ( steps == null ) {
return nrErrors;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
if ( sid.step.getErrors() != 0L ) {
nrErrors += sid.step.getErrors();
}
}
if ( nrErrors > 0 ) {
log.logError( BaseMessages.getString( PKG, "Trans.Log.TransformationErrorsDetected" ) );
}
return nrErrors;
}
/**
* Gets the number of steps in the transformation that are in an end state, such as Finished, Halted, or Stopped.
*
* @return the number of ended steps
*/
public int getEnded() {
int nrEnded = 0;
if ( steps == null ) {
return 0;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepDataInterface data = sid.data;
if ( ( sid.step != null && !sid.step.isRunning() ) || // Should normally not be needed anymore, status is kept in
// data.
data.getStatus() == StepExecutionStatus.STATUS_FINISHED || // Finished processing
data.getStatus() == StepExecutionStatus.STATUS_HALTED || // Not launching because of init error
data.getStatus() == StepExecutionStatus.STATUS_STOPPED // Stopped because of an error
) {
nrEnded++;
}
}
return nrEnded;
}
/**
* Checks if the transformation is finished.
*
* @return true if the transformation is finished, false otherwise
*/
public boolean isFinished() {
return finished.get();
}
private void setFinished( boolean newValue ) {
finished.set( newValue );
}
public boolean isFinishedOrStopped() {
return isFinished() || isStopped();
}
/**
* Attempts to stop all running steps and sub-transformations. If all steps have finished, the transformation is
* marked as Finished.
*/
public void killAll() {
if ( steps == null ) {
return;
}
int nrStepsFinished = 0;
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
if ( log.isDebug() ) {
log.logDebug( BaseMessages.getString( PKG, "Trans.Log.LookingAtStep" ) + sid.step.getStepname() );
}
// If this is a mapping, this can cause an endless loop
//
while ( sid.step.isRunning() ) {
sid.step.stopAll();
try {
Thread.sleep( 20 );
} catch ( Exception e ) {
log.logError( BaseMessages.getString( PKG, "Trans.Log.TransformationErrors" ) + e.toString() );
return;
}
}
if ( !sid.step.isRunning() ) {
nrStepsFinished++;
}
}
if ( nrStepsFinished == steps.size() ) {
setFinished( true );
}
}
/**
* Asks all steps to stop but doesn't wait around for it to happen. This is a special method for use with mappings.
*/
private void killAllNoWait() {
if ( steps == null ) {
return;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface step = sid.step;
if ( log.isDebug() ) {
log.logDebug( BaseMessages.getString( PKG, "Trans.Log.LookingAtStep" ) + step.getStepname() );
}
step.stopAll();
try {
Thread.sleep( 20 );
} catch ( Exception e ) {
log.logError( BaseMessages.getString( PKG, "Trans.Log.TransformationErrors" ) + e.toString() );
return;
}
}
}
/**
* Logs the execution statistics for the transformation for the specified time interval. If the total length of
* execution is supplied as the interval, then the statistics represent the average throughput (lines
* read/written/updated/rejected/etc. per second) for the entire execution.
*
* @param seconds
* the time interval (in seconds)
*/
public void printStats( int seconds ) {
log.logBasic( " " );
if ( steps == null ) {
return;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface step = sid.step;
long proc = step.getProcessed();
if ( seconds != 0 ) {
if ( step.getErrors() == 0 ) {
log
.logBasic( BaseMessages
.getString(
PKG,
"Trans.Log.ProcessSuccessfullyInfo", step.getStepname(), "." + step.getCopy(), String
.valueOf( proc ), String.valueOf( ( proc / seconds ) ) ) );
} else {
log
.logError( BaseMessages
.getString(
PKG,
"Trans.Log.ProcessErrorInfo", step.getStepname(), "." + step.getCopy(), String.valueOf( step
.getErrors() ), String.valueOf( proc ), String.valueOf( proc / seconds ) ) );
}
} else {
if ( step.getErrors() == 0 ) {
log
.logBasic( BaseMessages
.getString(
PKG,
"Trans.Log.ProcessSuccessfullyInfo", step.getStepname(), "." + step.getCopy(), String
.valueOf( proc ), seconds != 0 ? String.valueOf( ( proc / seconds ) ) : "-" ) );
} else {
log
.logError( BaseMessages
.getString(
PKG,
"Trans.Log.ProcessErrorInfo2", step.getStepname(), "." + step.getCopy(), String.valueOf( step
.getErrors() ), String.valueOf( proc ), String.valueOf( seconds ) ) );
}
}
}
}
/**
* Gets the number of "processed" lines of the last step, as a representative progress metric.
*
* @return the number of lines processed by the last step
*/
public long getLastProcessed() {
if ( steps == null || steps.size() == 0 ) {
return 0L;
}
StepMetaDataCombi sid = steps.get( steps.size() - 1 );
return sid.step.getProcessed();
}
/**
* Finds the RowSet with the specified name.
*
* @param rowsetname
* the rowsetname
* @return the row set, or null if none found
*/
public RowSet findRowSet( String rowsetname ) {
// Start with the transformation.
for ( int i = 0; i < rowsets.size(); i++ ) {
// log.logDetailed("DIS: looking for RowSet ["+rowsetname+"] in nr "+i+" of "+threads.size()+" threads...");
RowSet rs = rowsets.get( i );
if ( rs.getName().equalsIgnoreCase( rowsetname ) ) {
return rs;
}
}
return null;
}
/**
* Finds the RowSet between two steps (or copies of steps).
*
* @param from
* the name of the "from" step
* @param fromcopy
* the copy number of the "from" step
* @param to
* the name of the "to" step
* @param tocopy
* the copy number of the "to" step
* @return the row set, or null if none found
*/
public RowSet findRowSet( String from, int fromcopy, String to, int tocopy ) {
// Start with the transformation.
for ( int i = 0; i < rowsets.size(); i++ ) {
RowSet rs = rowsets.get( i );
if ( rs.getOriginStepName().equalsIgnoreCase( from )
&& rs.getDestinationStepName().equalsIgnoreCase( to ) && rs.getOriginStepCopy() == fromcopy
&& rs.getDestinationStepCopy() == tocopy ) {
return rs;
}
}
return null;
}
/**
* Checks whether the specified step (or step copy) has started.
*
* @param sname
* the step name
* @param copy
* the copy number
* @return true if the specified step (or step copy) has started, false otherwise
*/
public boolean hasStepStarted( String sname, int copy ) {
// log.logDetailed("DIS: Checking wether of not ["+sname+"]."+cnr+" has started!");
// log.logDetailed("DIS: hasStepStarted() looking in "+threads.size()+" threads");
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
boolean started = ( sid.stepname != null && sid.stepname.equalsIgnoreCase( sname ) ) && sid.copy == copy;
if ( started ) {
return true;
}
}
return false;
}
/**
* Stops all steps from running, and alerts any registered listeners.
*/
public void stopAll() {
if ( steps == null ) {
return;
}
// log.logDetailed("DIS: Checking wether of not ["+sname+"]."+cnr+" has started!");
// log.logDetailed("DIS: hasStepStarted() looking in "+threads.size()+" threads");
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface rt = sid.step;
rt.setStopped( true );
rt.resumeRunning();
// Cancel queries etc. by force...
StepInterface si = rt;
try {
si.stopRunning( sid.meta, sid.data );
} catch ( Exception e ) {
log.logError( "Something went wrong while trying to stop the transformation: " + e.toString() );
log.logError( Const.getStackTracker( e ) );
}
sid.data.setStatus( StepExecutionStatus.STATUS_STOPPED );
}
// if it is stopped it is not paused
paused.set( false );
stopped.set( true );
// Fire the stopped listener...
//
synchronized ( transStoppedListeners ) {
for ( TransStoppedListener listener : transStoppedListeners ) {
listener.transStopped( this );
}
}
}
/**
* Gets the number of steps in this transformation.
*
* @return the number of steps
*/
public int nrSteps() {
if ( steps == null ) {
return 0;
}
return steps.size();
}
/**
* Gets the number of active (i.e. not finished) steps in this transformation
*
* @return the number of active steps
*/
public int nrActiveSteps() {
if ( steps == null ) {
return 0;
}
int nr = 0;
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
// without also considering a step status of not finished,
// the step execution results grid shows empty while
// the transformation has steps still running.
// if ( sid.step.isRunning() ) nr++;
if ( sid.step.isRunning() || sid.step.getStatus() != StepExecutionStatus.STATUS_FINISHED ) {
nr++;
}
}
return nr;
}
/**
* Builds a lookup indicating which of the transformation steps are still running.
*
* @return a boolean array associated with the step list, indicating whether the corresponding step is still running.
*/
public boolean[] getTransStepIsRunningLookup() {
if ( steps == null ) {
return null;
}
boolean[] tResult = new boolean[steps.size()];
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
tResult[i] = ( sid.step.isRunning() || sid.step.getStatus() != StepExecutionStatus.STATUS_FINISHED );
}
return tResult;
}
/**
* Checks the execution status of each step in the transformation.
*
* @return an array associated with the step list, indicating the status of that step.
*/
public StepExecutionStatus[] getTransStepExecutionStatusLookup() {
if ( steps == null ) {
return null;
}
// we need this snapshot for the TransGridDelegate refresh method to handle the
// difference between a timed refresh and continual step status updates
int totalSteps = steps.size();
StepExecutionStatus[] tList = new StepExecutionStatus[totalSteps];
for ( int i = 0; i < totalSteps; i++ ) {
StepMetaDataCombi sid = steps.get( i );
tList[i] = sid.step.getStatus();
}
return tList;
}
/**
* Gets the run thread for the step at the specified index.
*
* @param i
* the index of the desired step
* @return a StepInterface object corresponding to the run thread for the specified step
*/
public StepInterface getRunThread( int i ) {
if ( steps == null ) {
return null;
}
return steps.get( i ).step;
}
/**
* Gets the run thread for the step with the specified name and copy number.
*
* @param name
* the step name
* @param copy
* the copy number
* @return a StepInterface object corresponding to the run thread for the specified step
*/
public StepInterface getRunThread( String name, int copy ) {
if ( steps == null ) {
return null;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface step = sid.step;
if ( step.getStepname().equalsIgnoreCase( name ) && step.getCopy() == copy ) {
return step;
}
}
return null;
}
/**
* Calculate the batch id and date range for the transformation.
*
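* <p>
* The batch id is fetched from the transformation log table (when batch ids are enabled). The date range runs from
* the last logged end date up to the current date; a configured max-date table can cap the end date, and dependency
* tables can force the start date back to the minimum when they have changed.
* </p>
*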
* @throws KettleTransException
* if there are any errors during calculation
*/
public void calculateBatchIdAndDateRange() throws KettleTransException {
TransLogTable transLogTable = transMeta.getTransLogTable();
currentDate = new Date();
logDate = new Date();
startDate = Const.MIN_DATE;
endDate = currentDate;
DatabaseMeta logConnection = transLogTable.getDatabaseMeta();
String logTable = environmentSubstitute( transLogTable.getActualTableName() );
String logSchema = environmentSubstitute( transLogTable.getActualSchemaName() );
try {
if ( logConnection != null ) {
String logSchemaAndTable = logConnection.getQuotedSchemaTableCombination( logSchema, logTable );
if ( Const.isEmpty( logTable ) ) {
// It doesn't make sense to start database logging without a table
// to log to.
throw new KettleTransException( BaseMessages.getString( PKG, "Trans.Exception.NoLogTableDefined" ) );
}
if ( Const.isEmpty( transMeta.getName() ) && logConnection != null && logTable != null ) {
throw new KettleException( BaseMessages
.getString( PKG, "Trans.Exception.NoTransnameAvailableForLogging" ) );
}
transLogTableDatabaseConnection = new Database( this, logConnection );
transLogTableDatabaseConnection.shareVariablesWith( this );
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.OpeningLogConnection", "" + logConnection ) );
}
transLogTableDatabaseConnection.connect();
transLogTableDatabaseConnection.setCommit( logCommitSize );
// See if we have to add a batch id...
// Do this first, before anything else to lock the complete table exclusively
//
if ( transLogTable.isBatchIdUsed() ) {
Long id_batch =
logConnection.getNextBatchId( transLogTableDatabaseConnection, logSchema, logTable, transLogTable
.getKeyField().getFieldName() );
setBatchId( id_batch.longValue() );
}
//
// Get the date range from the logging table: from the last end_date to now. (currentDate)
//
Object[] lastr =
transLogTableDatabaseConnection.getLastLogDate(
logSchemaAndTable, transMeta.getName(), false, LogStatus.END );
if ( lastr != null && lastr.length > 0 ) {
startDate = (Date) lastr[0];
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.StartDateFound" ) + startDate );
}
}
//
// OK, we have a date-range.
// However, perhaps we need to look at a table before we make a final judgment?
//
if ( transMeta.getMaxDateConnection() != null
&& transMeta.getMaxDateTable() != null && transMeta.getMaxDateTable().length() > 0
&& transMeta.getMaxDateField() != null && transMeta.getMaxDateField().length() > 0 ) {
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.LookingForMaxdateConnection", ""
+ transMeta.getMaxDateConnection() ) );
}
DatabaseMeta maxcon = transMeta.getMaxDateConnection();
if ( maxcon != null ) {
Database maxdb = new Database( this, maxcon );
maxdb.shareVariablesWith( this );
try {
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.OpeningMaximumDateConnection" ) );
}
maxdb.connect();
maxdb.setCommit( logCommitSize );
//
// Determine the endDate by looking at a field in a table...
//
String sql = "SELECT MAX(" + transMeta.getMaxDateField() + ") FROM " + transMeta.getMaxDateTable();
RowMetaAndData r1 = maxdb.getOneRow( sql );
if ( r1 != null ) {
// OK, we have a value, what's the offset?
Date maxvalue = r1.getRowMeta().getDate( r1.getData(), 0 );
if ( maxvalue != null ) {
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.LastDateFoundOnTheMaxdateConnection" )
+ r1 );
}
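// Apply the configured max-date offset on top of the maximum value found (the * 1000 suggests the offset is
// expressed in seconds).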
endDate.setTime( (long) ( maxvalue.getTime() + ( transMeta.getMaxDateOffset() * 1000 ) ) );
}
} else {
if ( log.isDetailed() ) {
log
.logDetailed( BaseMessages
.getString( PKG, "Trans.Log.NoLastDateFoundOnTheMaxdateConnection" ) );
}
}
} catch ( KettleException e ) {
throw new KettleTransException( BaseMessages.getString(
PKG, "Trans.Exception.ErrorConnectingToDatabase", "" + transMeta.getMaxDateConnection() ), e );
} finally {
maxdb.disconnect();
}
} else {
throw new KettleTransException( BaseMessages
.getString( PKG, "Trans.Exception.MaximumDateConnectionCouldNotBeFound", ""
+ transMeta.getMaxDateConnection() ) );
}
}
// Determine the last date of all dependent tables...
// Get the maximum in depdate...
if ( transMeta.nrDependencies() > 0 ) {
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.CheckingForMaxDependencyDate" ) );
}
//
// Maybe one of the tables this transformation depends on has changed?
// If so we need to change the start-date!
//
depDate = Const.MIN_DATE;
Date maxdepdate = Const.MIN_DATE;
if ( lastr != null && lastr.length > 0 ) {
Date dep = (Date) lastr[1]; // #1: last depdate
if ( dep != null ) {
maxdepdate = dep;
depDate = dep;
}
}
for ( int i = 0; i < transMeta.nrDependencies(); i++ ) {
TransDependency td = transMeta.getDependency( i );
DatabaseMeta depcon = td.getDatabase();
if ( depcon != null ) {
Database depdb = new Database( this, depcon );
try {
depdb.connect();
depdb.setCommit( logCommitSize );
String sql = "SELECT MAX(" + td.getFieldname() + ") FROM " + td.getTablename();
RowMetaAndData r1 = depdb.getOneRow( sql );
if ( r1 != null ) {
// OK, we have a row, get the result!
Date maxvalue = (Date) r1.getData()[0];
if ( maxvalue != null ) {
if ( log.isDetailed() ) {
log
.logDetailed( BaseMessages
.getString(
PKG,
"Trans.Log.FoundDateFromTable", td.getTablename(), "." + td.getFieldname(), " = "
+ maxvalue.toString() ) );
}
if ( maxvalue.getTime() > maxdepdate.getTime() ) {
maxdepdate = maxvalue;
}
} else {
throw new KettleTransException(
BaseMessages
.getString(
PKG,
"Trans.Exception.UnableToGetDependencyInfoFromDB", td.getDatabase().getName() + ".", td
.getTablename()
+ ".", td.getFieldname() ) );
}
} else {
throw new KettleTransException(
BaseMessages
.getString(
PKG,
"Trans.Exception.UnableToGetDependencyInfoFromDB", td.getDatabase().getName() + ".", td
.getTablename()
+ ".", td.getFieldname() ) );
}
} catch ( KettleException e ) {
throw new KettleTransException( BaseMessages.getString( PKG, "Trans.Exception.ErrorInDatabase", ""
+ td.getDatabase() ), e );
} finally {
depdb.disconnect();
}
} else {
throw new KettleTransException( BaseMessages.getString(
PKG, "Trans.Exception.ConnectionCouldNotBeFound", "" + td.getDatabase() ) );
}
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.Maxdepdate" )
+ ( XMLHandler.date2string( maxdepdate ) ) );
}
}
// OK, so we now have the maximum depdate;
// If it is larger, it means we have to read everything back in again.
// Maybe something has changed that we need!
//
if ( maxdepdate.getTime() > depDate.getTime() ) {
depDate = maxdepdate;
startDate = Const.MIN_DATE;
}
} else {
depDate = currentDate;
}
}
// OK, now we have a date-range. See if we need to set a maximum!
if ( transMeta.getMaxDateDifference() > 0.0 && // Do we have a difference specified?
startDate.getTime() > Const.MIN_DATE.getTime() // Is the startdate > Minimum?
) {
// See if the end-date is larger than start-date + DIFF
Date maxdesired = new Date( startDate.getTime() + ( (long) transMeta.getMaxDateDifference() * 1000 ) );
// If this is the case: lower the end-date. Pick up the next 'region' next time around.
// We do this to limit the workload in a single update session (e.g. for large fact tables)
//
if ( endDate.compareTo( maxdesired ) > 0 ) {
endDate = maxdesired;
}
}
} catch ( KettleException e ) {
throw new KettleTransException( BaseMessages.getString(
PKG, "Trans.Exception.ErrorCalculatingDateRange", logTable ), e );
}
// Be careful: we DO NOT close the trans log table database connection!!!
// It's closed later in beginProcessing() to prevent excessive connect/disconnect repetitions.
}
/**
* Begin processing. Also handle logging operations related to the start of the transformation
*
* @throws KettleTransException
* the kettle trans exception
*/
public void beginProcessing() throws KettleTransException {
TransLogTable transLogTable = transMeta.getTransLogTable();
int intervalInSeconds = Const.toInt( environmentSubstitute( transLogTable.getLogInterval() ), -1 );
try {
String logTable = transLogTable.getActualTableName();
SimpleDateFormat df = new SimpleDateFormat( REPLAY_DATE_FORMAT );
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationCanBeReplayed" )
+ df.format( currentDate ) );
try {
if ( transLogTableDatabaseConnection != null
&& !Const.isEmpty( logTable ) && !Const.isEmpty( transMeta.getName() ) ) {
transLogTableDatabaseConnection.writeLogRecord( transLogTable, LogStatus.START, this, null );
// Pass in a commit to release transaction locks and to allow a user to actually see the log record.
//
if ( !transLogTableDatabaseConnection.isAutoCommit() ) {
transLogTableDatabaseConnection.commitLog( true, transLogTable );
}
// If we need to do periodic logging, make sure to install a timer for this...
//
if ( intervalInSeconds > 0 ) {
final Timer timer = new Timer( getName() + " - interval logging timer" );
TimerTask timerTask = new TimerTask() {
public void run() {
try {
endProcessing();
} catch ( Exception e ) {
log
.logError(
BaseMessages.getString( PKG, "Trans.Exception.UnableToPerformIntervalLogging" ), e );
// Also stop the show...
//
errors.incrementAndGet();
stopAll();
}
}
};
timer.schedule( timerTask, intervalInSeconds * 1000, intervalInSeconds * 1000 );
addTransListener( new TransAdapter() {
public void transFinished( Trans trans ) {
timer.cancel();
}
} );
}
// Add a listener to make sure that the last record is also written when transformation finishes...
//
addTransListener( new TransAdapter() {
public void transFinished( Trans trans ) throws KettleException {
try {
endProcessing();
lastWrittenStepPerformanceSequenceNr =
writeStepPerformanceLogRecords( lastWrittenStepPerformanceSequenceNr, LogStatus.END );
} catch ( KettleException e ) {
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Exception.UnableToPerformLoggingAtTransEnd" ), e );
}
}
} );
}
// If we need to write out the step logging information, do so at the end of the transformation too...
//
StepLogTable stepLogTable = transMeta.getStepLogTable();
if ( stepLogTable.isDefined() ) {
addTransListener( new TransAdapter() {
public void transFinished( Trans trans ) throws KettleException {
try {
writeStepLogInformation();
} catch ( KettleException e ) {
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Exception.UnableToPerformLoggingAtTransEnd" ), e );
}
}
} );
}
// If we need to write the log channel hierarchy and lineage information, add a listener for that too...
//
ChannelLogTable channelLogTable = transMeta.getChannelLogTable();
if ( channelLogTable.isDefined() ) {
addTransListener( new TransAdapter() {
public void transFinished( Trans trans ) throws KettleException {
try {
writeLogChannelInformation();
} catch ( KettleException e ) {
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Exception.UnableToPerformLoggingAtTransEnd" ), e );
}
}
} );
}
// See if we need to write the step performance records at intervals too...
//
PerformanceLogTable performanceLogTable = transMeta.getPerformanceLogTable();
int perfLogInterval = Const.toInt( environmentSubstitute( performanceLogTable.getLogInterval() ), -1 );
if ( performanceLogTable.isDefined() && perfLogInterval > 0 ) {
final Timer timer = new Timer( getName() + " - step performance log interval timer" );
TimerTask timerTask = new TimerTask() {
public void run() {
try {
lastWrittenStepPerformanceSequenceNr =
writeStepPerformanceLogRecords( lastWrittenStepPerformanceSequenceNr, LogStatus.RUNNING );
} catch ( Exception e ) {
log.logError( BaseMessages.getString(
PKG, "Trans.Exception.UnableToPerformIntervalPerformanceLogging" ), e );
// Also stop the show...
//
errors.incrementAndGet();
stopAll();
}
}
};
timer.schedule( timerTask, perfLogInterval * 1000, perfLogInterval * 1000 );
addTransListener( new TransAdapter() {
public void transFinished( Trans trans ) {
timer.cancel();
}
} );
}
} catch ( KettleException e ) {
throw new KettleTransException( BaseMessages.getString(
PKG, "Trans.Exception.ErrorWritingLogRecordToTable", logTable ), e );
} finally {
// If we use interval logging, we keep the connection open for performance reasons...
//
if ( transLogTableDatabaseConnection != null && ( intervalInSeconds <= 0 ) ) {
transLogTableDatabaseConnection.disconnect();
transLogTableDatabaseConnection = null;
}
}
} catch ( KettleException e ) {
throw new KettleTransException( BaseMessages.getString(
PKG, "Trans.Exception.UnableToBeginProcessingTransformation" ), e );
}
}
/**
* Writes log channel information to a channel logging table (if one has been configured).
*
* @throws KettleException
* if any errors occur during logging
*/
protected void writeLogChannelInformation() throws KettleException {
Database db = null;
ChannelLogTable channelLogTable = transMeta.getChannelLogTable();
// PDI-7070: If parent trans or job has the same channel logging info, don't duplicate log entries
Trans t = getParentTrans();
if ( t != null ) {
if ( channelLogTable.equals( t.getTransMeta().getChannelLogTable() ) ) {
return;
}
}
Job j = getParentJob();
if ( j != null ) {
if ( channelLogTable.equals( j.getJobMeta().getChannelLogTable() ) ) {
return;
}
}
// end PDI-7070
try {
db = new Database( this, channelLogTable.getDatabaseMeta() );
db.shareVariablesWith( this );
db.connect();
db.setCommit( logCommitSize );
List<LoggingHierarchy> loggingHierarchyList = getLoggingHierarchy();
for ( LoggingHierarchy loggingHierarchy : loggingHierarchyList ) {
db.writeLogRecord( channelLogTable, LogStatus.START, loggingHierarchy, null );
}
// Also time-out the log records in here...
//
db.cleanupLogRecords( channelLogTable );
} catch ( Exception e ) {
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Exception.UnableToWriteLogChannelInformationToLogTable" ), e );
} finally {
if ( !db.isAutoCommit() ) {
db.commit( true );
}
db.disconnect();
}
}
/**
* Writes step information to a step logging table (if one has been configured).
*
* @throws KettleException
* if any errors occur during logging
*/
protected void writeStepLogInformation() throws KettleException {
Database db = null;
StepLogTable stepLogTable = transMeta.getStepLogTable();
try {
db = new Database( this, stepLogTable.getDatabaseMeta() );
db.shareVariablesWith( this );
db.connect();
db.setCommit( logCommitSize );
for ( StepMetaDataCombi combi : steps ) {
db.writeLogRecord( stepLogTable, LogStatus.START, combi, null );
}
} catch ( Exception e ) {
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Exception.UnableToWriteStepInformationToLogTable" ), e );
} finally {
if ( !db.isAutoCommit() ) {
db.commit( true );
}
db.disconnect();
}
}
protected synchronized void writeMetricsInformation() throws KettleException {
//
List<MetricsDuration> metricsList =
MetricsUtil.getDuration( log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_REGISTER_EXTENSIONS_START );
if ( !metricsList.isEmpty() ) {
System.out.println( metricsList.get( 0 ) );
}
metricsList =
MetricsUtil.getDuration( log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_PLUGIN_REGISTRATION_START );
if ( !metricsList.isEmpty() ) {
System.out.println( metricsList.get( 0 ) );
}
long total = 0;
metricsList =
MetricsUtil.getDuration(
log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_PLUGIN_TYPE_REGISTRATION_START );
if ( metricsList != null ) {
for ( MetricsDuration duration : metricsList ) {
total += duration.getDuration();
System.out.println( " - " + duration.toString() + " Total=" + total );
}
}
Database db = null;
MetricsLogTable metricsLogTable = transMeta.getMetricsLogTable();
try {
db = new Database( this, metricsLogTable.getDatabaseMeta() );
db.shareVariablesWith( this );
db.connect();
db.setCommit( logCommitSize );
List<String> logChannelIds = LoggingRegistry.getInstance().getLogChannelChildren( getLogChannelId() );
for ( String logChannelId : logChannelIds ) {
Deque<MetricsSnapshotInterface> snapshotList =
MetricsRegistry.getInstance().getSnapshotLists().get( logChannelId );
if ( snapshotList != null ) {
Iterator<MetricsSnapshotInterface> iterator = snapshotList.iterator();
while ( iterator.hasNext() ) {
MetricsSnapshotInterface snapshot = iterator.next();
db.writeLogRecord( metricsLogTable, LogStatus.START, new LoggingMetric( batchId, snapshot ), null );
}
}
Map<String, MetricsSnapshotInterface> snapshotMap =
MetricsRegistry.getInstance().getSnapshotMaps().get( logChannelId );
if ( snapshotMap != null ) {
synchronized ( snapshotMap ) {
Iterator<MetricsSnapshotInterface> iterator = snapshotMap.values().iterator();
while ( iterator.hasNext() ) {
MetricsSnapshotInterface snapshot = iterator.next();
db.writeLogRecord( metricsLogTable, LogStatus.START, new LoggingMetric( batchId, snapshot ), null );
}
}
}
}
// Also time-out the log records in here...
//
db.cleanupLogRecords( metricsLogTable );
} catch ( Exception e ) {
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Exception.UnableToWriteMetricsInformationToLogTable" ), e );
} finally {
if ( !db.isAutoCommit() ) {
db.commit( true );
}
db.disconnect();
}
}
/**
* Gets the result of the transformation. The Result object contains such measures as the number of errors, number of
* lines read/written/input/output/updated/rejected, etc.
*
* @return the Result object containing resulting measures from execution of the transformation
*/
public Result getResult() {
if ( steps == null ) {
return null;
}
Result result = new Result();
result.setNrErrors( errors.longValue() );
result.setResult( errors.longValue() == 0 );
TransLogTable transLogTable = transMeta.getTransLogTable();
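// The trans log table can designate one step per line metric (read/written/input/output/updated/rejected);
// only those designated steps contribute to the corresponding counters in the Result.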
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface step = sid.step;
result.setNrErrors( result.getNrErrors() + sid.step.getErrors() );
result.getResultFiles().putAll( step.getResultFiles() );
if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_READ ) ) ) {
result.setNrLinesRead( result.getNrLinesRead() + step.getLinesRead() );
}
if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_INPUT ) ) ) {
result.setNrLinesInput( result.getNrLinesInput() + step.getLinesInput() );
}
if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_WRITTEN ) ) ) {
result.setNrLinesWritten( result.getNrLinesWritten() + step.getLinesWritten() );
}
if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_OUTPUT ) ) ) {
result.setNrLinesOutput( result.getNrLinesOutput() + step.getLinesOutput() );
}
if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_UPDATED ) ) ) {
result.setNrLinesUpdated( result.getNrLinesUpdated() + step.getLinesUpdated() );
}
if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_REJECTED ) ) ) {
result.setNrLinesRejected( result.getNrLinesRejected() + step.getLinesRejected() );
}
}
result.setRows( resultRows );
if ( !Const.isEmpty( resultFiles ) ) {
result.setResultFiles( new HashMap<String, ResultFile>() );
for ( ResultFile resultFile : resultFiles ) {
result.getResultFiles().put( resultFile.toString(), resultFile );
}
}
result.setStopped( isStopped() );
result.setLogChannelId( log.getLogChannelId() );
return result;
}
/**
* End processing. Also handle any logging operations associated with the end of a transformation
*
* @return true if all end processing is successful, false otherwise
* @throws KettleException
* if any errors occur during processing
*/
private synchronized boolean endProcessing() throws KettleException {
LogStatus status;
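// Determine the status to record in the log table: STOP or END once the transformation has finished,
// PAUSED while paused, RUNNING otherwise (e.g. for interval logging).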
if ( isFinished() ) {
if ( isStopped() ) {
status = LogStatus.STOP;
} else {
status = LogStatus.END;
}
} else if ( isPaused() ) {
status = LogStatus.PAUSED;
} else {
status = LogStatus.RUNNING;
}
TransLogTable transLogTable = transMeta.getTransLogTable();
int intervalInSeconds = Const.toInt( environmentSubstitute( transLogTable.getLogInterval() ), -1 );
logDate = new Date();
// OK, we have some logging to do...
//
DatabaseMeta logcon = transMeta.getTransLogTable().getDatabaseMeta();
String logTable = transMeta.getTransLogTable().getActualTableName();
if ( logcon != null ) {
Database ldb = null;
try {
// Let's not reconnect/disconnect all the time for performance reasons!
//
if ( transLogTableDatabaseConnection == null ) {
ldb = new Database( this, logcon );
ldb.shareVariablesWith( this );
ldb.connect();
ldb.setCommit( logCommitSize );
transLogTableDatabaseConnection = ldb;
} else {
ldb = transLogTableDatabaseConnection;
}
// Write to the standard transformation log table...
//
if ( !Const.isEmpty( logTable ) ) {
ldb.writeLogRecord( transLogTable, status, this, null );
}
// Also time-out the log records in here...
//
if ( status.equals( LogStatus.END ) || status.equals( LogStatus.STOP ) ) {
ldb.cleanupLogRecords( transLogTable );
}
// Commit the operations to prevent locking issues
//
if ( !ldb.isAutoCommit() ) {
ldb.commitLog( true, transMeta.getTransLogTable() );
}
} catch ( KettleDatabaseException e ) {
// PDI-9790: an error while writing to the log DB is logged and counted, but not rethrown
log.logError( BaseMessages.getString( PKG, "Database.Error.WriteLogTable", logTable ), e );
errors.incrementAndGet();
//end PDI-9790
} catch ( Exception e ) {
throw new KettleException( BaseMessages
.getString( PKG, "Trans.Exception.ErrorWritingLogRecordToTable", transMeta
.getTransLogTable().getActualTableName() ), e );
} finally {
if ( intervalInSeconds <= 0 || ( status.equals( LogStatus.END ) || status.equals( LogStatus.STOP ) ) ) {
ldb.disconnect();
transLogTableDatabaseConnection = null; // disconnected
}
}
}
return true;
}
/**
* Write step performance log records.
*
* @param startSequenceNr
 *          the start sequence number
* @param status
* the logging status. If this is End, perform cleanup
* @return the new sequence number
* @throws KettleException
* if any errors occur during logging
*/
private int writeStepPerformanceLogRecords( int startSequenceNr, LogStatus status ) throws KettleException {
int lastSeqNr = 0;
Database ldb = null;
PerformanceLogTable performanceLogTable = transMeta.getPerformanceLogTable();
if ( !performanceLogTable.isDefined()
|| !transMeta.isCapturingStepPerformanceSnapShots() || stepPerformanceSnapShots == null
|| stepPerformanceSnapShots.isEmpty() ) {
return 0; // nothing to do here!
}
try {
ldb = new Database( this, performanceLogTable.getDatabaseMeta() );
ldb.shareVariablesWith( this );
ldb.connect();
ldb.setCommit( logCommitSize );
// Write to the step performance log table...
//
RowMetaInterface rowMeta = performanceLogTable.getLogRecord( LogStatus.START, null, null ).getRowMeta();
ldb.prepareInsert( rowMeta, performanceLogTable.getActualSchemaName(), performanceLogTable
.getActualTableName() );
synchronized ( stepPerformanceSnapShots ) {
Iterator<List<StepPerformanceSnapShot>> iterator = stepPerformanceSnapShots.values().iterator();
while ( iterator.hasNext() ) {
List<StepPerformanceSnapShot> snapshots = iterator.next();
synchronized ( snapshots ) {
Iterator<StepPerformanceSnapShot> snapshotsIterator = snapshots.iterator();
while ( snapshotsIterator.hasNext() ) {
StepPerformanceSnapShot snapshot = snapshotsIterator.next();
if ( snapshot.getSeqNr() >= startSequenceNr
&& snapshot.getSeqNr() <= lastStepPerformanceSnapshotSeqNrAdded ) {
RowMetaAndData row = performanceLogTable.getLogRecord( LogStatus.START, snapshot, null );
ldb.setValuesInsert( row.getRowMeta(), row.getData() );
ldb.insertRow( true );
}
lastSeqNr = snapshot.getSeqNr();
}
}
}
}
ldb.insertFinished( true );
// Finally, see if the log table needs cleaning up...
//
if ( status.equals( LogStatus.END ) ) {
ldb.cleanupLogRecords( performanceLogTable );
}
} catch ( Exception e ) {
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Exception.ErrorWritingStepPerformanceLogRecordToTable" ), e );
} finally {
if ( ldb != null ) {
ldb.disconnect();
}
}
return lastSeqNr + 1;
}
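// Illustrative sketch (an assumption: TransMeta exposes a setter mirroring the isCapturingStepPerformanceSnapShots()
// getter checked above). For this method to write any records, snapshot capturing must be enabled and the
// performance log table must be defined on the transformation, e.g.
//
//   transMeta.setCapturingStepPerformanceSnapShots( true );
//   // ... run the transformation; the collected snapshots are then flushed here periodically and at the end.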
/**
* Close unique database connections. If there are errors in the Result, perform a rollback
*
* @param result
* the result of the transformation execution
*/
private void closeUniqueDatabaseConnections( Result result ) {
// Don't close any connections if the parent job is using the same transaction
//
if ( parentJob != null
&& transactionId != null && parentJob.getTransactionId() != null
&& transactionId.equals( parentJob.getTransactionId() ) ) {
return;
}
// Don't close any connections if the parent transformation is using the same transaction
//
if ( parentTrans != null
&& parentTrans.getTransMeta().isUsingUniqueConnections() && transactionId != null
&& parentTrans.getTransactionId() != null && transactionId.equals( parentTrans.getTransactionId() ) ) {
return;
}
// First we get all the database connections ...
//
DatabaseConnectionMap map = DatabaseConnectionMap.getInstance();
synchronized ( map ) {
List<Database> databaseList = new ArrayList<Database>( map.getMap().values() );
for ( Database database : databaseList ) {
if ( database.getConnectionGroup().equals( getTransactionId() ) ) {
try {
// This database connection belongs to this transformation.
// Let's roll it back if there is an error...
//
if ( result.getNrErrors() > 0 ) {
try {
database.rollback( true );
log.logBasic( BaseMessages.getString(
PKG, "Trans.Exception.TransactionsRolledBackOnConnection", database.toString() ) );
} catch ( Exception e ) {
throw new KettleDatabaseException( BaseMessages.getString(
PKG, "Trans.Exception.ErrorRollingBackUniqueConnection", database.toString() ), e );
}
} else {
try {
database.commit( true );
log.logBasic( BaseMessages.getString(
PKG, "Trans.Exception.TransactionsCommittedOnConnection", database.toString() ) );
} catch ( Exception e ) {
throw new KettleDatabaseException( BaseMessages.getString(
PKG, "Trans.Exception.ErrorCommittingUniqueConnection", database.toString() ), e );
}
}
} catch ( Exception e ) {
log.logError( BaseMessages.getString(
PKG, "Trans.Exception.ErrorHandlingTransformationTransaction", database.toString() ), e );
result.setNrErrors( result.getNrErrors() + 1 );
} finally {
try {
// This database connection belongs to this transformation.
database.closeConnectionOnly();
} catch ( Exception e ) {
log.logError( BaseMessages.getString(
PKG, "Trans.Exception.ErrorHandlingTransformationTransaction", database.toString() ), e );
result.setNrErrors( result.getNrErrors() + 1 );
} finally {
// Remove the database from the list...
//
map.removeConnection( database.getConnectionGroup(), database.getPartitionId(), database );
}
}
}
}
// Who else needs to be informed of the rollback or commit?
//
List<DatabaseTransactionListener> transactionListeners = map.getTransactionListeners( getTransactionId() );
if ( result.getNrErrors() > 0 ) {
for ( DatabaseTransactionListener listener : transactionListeners ) {
try {
listener.rollback();
} catch ( Exception e ) {
log.logError(
BaseMessages.getString( PKG, "Trans.Exception.ErrorHandlingTransactionListenerRollback" ), e );
result.setNrErrors( result.getNrErrors() + 1 );
}
}
} else {
for ( DatabaseTransactionListener listener : transactionListeners ) {
try {
listener.commit();
} catch ( Exception e ) {
log.logError(
BaseMessages.getString( PKG, "Trans.Exception.ErrorHandlingTransactionListenerCommit" ), e );
result.setNrErrors( result.getNrErrors() + 1 );
}
}
}
}
}
/**
* Find the run thread for the step with the specified name.
*
* @param stepname
* the step name
* @return a StepInterface object corresponding to the run thread for the specified step
*/
public StepInterface findRunThread( String stepname ) {
if ( steps == null ) {
return null;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface step = sid.step;
if ( step.getStepname().equalsIgnoreCase( stepname ) ) {
return step;
}
}
return null;
}
/**
* Find the base steps for the step with the specified name.
*
* @param stepname
* the step name
* @return the list of base steps for the specified step
*/
public List<StepInterface> findBaseSteps( String stepname ) {
List<StepInterface> baseSteps = new ArrayList<StepInterface>();
if ( steps == null ) {
return baseSteps;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface stepInterface = sid.step;
if ( stepInterface.getStepname().equalsIgnoreCase( stepname ) ) {
baseSteps.add( stepInterface );
}
}
return baseSteps;
}
/**
* Find the executing step copy for the step with the specified name and copy number
*
* @param stepname
* the step name
 * @param copyNr
 *          the copy number of the step copy to find
* @return the executing step found or null if no copy could be found.
*/
public StepInterface findStepInterface( String stepname, int copyNr ) {
if ( steps == null ) {
return null;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface stepInterface = sid.step;
if ( stepInterface.getStepname().equalsIgnoreCase( stepname ) && sid.copy == copyNr ) {
return stepInterface;
}
}
return null;
}
/**
* Find the available executing step copies for the step with the specified name
*
* @param stepname
* the step name
 * @return the list of executing step copies found or null if no steps are available yet (incorrect usage)
*/
public List<StepInterface> findStepInterfaces( String stepname ) {
if ( steps == null ) {
return null;
}
List<StepInterface> list = new ArrayList<StepInterface>();
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface stepInterface = sid.step;
if ( stepInterface.getStepname().equalsIgnoreCase( stepname ) ) {
list.add( stepInterface );
}
}
return list;
}
/**
* Find the data interface for the step with the specified name.
*
* @param name
* the step name
* @return the step data interface
*/
public StepDataInterface findDataInterface( String name ) {
if ( steps == null ) {
return null;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface rt = sid.step;
if ( rt.getStepname().equalsIgnoreCase( name ) ) {
return sid.data;
}
}
return null;
}
/**
* Gets the start date/time object for the transformation.
*
* @return Returns the startDate.
*/
public Date getStartDate() {
return startDate;
}
/**
* Gets the end date/time object for the transformation.
*
* @return Returns the endDate.
*/
public Date getEndDate() {
return endDate;
}
/**
* Checks whether the running transformation is being monitored.
*
* @return true the running transformation is being monitored, false otherwise
*/
public boolean isMonitored() {
return monitored;
}
/**
* Sets whether the running transformation should be monitored.
*
* @param monitored
* true if the running transformation should be monitored, false otherwise
*/
public void setMonitored( boolean monitored ) {
this.monitored = monitored;
}
/**
* Gets the meta-data for the transformation.
*
* @return Returns the transformation meta-data
*/
public TransMeta getTransMeta() {
return transMeta;
}
/**
* Sets the meta-data for the transformation.
*
* @param transMeta
* The transformation meta-data to set.
*/
public void setTransMeta( TransMeta transMeta ) {
this.transMeta = transMeta;
}
/**
* Gets the current date/time object.
*
* @return the current date
*/
public Date getCurrentDate() {
return currentDate;
}
/**
* Gets the dependency date for the transformation. A transformation can have a list of dependency fields. If any of
 * these fields have a maximum date higher than the dependency date of the last run, the date range is set to (-oo,
* now). The use-case is the incremental population of Slowly Changing Dimensions (SCD).
*
* @return Returns the dependency date
*/
public Date getDepDate() {
return depDate;
}
/**
* Gets the date the transformation was logged.
*
* @return the log date
*/
public Date getLogDate() {
return logDate;
}
/**
* Gets the rowsets for the transformation.
*
* @return a list of rowsets
*/
public List<RowSet> getRowsets() {
return rowsets;
}
/**
* Gets a list of steps in the transformation.
*
* @return a list of the steps in the transformation
*/
public List<StepMetaDataCombi> getSteps() {
return steps;
}
/**
* Gets a string representation of the transformation.
*
* @return the string representation of the transformation
* @see java.lang.Object#toString()
*/
public String toString() {
if ( transMeta == null || transMeta.getName() == null ) {
return getClass().getSimpleName();
}
// See if there is a parent transformation. If so, print the name of the parent here as well...
//
StringBuffer string = new StringBuffer();
// If we're running as a mapping, we get a reference to the calling (parent) transformation as well...
//
if ( getParentTrans() != null ) {
string.append( '[' ).append( getParentTrans().toString() ).append( ']' ).append( '.' );
}
// When we run a mapping we also set a mapping step name in there...
//
if ( !Const.isEmpty( mappingStepName ) ) {
string.append( '[' ).append( mappingStepName ).append( ']' ).append( '.' );
}
string.append( transMeta.getName() );
return string.toString();
}
/**
* Gets the mapping inputs for each step in the transformation.
*
* @return an array of MappingInputs
*/
public MappingInput[] findMappingInput() {
if ( steps == null ) {
return null;
}
List<MappingInput> list = new ArrayList<MappingInput>();
// Look in threads and find the MappingInput step thread...
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi smdc = steps.get( i );
StepInterface step = smdc.step;
if ( step.getStepID().equalsIgnoreCase( "MappingInput" ) ) {
list.add( (MappingInput) step );
}
}
return list.toArray( new MappingInput[list.size()] );
}
/**
* Gets the mapping outputs for each step in the transformation.
*
* @return an array of MappingOutputs
*/
public MappingOutput[] findMappingOutput() {
List<MappingOutput> list = new ArrayList<MappingOutput>();
if ( steps != null ) {
// Look in threads and find the MappingInput step thread...
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi smdc = steps.get( i );
StepInterface step = smdc.step;
if ( step.getStepID().equalsIgnoreCase( "MappingOutput" ) ) {
list.add( (MappingOutput) step );
}
}
}
return list.toArray( new MappingOutput[list.size()] );
}
/**
* Find the StepInterface (thread) by looking it up using the name.
*
* @param stepname
* The name of the step to look for
* @param copy
* the copy number of the step to look for
* @return the StepInterface or null if nothing was found.
*/
public StepInterface getStepInterface( String stepname, int copy ) {
if ( steps == null ) {
return null;
}
// Now start all the threads...
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
if ( sid.stepname.equalsIgnoreCase( stepname ) && sid.copy == copy ) {
return sid.step;
}
}
return null;
}
/**
* Gets the replay date. The replay date is used to indicate that the transformation was replayed (re-tried, run
 * again) with that particular replay date. You can use this in Text File/Excel Input to save error line
 * numbers into a file (SOURCE_FILE.line, for example). During replay, only the lines that have errors in them are
 * passed to the next steps; the other lines are ignored. This supports the use case where a document contained errors
 * (bad dates, characters in numbers, etc.): you simply send the document back to the source (probably the
 * user/department that created it) and, when you get it back, re-run the last transformation.
*
* @return the replay date
*/
public Date getReplayDate() {
return replayDate;
}
/**
* Sets the replay date. The replay date is used to indicate that the transformation was replayed (re-tried, run
 * again) with that particular replay date. You can use this in Text File/Excel Input to save error line
 * numbers into a file (SOURCE_FILE.line, for example). During replay, only the lines that have errors in them are
 * passed to the next steps; the other lines are ignored. This supports the use case where a document contained errors
 * (bad dates, characters in numbers, etc.): you simply send the document back to the source (probably the
 * user/department that created it) and, when you get it back, re-run the last transformation.
*
* @param replayDate
* the new replay date
*/
public void setReplayDate( Date replayDate ) {
this.replayDate = replayDate;
}
/**
* Turn on safe mode during running: the transformation will run slower but with more checking enabled.
*
* @param safeModeEnabled
* true for safe mode
*/
public void setSafeModeEnabled( boolean safeModeEnabled ) {
this.safeModeEnabled = safeModeEnabled;
}
/**
* Checks whether safe mode is enabled.
*
* @return Returns true if the safe mode is enabled: the transformation will run slower but with more checking enabled
*/
public boolean isSafeModeEnabled() {
return safeModeEnabled;
}
/**
* This adds a row producer to the transformation that just got set up. It is preferable to run this BEFORE execute()
* but after prepareExecution()
*
* @param stepname
* The step to produce rows for
* @param copynr
* The copynr of the step to produce row for (normally 0 unless you have multiple copies running)
* @return the row producer
* @throws KettleException
* in case the thread/step to produce rows for could not be found.
* @see Trans#execute(String[])
* @see Trans#prepareExecution(String[])
*/
public RowProducer addRowProducer( String stepname, int copynr ) throws KettleException {
StepInterface stepInterface = getStepInterface( stepname, copynr );
if ( stepInterface == null ) {
throw new KettleException( "Unable to find thread with name " + stepname + " and copy number " + copynr );
}
// We are going to add an extra RowSet to this stepInterface.
RowSet rowSet;
switch ( transMeta.getTransformationType() ) {
case Normal:
rowSet = new BlockingRowSet( transMeta.getSizeRowset() );
break;
case SerialSingleThreaded:
rowSet = new SingleRowRowSet();
break;
case SingleThreaded:
rowSet = new QueueRowSet();
break;
default:
throw new KettleException( "Unhandled transformation type: " + transMeta.getTransformationType() );
}
// Add this rowset to the list of active rowsets for the selected step
stepInterface.getInputRowSets().add( rowSet );
return new RowProducer( stepInterface, rowSet );
}
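// Illustrative usage sketch (assumptions: a step named "Injector" exists in the transformation and injectRowMeta
// describes the rows being pushed; both names are hypothetical):
//
//   trans.prepareExecution( null );
//   RowProducer producer = trans.addRowProducer( "Injector", 0 );
//   trans.startThreads();
//   producer.putRow( injectRowMeta, new Object[] { "some value" } );
//   producer.finished();           // signal that no more rows will be produced
//   trans.waitUntilFinished();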
/**
* Gets the parent job, or null if there is no parent.
*
* @return the parent job, or null if there is no parent
*/
public Job getParentJob() {
return parentJob;
}
/**
* Sets the parent job for the transformation.
*
* @param parentJob
* The parent job to set
*/
public void setParentJob( Job parentJob ) {
this.logLevel = parentJob.getLogLevel();
this.log.setLogLevel( logLevel );
this.parentJob = parentJob;
transactionId = calculateTransactionId();
}
/**
* Finds the StepDataInterface (currently) associated with the specified step.
*
* @param stepname
* The name of the step to look for
* @param stepcopy
* The copy number (0 based) of the step
 * @return The StepDataInterface or null if none found.
*/
public StepDataInterface getStepDataInterface( String stepname, int stepcopy ) {
if ( steps == null ) {
return null;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
if ( sid.stepname.equals( stepname ) && sid.copy == stepcopy ) {
return sid.data;
}
}
return null;
}
/**
* Checks whether the transformation has any steps that are halted.
*
* @return true if one or more steps are halted, false otherwise
*/
public boolean hasHaltedSteps() {
// not yet 100% sure of this, if there are no steps... or none halted?
if ( steps == null ) {
return false;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
if ( sid.data.getStatus() == StepExecutionStatus.STATUS_HALTED ) {
return true;
}
}
return false;
}
/**
* Gets the job start date.
*
* @return the job start date
*/
public Date getJobStartDate() {
return jobStartDate;
}
/**
* Gets the job end date.
*
* @return the job end date
*/
public Date getJobEndDate() {
return jobEndDate;
}
/**
* Sets the job end date.
*
* @param jobEndDate
* the jobEndDate to set
*/
public void setJobEndDate( Date jobEndDate ) {
this.jobEndDate = jobEndDate;
}
/**
* Sets the job start date.
*
* @param jobStartDate
* the jobStartDate to set
*/
public void setJobStartDate( Date jobStartDate ) {
this.jobStartDate = jobStartDate;
}
/**
* Get the batch ID that is passed from the parent job to the transformation. If nothing is passed, it's the
* transformation's batch ID
*
* @return the parent job's batch ID, or the transformation's batch ID if there is no parent job
*/
public long getPassedBatchId() {
return passedBatchId;
}
/**
* Sets the passed batch ID of the transformation from the batch ID of the parent job.
*
* @param jobBatchId
* the jobBatchId to set
*/
public void setPassedBatchId( long jobBatchId ) {
this.passedBatchId = jobBatchId;
}
/**
* Gets the batch ID of the transformation.
*
* @return the batch ID of the transformation
*/
public long getBatchId() {
return batchId;
}
/**
* Sets the batch ID of the transformation.
*
* @param batchId
* the batch ID to set
*/
public void setBatchId( long batchId ) {
this.batchId = batchId;
}
/**
* Gets the name of the thread that contains the transformation.
*
* @deprecated please use getTransactionId() instead
* @return the thread name
*/
@Deprecated
public String getThreadName() {
return threadName;
}
/**
* Sets the thread name for the transformation.
*
* @deprecated please use setTransactionId() instead
* @param threadName
* the thread name
*/
@Deprecated
public void setThreadName( String threadName ) {
this.threadName = threadName;
}
/**
* Gets the status of the transformation (Halting, Finished, Paused, etc.)
*
* @return the status of the transformation
*/
public String getStatus() {
String message;
if ( running ) {
if ( isStopped() ) {
message = STRING_HALTING;
} else {
if ( isFinished() ) {
message = STRING_FINISHED;
if ( getResult().getNrErrors() > 0 ) {
message += " (with errors)";
}
} else if ( isPaused() ) {
message = STRING_PAUSED;
} else {
message = STRING_RUNNING;
}
}
} else if ( isStopped() ) {
message = STRING_STOPPED;
} else if ( preparing ) {
message = STRING_PREPARING;
} else if ( initializing ) {
message = STRING_INITIALIZING;
} else {
message = STRING_WAITING;
}
return message;
}
/**
* Checks whether the transformation is initializing.
*
* @return true if the transformation is initializing, false otherwise
*/
public boolean isInitializing() {
return initializing;
}
/**
* Sets whether the transformation is initializing.
*
* @param initializing
* true if the transformation is initializing, false otherwise
*/
public void setInitializing( boolean initializing ) {
this.initializing = initializing;
}
/**
* Checks whether the transformation is preparing for execution.
*
* @return true if the transformation is preparing for execution, false otherwise
*/
public boolean isPreparing() {
return preparing;
}
/**
* Sets whether the transformation is preparing for execution.
*
* @param preparing
* true if the transformation is preparing for execution, false otherwise
*/
public void setPreparing( boolean preparing ) {
this.preparing = preparing;
}
/**
* Checks whether the transformation is running.
*
* @return true if the transformation is running, false otherwise
*/
public boolean isRunning() {
return running;
}
/**
* Sets whether the transformation is running.
*
* @param running
* true if the transformation is running, false otherwise
*/
public void setRunning( boolean running ) {
this.running = running;
}
/**
* Execute the transformation in a clustered fashion. The transformation steps are split and collected in a
* TransSplitter object
*
* @param transMeta
* the transformation's meta-data
* @param executionConfiguration
* the execution configuration
* @return the transformation splitter object
* @throws KettleException
* the kettle exception
*/
public static final TransSplitter executeClustered( final TransMeta transMeta,
final TransExecutionConfiguration executionConfiguration ) throws KettleException {
if ( Const.isEmpty( transMeta.getName() ) ) {
throw new KettleException(
"The transformation needs a name to uniquely identify it by on the remote server." );
}
TransSplitter transSplitter = new TransSplitter( transMeta );
transSplitter.splitOriginalTransformation();
// Pass the clustered run ID to allow for parallel execution of clustered transformations
//
executionConfiguration.getVariables().put(
Const.INTERNAL_VARIABLE_CLUSTER_RUN_ID, transSplitter.getClusteredRunId() );
executeClustered( transSplitter, executionConfiguration );
return transSplitter;
}
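// Illustrative usage sketch (assumptions: transMeta uses a cluster schema, executionConfiguration has cluster
// posting/preparing/starting enabled, and "log" is an available LogChannelInterface):
//
//   TransSplitter splitter = Trans.executeClustered( transMeta, executionConfiguration );
//   long errors = Trans.monitorClusteredTransformation( log, splitter, null ); // null: not running inside a job
//   if ( errors > 0 ) {
//     // at least one slave or the master reported errors
//   }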
/**
* Executes an existing TransSplitter, with the transformation already split.
*
* @param transSplitter
* the trans splitter
* @param executionConfiguration
* the execution configuration
* @throws KettleException
* the kettle exception
* @see org.pentaho.di.ui.spoon.delegates.SpoonTransformationDelegate
*/
public static final void executeClustered( final TransSplitter transSplitter,
final TransExecutionConfiguration executionConfiguration ) throws KettleException {
try {
// Send the transformations to the servers...
//
// First the master and the slaves...
//
TransMeta master = transSplitter.getMaster();
final SlaveServer[] slaves = transSplitter.getSlaveTargets();
final Thread[] threads = new Thread[slaves.length];
final Throwable[] errors = new Throwable[slaves.length];
// Keep track of the various Carte object IDs
//
final Map<TransMeta, String> carteObjectMap = transSplitter.getCarteObjectMap();
//
// Send them all on their way...
//
SlaveServer masterServer = null;
List<StepMeta> masterSteps = master.getTransHopSteps( false );
if ( masterSteps.size() > 0 ) // If there is something that needs to be done on the master...
{
masterServer = transSplitter.getMasterServer();
if ( executionConfiguration.isClusterPosting() ) {
TransConfiguration transConfiguration = new TransConfiguration( master, executionConfiguration );
Map<String, String> variables = transConfiguration.getTransExecutionConfiguration().getVariables();
variables.put( Const.INTERNAL_VARIABLE_CLUSTER_SIZE, Integer.toString( slaves.length ) );
variables.put( Const.INTERNAL_VARIABLE_CLUSTER_MASTER, "Y" );
// Parameters override the variables but they need to pass over the configuration too...
//
Map<String, String> params = transConfiguration.getTransExecutionConfiguration().getParams();
TransMeta ot = transSplitter.getOriginalTransformation();
for ( String param : ot.listParameters() ) {
String value =
Const.NVL( ot.getParameterValue( param ), Const.NVL( ot.getParameterDefault( param ), ot
.getVariable( param ) ) );
params.put( param, value );
}
String masterReply =
masterServer.sendXML( transConfiguration.getXML(), AddTransServlet.CONTEXT_PATH + "/?xml=Y" );
WebResult webResult = WebResult.fromXMLString( masterReply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException( "An error occurred sending the master transformation: "
+ webResult.getMessage() );
}
carteObjectMap.put( master, webResult.getId() );
}
}
// Then the slaves...
// These are started in a background thread.
//
for ( int i = 0; i < slaves.length; i++ ) {
final int index = i;
final TransMeta slaveTrans = transSplitter.getSlaveTransMap().get( slaves[i] );
if ( executionConfiguration.isClusterPosting() ) {
Runnable runnable = new Runnable() {
public void run() {
try {
// Create a copy for local use... We get race-conditions otherwise...
//
TransExecutionConfiguration slaveTransExecutionConfiguration =
(TransExecutionConfiguration) executionConfiguration.clone();
TransConfiguration transConfiguration =
new TransConfiguration( slaveTrans, slaveTransExecutionConfiguration );
Map<String, String> variables = slaveTransExecutionConfiguration.getVariables();
variables.put( Const.INTERNAL_VARIABLE_SLAVE_SERVER_NUMBER, Integer.toString( index ) );
variables.put( Const.INTERNAL_VARIABLE_SLAVE_SERVER_NAME, slaves[index].getName() );
variables.put( Const.INTERNAL_VARIABLE_CLUSTER_SIZE, Integer.toString( slaves.length ) );
variables.put( Const.INTERNAL_VARIABLE_CLUSTER_MASTER, "N" );
// Parameters override the variables but they need to pass over the configuration too...
//
Map<String, String> params = slaveTransExecutionConfiguration.getParams();
TransMeta ot = transSplitter.getOriginalTransformation();
for ( String param : ot.listParameters() ) {
String value =
Const.NVL( ot.getParameterValue( param ), Const.NVL( ot.getParameterDefault( param ), ot
.getVariable( param ) ) );
params.put( param, value );
}
String slaveReply =
slaves[index].sendXML( transConfiguration.getXML(), AddTransServlet.CONTEXT_PATH + "/?xml=Y" );
WebResult webResult = WebResult.fromXMLString( slaveReply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException( "An error occurred sending a slave transformation: "
+ webResult.getMessage() );
}
carteObjectMap.put( slaveTrans, webResult.getId() );
} catch ( Throwable t ) {
errors[index] = t;
}
}
};
threads[i] = new Thread( runnable );
}
}
// Start the slaves
for ( int i = 0; i < threads.length; i++ ) {
if ( threads[i] != null ) {
threads[i].start();
}
}
// Wait until the slaves report back...
// Sending the XML over is the heaviest part
// Later we can do the others as well...
//
for ( int i = 0; i < threads.length; i++ ) {
if ( threads[i] != null ) {
threads[i].join();
if ( errors[i] != null ) {
throw new KettleException( errors[i] );
}
}
}
if ( executionConfiguration.isClusterPosting() ) {
if ( executionConfiguration.isClusterPreparing() ) {
// Prepare the master...
if ( masterSteps.size() > 0 ) // If there is something that needs to be done on the master...
{
String carteObjectId = carteObjectMap.get( master );
String masterReply =
masterServer.execService( PrepareExecutionTransServlet.CONTEXT_PATH
+ "/?name=" + URLEncoder.encode( master.getName(), "UTF-8" ) + "&id="
+ URLEncoder.encode( carteObjectId, "UTF-8" ) + "&xml=Y" );
WebResult webResult = WebResult.fromXMLString( masterReply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException(
"An error occurred while preparing the execution of the master transformation: "
+ webResult.getMessage() );
}
}
// Prepare the slaves
// WG: Should these be threaded like the above initialization?
for ( int i = 0; i < slaves.length; i++ ) {
TransMeta slaveTrans = transSplitter.getSlaveTransMap().get( slaves[i] );
String carteObjectId = carteObjectMap.get( slaveTrans );
String slaveReply =
slaves[i].execService( PrepareExecutionTransServlet.CONTEXT_PATH
+ "/?name=" + URLEncoder.encode( slaveTrans.getName(), "UTF-8" ) + "&id="
+ URLEncoder.encode( carteObjectId, "UTF-8" ) + "&xml=Y" );
WebResult webResult = WebResult.fromXMLString( slaveReply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException(
"An error occurred while preparing the execution of a slave transformation: "
+ webResult.getMessage() );
}
}
}
if ( executionConfiguration.isClusterStarting() ) {
// Start the master...
if ( masterSteps.size() > 0 ) // If there is something that needs to be done on the master...
{
String carteObjectId = carteObjectMap.get( master );
String masterReply =
masterServer.execService( StartExecutionTransServlet.CONTEXT_PATH
+ "/?name=" + URLEncoder.encode( master.getName(), "UTF-8" ) + "&id="
+ URLEncoder.encode( carteObjectId, "UTF-8" ) + "&xml=Y" );
WebResult webResult = WebResult.fromXMLString( masterReply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException(
"An error occurred while starting the execution of the master transformation: "
+ webResult.getMessage() );
}
}
// Start the slaves
// WG: Should these be threaded like the above initialization?
for ( int i = 0; i < slaves.length; i++ ) {
TransMeta slaveTrans = transSplitter.getSlaveTransMap().get( slaves[i] );
String carteObjectId = carteObjectMap.get( slaveTrans );
String slaveReply =
slaves[i].execService( StartExecutionTransServlet.CONTEXT_PATH
+ "/?name=" + URLEncoder.encode( slaveTrans.getName(), "UTF-8" ) + "&id="
+ URLEncoder.encode( carteObjectId, "UTF-8" ) + "&xml=Y" );
WebResult webResult = WebResult.fromXMLString( slaveReply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException(
"An error occurred while starting the execution of a slave transformation: "
+ webResult.getMessage() );
}
}
}
}
} catch ( KettleException ke ) {
throw ke;
} catch ( Exception e ) {
throw new KettleException( "There was an error during transformation split", e );
}
}
/**
* Monitors a clustered transformation every second,
* after all the transformations in a cluster schema are running.<br>
* Now we should verify that they are all running as they should.<br>
* If a transformation has an error, we should kill them all.<br>
* This should happen in a separate thread to prevent blocking of the UI.<br>
* <br>
* When the master and slave transformations have all finished, we should also run<br>
* a cleanup on those transformations to release sockets, etc.<br>
* <br>
*
* @param log
* the log interface channel
* @param transSplitter
* the transformation splitter object
* @param parentJob
* the parent job when executed in a job, otherwise just set to null
* @return the number of errors encountered
*/
public static final long monitorClusteredTransformation( LogChannelInterface log, TransSplitter transSplitter,
Job parentJob ) {
return monitorClusteredTransformation( log, transSplitter, parentJob, 1 ); // monitor every second
}
/**
 * Monitors a clustered transformation at the specified polling interval,
 * after all the transformations in a cluster schema are running.<br>
* Now we should verify that they are all running as they should.<br>
* If a transformation has an error, we should kill them all.<br>
* This should happen in a separate thread to prevent blocking of the UI.<br>
* <br>
* When the master and slave transformations have all finished, we should also run<br>
* a cleanup on those transformations to release sockets, etc.<br>
* <br>
*
* @param log
* the subject to use for logging
* @param transSplitter
* the transformation splitter object
* @param parentJob
* the parent job when executed in a job, otherwise just set to null
* @param sleepTimeSeconds
* the sleep time in seconds in between slave transformation status polling
* @return the number of errors encountered
*/
public static final long monitorClusteredTransformation( LogChannelInterface log, TransSplitter transSplitter,
Job parentJob, int sleepTimeSeconds ) {
long errors = 0L;
//
// See if the remote transformations have finished.
// We could just look at the master, but I doubt that that is enough in all
// situations.
//
SlaveServer[] slaveServers = transSplitter.getSlaveTargets(); // <-- ask
// these guys
TransMeta[] slaves = transSplitter.getSlaves();
Map<TransMeta, String> carteObjectMap = transSplitter.getCarteObjectMap();
SlaveServer masterServer;
try {
masterServer = transSplitter.getMasterServer();
} catch ( KettleException e ) {
log.logError( "Error getting the master server", e );
masterServer = null;
errors++;
}
TransMeta masterTransMeta = transSplitter.getMaster();
boolean allFinished = false;
while ( !allFinished && errors == 0 && ( parentJob == null || !parentJob.isStopped() ) ) {
allFinished = true;
errors = 0L;
// Slaves first...
//
for ( int s = 0; s < slaveServers.length && allFinished && errors == 0; s++ ) {
try {
String carteObjectId = carteObjectMap.get( slaves[s] );
SlaveServerTransStatus transStatus =
slaveServers[s].getTransStatus( slaves[s].getName(), carteObjectId, 0 );
if ( transStatus.isRunning() ) {
if ( log.isDetailed() ) {
log.logDetailed( "Slave transformation on '" + slaveServers[s] + "' is still running." );
}
allFinished = false;
} else {
if ( log.isDetailed() ) {
log.logDetailed( "Slave transformation on '" + slaveServers[s] + "' has finished." );
}
}
errors += transStatus.getNrStepErrors();
} catch ( Exception e ) {
errors += 1;
log.logError( "Unable to contact slave server '"
+ slaveServers[s].getName() + "' to check slave transformation : " + e.toString() );
}
}
// Check the master too
if ( allFinished && errors == 0 && masterTransMeta != null && masterTransMeta.nrSteps() > 0 ) {
try {
String carteObjectId = carteObjectMap.get( masterTransMeta );
SlaveServerTransStatus transStatus =
masterServer.getTransStatus( masterTransMeta.getName(), carteObjectId, 0 );
if ( transStatus.isRunning() ) {
if ( log.isDetailed() ) {
log.logDetailed( "Master transformation is still running." );
}
allFinished = false;
} else {
if ( log.isDetailed() ) {
log.logDetailed( "Master transformation has finished." );
}
}
Result result = transStatus.getResult( transSplitter.getOriginalTransformation() );
errors += result.getNrErrors();
} catch ( Exception e ) {
errors += 1;
log.logError( "Unable to contact master server '"
+ masterServer.getName() + "' to check master transformation : " + e.toString() );
}
}
if ( ( parentJob != null && parentJob.isStopped() ) || errors != 0 ) {
//
// Stop all slaves and the master on the slave servers
//
for ( int s = 0; s < slaveServers.length && allFinished && errors == 0; s++ ) {
try {
String carteObjectId = carteObjectMap.get( slaves[s] );
WebResult webResult = slaveServers[s].stopTransformation( slaves[s].getName(), carteObjectId );
if ( !WebResult.STRING_OK.equals( webResult.getResult() ) ) {
log.logError( "Unable to stop slave transformation '"
+ slaves[s].getName() + "' : " + webResult.getMessage() );
}
} catch ( Exception e ) {
errors += 1;
log.logError( "Unable to contact slave server '"
+ slaveServers[s].getName() + "' to stop transformation : " + e.toString() );
}
}
try {
String carteObjectId = carteObjectMap.get( masterTransMeta );
WebResult webResult = masterServer.stopTransformation( masterTransMeta.getName(), carteObjectId );
if ( !WebResult.STRING_OK.equals( webResult.getResult() ) ) {
log.logError( "Unable to stop master transformation '"
+ masterServer.getName() + "' : " + webResult.getMessage() );
}
} catch ( Exception e ) {
errors += 1;
log.logError( "Unable to contact master server '"
+ masterServer.getName() + "' to stop the master : " + e.toString() );
}
}
//
// Keep waiting until all transformations have finished
// If needed, we stop them again and again until they yield.
//
if ( !allFinished ) {
// Not finished or error: wait a bit longer
if ( log.isDetailed() ) {
log.logDetailed( "Clustered transformation is still running, waiting a few seconds..." );
}
try {
Thread.sleep( sleepTimeSeconds * 2000 );
} catch ( Exception e ) {
// Ignore errors
} // Check all slaves every x seconds.
}
}
log.logBasic( "All transformations in the cluster have finished." );
errors += cleanupCluster( log, transSplitter );
return errors;
}
/**
* Cleanup the cluster, including the master and all slaves, and return the number of errors that occurred.
*
* @param log
* the log channel interface
* @param transSplitter
* the TransSplitter object
* @return the number of errors that occurred in the clustered transformation
*/
public static int cleanupCluster( LogChannelInterface log, TransSplitter transSplitter ) {
SlaveServer[] slaveServers = transSplitter.getSlaveTargets();
TransMeta[] slaves = transSplitter.getSlaves();
SlaveServer masterServer;
try {
masterServer = transSplitter.getMasterServer();
} catch ( KettleException e ) {
log.logError( "Unable to obtain the master server from the cluster", e );
return 1;
}
TransMeta masterTransMeta = transSplitter.getMaster();
int errors = 0;
// All transformations have finished, with or without error.
// Now run a cleanup on all the transformation on the master and the slaves.
//
// Slaves first...
//
for ( int s = 0; s < slaveServers.length; s++ ) {
try {
cleanupSlaveServer( transSplitter, slaveServers[s], slaves[s] );
} catch ( Exception e ) {
errors++;
log.logError( "Unable to contact slave server '"
+ slaveServers[s].getName() + "' to clean up slave transformation", e );
}
}
// Clean up the master too
//
if ( masterTransMeta != null && masterTransMeta.nrSteps() > 0 ) {
try {
cleanupSlaveServer( transSplitter, masterServer, masterTransMeta );
} catch ( Exception e ) {
errors++;
log.logError( "Unable to contact master server '"
+ masterServer.getName() + "' to clean up master transformation", e );
}
// Also de-allocate all ports used for this clustered transformation on the master.
//
try {
// Deallocate all ports belonging to this clustered run, not anything else
//
masterServer.deAllocateServerSockets( transSplitter.getOriginalTransformation().getName(), transSplitter
.getClusteredRunId() );
} catch ( Exception e ) {
errors++;
log.logError( "Unable to contact master server '"
+ masterServer.getName() + "' to clean up port sockets for transformation'"
+ transSplitter.getOriginalTransformation().getName() + "'", e );
}
}
return errors;
}
/**
* Cleanup the slave server as part of a clustered transformation.
*
* @param transSplitter
* the TransSplitter object
* @param slaveServer
* the slave server
* @param slaveTransMeta
* the slave transformation meta-data
* @throws KettleException
* if any errors occur during cleanup
*/
public static void cleanupSlaveServer( TransSplitter transSplitter, SlaveServer slaveServer,
TransMeta slaveTransMeta ) throws KettleException {
String transName = slaveTransMeta.getName();
try {
String carteObjectId = transSplitter.getCarteObjectMap().get( slaveTransMeta );
WebResult webResult = slaveServer.cleanupTransformation( transName, carteObjectId );
if ( !WebResult.STRING_OK.equals( webResult.getResult() ) ) {
throw new KettleException( "Unable to run clean-up on slave server '"
+ slaveServer + "' for transformation '" + transName + "' : " + webResult.getMessage() );
}
} catch ( Exception e ) {
throw new KettleException( "Unexpected error contacting slave server '"
+ slaveServer + "' to clear up transformation '" + transName + "'", e );
}
}
/**
* Gets the clustered transformation result.
*
* @param log
* the log channel interface
* @param transSplitter
* the TransSplitter object
* @param parentJob
* the parent job
* @return the clustered transformation result
*/
public static final Result getClusteredTransformationResult( LogChannelInterface log,
TransSplitter transSplitter, Job parentJob ) {
return getClusteredTransformationResult( log, transSplitter, parentJob, false );
}
/**
* Gets the clustered transformation result.
*
* @param log
* the log channel interface
* @param transSplitter
* the TransSplitter object
* @param parentJob
* the parent job
* @param loggingRemoteWork
* log remote execution logs locally
* @return the clustered transformation result
*/
public static final Result getClusteredTransformationResult( LogChannelInterface log,
TransSplitter transSplitter, Job parentJob, boolean loggingRemoteWork ) {
Result result = new Result();
//
// See if the remote transformations have finished.
// We could just look at the master, but I doubt that that is enough in all situations.
//
SlaveServer[] slaveServers = transSplitter.getSlaveTargets(); // <-- ask these guys
TransMeta[] slaves = transSplitter.getSlaves();
SlaveServer masterServer;
try {
masterServer = transSplitter.getMasterServer();
} catch ( KettleException e ) {
log.logError( "Error getting the master server", e );
masterServer = null;
result.setNrErrors( result.getNrErrors() + 1 );
}
TransMeta master = transSplitter.getMaster();
// Slaves first...
//
for ( int s = 0; s < slaveServers.length; s++ ) {
try {
// Get the detailed status of the slave transformation...
//
SlaveServerTransStatus transStatus = slaveServers[s].getTransStatus( slaves[s].getName(), "", 0 );
Result transResult = transStatus.getResult( slaves[s] );
result.add( transResult );
if ( loggingRemoteWork ) {
log.logBasic( "-- Slave : " + slaveServers[s].getName() );
log.logBasic( transStatus.getLoggingString() );
}
} catch ( Exception e ) {
result.setNrErrors( result.getNrErrors() + 1 );
log.logError( "Unable to contact slave server '"
+ slaveServers[s].getName() + "' to get result of slave transformation : " + e.toString() );
}
}
// Clean up the master too
//
if ( master != null && master.nrSteps() > 0 ) {
try {
// Get the detailed status of the slave transformation...
//
SlaveServerTransStatus transStatus = masterServer.getTransStatus( master.getName(), "", 0 );
Result transResult = transStatus.getResult( master );
result.add( transResult );
if ( loggingRemoteWork ) {
log.logBasic( "-- Master : " + masterServer.getName() );
log.logBasic( transStatus.getLoggingString() );
}
} catch ( Exception e ) {
result.setNrErrors( result.getNrErrors() + 1 );
log.logError( "Unable to contact master server '"
+ masterServer.getName() + "' to get result of master transformation : " + e.toString() );
}
}
return result;
}
/**
* Send the transformation for execution to a Carte slave server.
*
* @param transMeta
* the transformation meta-data
* @param executionConfiguration
* the transformation execution configuration
* @param repository
* the repository
* @return The Carte object ID on the server.
* @throws KettleException
* if any errors occur during the dispatch to the slave server
*/
public static String sendToSlaveServer( TransMeta transMeta, TransExecutionConfiguration executionConfiguration,
Repository repository, IMetaStore metaStore ) throws KettleException {
String carteObjectId;
SlaveServer slaveServer = executionConfiguration.getRemoteServer();
if ( slaveServer == null ) {
throw new KettleException( "No slave server specified" );
}
if ( Const.isEmpty( transMeta.getName() ) ) {
throw new KettleException(
"The transformation needs a name to uniquely identify it by on the remote server." );
}
try {
// Inject certain internal variables to make it more intuitive.
//
Map<String, String> vars = new HashMap<String, String>();
for ( String var : Const.INTERNAL_TRANS_VARIABLES ) {
vars.put( var, transMeta.getVariable( var ) );
}
for ( String var : Const.INTERNAL_JOB_VARIABLES ) {
vars.put( var, transMeta.getVariable( var ) );
}
executionConfiguration.getVariables().putAll( vars );
slaveServer.injectVariables( executionConfiguration.getVariables() );
slaveServer.getLogChannel().setLogLevel( executionConfiguration.getLogLevel() );
if ( executionConfiguration.isPassingExport() ) {
// First export the job...
//
FileObject tempFile =
KettleVFS.createTempFile( "transExport", ".zip", System.getProperty( "java.io.tmpdir" ), transMeta );
TopLevelResource topLevelResource =
ResourceUtil.serializeResourceExportInterface(
tempFile.getName().toString(), transMeta, transMeta, repository, metaStore, executionConfiguration
.getXML(), CONFIGURATION_IN_EXPORT_FILENAME );
// Send the zip file over to the slave server...
//
String result =
slaveServer.sendExport(
topLevelResource.getArchiveName(), AddExportServlet.TYPE_TRANS, topLevelResource
.getBaseResourceName() );
WebResult webResult = WebResult.fromXMLString( result );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException(
"There was an error passing the exported transformation to the remote server: "
+ Const.CR + webResult.getMessage() );
}
carteObjectId = webResult.getId();
} else {
// Now send it off to the remote server...
//
String xml = new TransConfiguration( transMeta, executionConfiguration ).getXML();
String reply = slaveServer.sendXML( xml, AddTransServlet.CONTEXT_PATH + "/?xml=Y" );
WebResult webResult = WebResult.fromXMLString( reply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException( "There was an error posting the transformation on the remote server: "
+ Const.CR + webResult.getMessage() );
}
carteObjectId = webResult.getId();
}
// Prepare the transformation
//
String reply =
slaveServer.execService( PrepareExecutionTransServlet.CONTEXT_PATH
+ "/?name=" + URLEncoder.encode( transMeta.getName(), "UTF-8" ) + "&xml=Y&id=" + carteObjectId );
WebResult webResult = WebResult.fromXMLString( reply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException(
"There was an error preparing the transformation for excution on the remote server: "
+ Const.CR + webResult.getMessage() );
}
// Start the transformation
//
reply =
slaveServer.execService( StartExecutionTransServlet.CONTEXT_PATH
+ "/?name=" + URLEncoder.encode( transMeta.getName(), "UTF-8" ) + "&xml=Y&id=" + carteObjectId );
webResult = WebResult.fromXMLString( reply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException( "There was an error starting the transformation on the remote server: "
+ Const.CR + webResult.getMessage() );
}
return carteObjectId;
} catch ( KettleException ke ) {
throw ke;
} catch ( Exception e ) {
throw new KettleException( e );
}
}
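// Illustrative usage sketch (assumptions: executionConfiguration.getRemoteServer() points at a reachable Carte
// instance, and repository/metaStore are whatever the transformation needs to resolve its resources):
//
//   String carteObjectId = Trans.sendToSlaveServer( transMeta, executionConfiguration, repository, metaStore );
//   SlaveServer remoteServer = executionConfiguration.getRemoteServer();
//   Trans.monitorRemoteTransformation( log, carteObjectId, transMeta.getName(), remoteServer );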
/**
* Checks whether the transformation is ready to start (i.e. execution preparation was successful)
*
* @return true if the transformation was prepared for execution successfully, false otherwise
* @see org.pentaho.di.trans.Trans#prepareExecution(String[])
*/
public boolean isReadyToStart() {
return readyToStart;
}
/**
* Sets the internal kettle variables.
*
* @param var
* the new internal kettle variables
*/
public void setInternalKettleVariables( VariableSpace var ) {
if ( transMeta != null && !Const.isEmpty( transMeta.getFilename() ) ) // we have a filename that's defined.
{
try {
FileObject fileObject = KettleVFS.getFileObject( transMeta.getFilename(), var );
FileName fileName = fileObject.getName();
// The filename of the transformation
variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, fileName.getBaseName() );
// The directory of the transformation
FileName fileDir = fileName.getParent();
variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, fileDir.getURI() );
} catch ( KettleFileException e ) {
variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, "" );
variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, "" );
}
} else {
variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, "" );
variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, "" );
}
// The name of the transformation
variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_NAME, Const.NVL( transMeta.getName(), "" ) );
// TODO PUT THIS INSIDE OF THE "IF"
// The name of the directory in the repository
variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY, transMeta
.getRepositoryDirectory() != null ? transMeta.getRepositoryDirectory().getPath() : "" );
// Here we don't clear the definition of the job specific parameters, as they may come in handy.
// A transformation can be called from a job and may inherit the job internal variables
// but the other way around is not possible.
}
/**
* Copies variables from a given variable space to this transformation.
*
* @param space
* the variable space
* @see org.pentaho.di.core.variables.VariableSpace#copyVariablesFrom(org.pentaho.di.core.variables.VariableSpace)
*/
public void copyVariablesFrom( VariableSpace space ) {
variables.copyVariablesFrom( space );
}
/**
* Substitutes any variable values into the given string, and returns the resolved string.
*
* @param aString
* the string to resolve against environment variables
 * @return the string after variables have been resolved/substituted
* @see org.pentaho.di.core.variables.VariableSpace#environmentSubstitute(java.lang.String)
*/
public String environmentSubstitute( String aString ) {
return variables.environmentSubstitute( aString );
}
/**
* Substitutes any variable values into each of the given strings, and returns an array containing the resolved
* string(s).
*
* @param aString
* an array of strings to resolve against environment variables
 * @return the array of strings after variables have been resolved/substituted
* @see org.pentaho.di.core.variables.VariableSpace#environmentSubstitute(java.lang.String[])
*/
public String[] environmentSubstitute( String[] aString ) {
return variables.environmentSubstitute( aString );
}
/**
 * Substitutes values from the given row (and any variables) into the given string, and returns the resolved string.
 */
public String fieldSubstitute( String aString, RowMetaInterface rowMeta, Object[] rowData ) throws KettleValueException {
return variables.fieldSubstitute( aString, rowMeta, rowData );
}
/**
* Gets the parent variable space.
*
* @return the parent variable space
* @see org.pentaho.di.core.variables.VariableSpace#getParentVariableSpace()
*/
public VariableSpace getParentVariableSpace() {
return variables.getParentVariableSpace();
}
/**
* Sets the parent variable space.
*
* @param parent
* the new parent variable space
* @see org.pentaho.di.core.variables.VariableSpace#setParentVariableSpace(
* org.pentaho.di.core.variables.VariableSpace)
*/
public void setParentVariableSpace( VariableSpace parent ) {
variables.setParentVariableSpace( parent );
}
/**
* Gets the value of the specified variable, or returns a default value if no such variable exists.
*
* @param variableName
* the variable name
* @param defaultValue
* the default value
* @return the value of the specified variable, or returns a default value if no such variable exists
* @see org.pentaho.di.core.variables.VariableSpace#getVariable(java.lang.String, java.lang.String)
*/
public String getVariable( String variableName, String defaultValue ) {
return variables.getVariable( variableName, defaultValue );
}
/**
* Gets the value of the specified variable, or returns a default value if no such variable exists.
*
* @param variableName
* the variable name
* @return the value of the specified variable, or returns a default value if no such variable exists
* @see org.pentaho.di.core.variables.VariableSpace#getVariable(java.lang.String)
*/
public String getVariable( String variableName ) {
return variables.getVariable( variableName );
}
/**
* Returns a boolean representation of the specified variable after performing any necessary substitution. Truth
* values include case-insensitive versions of "Y", "YES", "TRUE" or "1".
*
* @param variableName
* the variable name
* @param defaultValue
* the default value
* @return a boolean representation of the specified variable after performing any necessary substitution
* @see org.pentaho.di.core.variables.VariableSpace#getBooleanValueOfVariable(java.lang.String, boolean)
*/
public boolean getBooleanValueOfVariable( String variableName, boolean defaultValue ) {
if ( !Const.isEmpty( variableName ) ) {
String value = environmentSubstitute( variableName );
if ( !Const.isEmpty( value ) ) {
return ValueMeta.convertStringToBoolean( value );
}
}
return defaultValue;
}
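// Illustrative usage sketch (the variable name "USE_CACHE" is hypothetical). Note that the argument is itself
// substituted, so a "${...}" expression resolves to the variable's value before the boolean conversion:
//
//   trans.setVariable( "USE_CACHE", "Y" );
//   boolean useCache = trans.getBooleanValueOfVariable( "${USE_CACHE}", false ); // true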
/**
* Sets the values of the transformation's variables to the values from the parent variables.
*
* @param parent
* the parent
* @see org.pentaho.di.core.variables.VariableSpace#initializeVariablesFrom(
* org.pentaho.di.core.variables.VariableSpace)
*/
public void initializeVariablesFrom( VariableSpace parent ) {
variables.initializeVariablesFrom( parent );
}
/**
* Gets a list of variable names for the transformation.
*
* @return a list of variable names
* @see org.pentaho.di.core.variables.VariableSpace#listVariables()
*/
public String[] listVariables() {
return variables.listVariables();
}
/**
* Sets the value of the specified variable to the specified value.
*
* @param variableName
* the variable name
* @param variableValue
* the variable value
* @see org.pentaho.di.core.variables.VariableSpace#setVariable(java.lang.String, java.lang.String)
*/
public void setVariable( String variableName, String variableValue ) {
variables.setVariable( variableName, variableValue );
}
/**
* Shares a variable space from another variable space. This means that the object should take over the space used as
* argument.
*
* @param space
* the variable space
* @see org.pentaho.di.core.variables.VariableSpace#shareVariablesWith(org.pentaho.di.core.variables.VariableSpace)
*/
public void shareVariablesWith( VariableSpace space ) {
variables = space;
}
/**
 * Injects variables using the given Map. The properties object is stored and applied at the time the VariableSpace
 * is initialized (or immediately, if the space is already initialized). After injection, the reference to the
 * properties object should be removed.
*
* @param prop
* the property map
* @see org.pentaho.di.core.variables.VariableSpace#injectVariables(java.util.Map)
*/
public void injectVariables( Map<String, String> prop ) {
variables.injectVariables( prop );
}
/**
* Pauses the transformation (pause all steps).
*/
public void pauseRunning() {
paused.set( true );
for ( StepMetaDataCombi combi : steps ) {
combi.step.pauseRunning();
}
}
/**
* Resumes running the transformation after a pause (resume all steps).
*/
public void resumeRunning() {
for ( StepMetaDataCombi combi : steps ) {
combi.step.resumeRunning();
}
paused.set( false );
}
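// Illustrative usage sketch (assumes a Trans named "trans" whose step threads have been started):
//
//   trans.pauseRunning();   // sets the paused flag and asks every step to pause
//   // ... inspect intermediate state, e.g. via trans.getSteps() ...
//   trans.resumeRunning();  // resumes every step and clears the paused flag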
/**
* Checks whether the transformation is being previewed.
*
* @return true if the transformation is being previewed, false otherwise
*/
public boolean isPreview() {
return preview;
}
/**
* Sets whether the transformation is being previewed.
*
* @param preview
* true if the transformation is being previewed, false otherwise
*/
public void setPreview( boolean preview ) {
this.preview = preview;
}
/**
* Gets the repository object for the transformation.
*
* @return the repository
*/
public Repository getRepository() {
if ( repository == null ) {
// Does the transmeta have a repo?
// This is a valid case, when a non-repo trans is attempting to retrieve
// a transformation in the repository.
if ( transMeta != null ) {
return transMeta.getRepository();
}
}
return repository;
}
/**
* Sets the repository object for the transformation.
*
* @param repository
* the repository object to set
*/
public void setRepository( Repository repository ) {
this.repository = repository;
if ( transMeta != null ) {
transMeta.setRepository( repository );
}
}
/**
* Gets a named list (map) of step performance snapshots.
*
* @return a named list (map) of step performance snapshots
*/
public Map<String, List<StepPerformanceSnapShot>> getStepPerformanceSnapShots() {
return stepPerformanceSnapShots;
}
/**
* Sets the named list (map) of step performance snapshots.
*
* @param stepPerformanceSnapShots
* a named list (map) of step performance snapshots to set
*/
public void setStepPerformanceSnapShots( Map<String, List<StepPerformanceSnapShot>> stepPerformanceSnapShots ) {
this.stepPerformanceSnapShots = stepPerformanceSnapShots;
}
/**
* Gets a list of the transformation listeners.
 * The returned list is mutable only for backward-compatibility purposes; please do not modify it externally.
*
* @return the transListeners
*/
public List<TransListener> getTransListeners() {
return transListeners;
}
/**
* Sets the list of transformation listeners.
*
* @param transListeners
* the transListeners to set
*/
public void setTransListeners( List<TransListener> transListeners ) {
this.transListeners = Collections.synchronizedList( transListeners );
}
/**
* Adds a transformation listener.
*
* @param transListener
* the trans listener
*/
public void addTransListener( TransListener transListener ) {
// PDI-5229 sync added
synchronized ( transListeners ) {
transListeners.add( transListener );
}
}
/**
* Sets the list of stop-event listeners for the transformation.
*
* @param transStoppedListeners
* the list of stop-event listeners to set
*/
public void setTransStoppedListeners( List<TransStoppedListener> transStoppedListeners ) {
this.transStoppedListeners = Collections.synchronizedList( transStoppedListeners );
}
/**
 * Gets the list of stop-event listeners for the transformation. This list is not concurrency-safe and is
 * mutable only for backward-compatibility reasons.
*
* @return the list of stop-event listeners
*/
public List<TransStoppedListener> getTransStoppedListeners() {
return transStoppedListeners;
}
/**
* Adds a stop-event listener to the transformation.
*
* @param transStoppedListener
* the stop-event listener to add
*/
public void addTransStoppedListener( TransStoppedListener transStoppedListener ) {
transStoppedListeners.add( transStoppedListener );
}
/**
* Checks if the transformation is paused.
*
* @return true if the transformation is paused, false otherwise
*/
public boolean isPaused() {
return paused.get();
}
/**
* Checks if the transformation is stopped.
*
* @return true if the transformation is stopped, false otherwise
*/
public boolean isStopped() {
return stopped.get();
}
/**
* Monitors a remote transformation every 5 seconds.
*
* @param log
* the log channel interface
* @param carteObjectId
* the Carte object ID
* @param transName
* the transformation name
* @param remoteSlaveServer
* the remote slave server
*/
public static void monitorRemoteTransformation( LogChannelInterface log, String carteObjectId, String transName,
SlaveServer remoteSlaveServer ) {
monitorRemoteTransformation( log, carteObjectId, transName, remoteSlaveServer, 5 );
}
/**
* Monitors a remote transformation at the specified interval.
*
* @param log
* the log channel interface
* @param carteObjectId
* the Carte object ID
* @param transName
* the transformation name
* @param remoteSlaveServer
* the remote slave server
* @param sleepTimeSeconds
* the sleep time (in seconds)
*/
public static void monitorRemoteTransformation( LogChannelInterface log, String carteObjectId, String transName,
SlaveServer remoteSlaveServer, int sleepTimeSeconds ) {
long errors = 0;
boolean allFinished = false;
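    // Poll the slave server until it reports the transformation has finished, or until an error occurs.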
while ( !allFinished && errors == 0 ) {
allFinished = true;
errors = 0L;
// Check the remote server
if ( allFinished && errors == 0 ) {
try {
SlaveServerTransStatus transStatus = remoteSlaveServer.getTransStatus( transName, carteObjectId, 0 );
if ( transStatus.isRunning() ) {
if ( log.isDetailed() ) {
log.logDetailed( transName, "Remote transformation is still running." );
}
allFinished = false;
} else {
if ( log.isDetailed() ) {
log.logDetailed( transName, "Remote transformation has finished." );
}
}
Result result = transStatus.getResult();
errors += result.getNrErrors();
} catch ( Exception e ) {
errors += 1;
log.logError( transName, "Unable to contact remote slave server '"
+ remoteSlaveServer.getName() + "' to check transformation status : " + e.toString() );
}
}
//
      // Keep waiting until the remote transformation has finished or an error has been detected.
//
if ( !allFinished ) {
// Not finished or error: wait a bit longer
if ( log.isDetailed() ) {
log.logDetailed( transName, "The remote transformation is still running, waiting a few seconds..." );
}
try {
Thread.sleep( sleepTimeSeconds * 1000 );
} catch ( Exception e ) {
// Ignore errors
} // Check all slaves every x seconds.
}
}
log.logMinimal( transName, "The remote transformation has finished." );
// Clean up the remote transformation
//
try {
WebResult webResult = remoteSlaveServer.cleanupTransformation( transName, carteObjectId );
if ( !WebResult.STRING_OK.equals( webResult.getResult() ) ) {
log.logError( transName, "Unable to run clean-up on remote transformation '"
+ transName + "' : " + webResult.getMessage() );
errors += 1;
}
} catch ( Exception e ) {
errors += 1;
log.logError( transName, "Unable to contact slave server '"
+ remoteSlaveServer.getName() + "' to clean up transformation : " + e.toString() );
}
}
/**
* Adds a parameter definition to this transformation.
*
* @param key
* the name of the parameter
* @param defValue
* the default value for the parameter
* @param description
* the description of the parameter
* @throws DuplicateParamException
* the duplicate param exception
* @see org.pentaho.di.core.parameters.NamedParams#addParameterDefinition(java.lang.String, java.lang.String,
* java.lang.String)
*/
public void addParameterDefinition( String key, String defValue, String description ) throws DuplicateParamException {
namedParams.addParameterDefinition( key, defValue, description );
}
/**
* Gets the default value of the specified parameter.
*
* @param key
* the name of the parameter
* @return the default value of the parameter
* @throws UnknownParamException
* if the parameter does not exist
* @see org.pentaho.di.core.parameters.NamedParams#getParameterDefault(java.lang.String)
*/
public String getParameterDefault( String key ) throws UnknownParamException {
return namedParams.getParameterDefault( key );
}
/**
* Gets the description of the specified parameter.
*
* @param key
* the name of the parameter
* @return the parameter description
* @throws UnknownParamException
* if the parameter does not exist
* @see org.pentaho.di.core.parameters.NamedParams#getParameterDescription(java.lang.String)
*/
public String getParameterDescription( String key ) throws UnknownParamException {
return namedParams.getParameterDescription( key );
}
/**
* Gets the value of the specified parameter.
*
* @param key
* the name of the parameter
* @return the parameter value
* @throws UnknownParamException
* if the parameter does not exist
* @see org.pentaho.di.core.parameters.NamedParams#getParameterValue(java.lang.String)
*/
public String getParameterValue( String key ) throws UnknownParamException {
return namedParams.getParameterValue( key );
}
/**
* Gets a list of the parameters for the transformation.
*
* @return an array of strings containing the names of all parameters for the transformation
* @see org.pentaho.di.core.parameters.NamedParams#listParameters()
*/
public String[] listParameters() {
return namedParams.listParameters();
}
/**
* Sets the value for the specified parameter.
*
* @param key
* the name of the parameter
* @param value
* the name of the value
* @throws UnknownParamException
* if the parameter does not exist
* @see org.pentaho.di.core.parameters.NamedParams#setParameterValue(java.lang.String, java.lang.String)
*/
public void setParameterValue( String key, String value ) throws UnknownParamException {
namedParams.setParameterValue( key, value );
}
/**
* Remove all parameters.
*
* @see org.pentaho.di.core.parameters.NamedParams#eraseParameters()
*/
public void eraseParameters() {
namedParams.eraseParameters();
}
/**
* Clear the values of all parameters.
*
* @see org.pentaho.di.core.parameters.NamedParams#clearParameters()
*/
public void clearParameters() {
namedParams.clearParameters();
}
/**
* Activates all parameters by setting their values. If no values already exist, the method will attempt to set the
* parameter to the default value. If no default value exists, the method will set the value of the parameter to the
* empty string ("").
*
* @see org.pentaho.di.core.parameters.NamedParams#activateParameters()
*/
public void activateParameters() {
String[] keys = listParameters();
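    // Prefer an explicitly set parameter value; otherwise fall back to the parameter's default, then to "".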
for ( String key : keys ) {
String value;
try {
value = getParameterValue( key );
} catch ( UnknownParamException e ) {
value = "";
}
String defValue;
try {
defValue = getParameterDefault( key );
} catch ( UnknownParamException e ) {
defValue = "";
}
if ( Const.isEmpty( value ) ) {
setVariable( key, Const.NVL( defValue, "" ) );
} else {
setVariable( key, Const.NVL( value, "" ) );
}
}
}
/**
* Copy parameters from a NamedParams object.
*
* @param params
* the NamedParams object from which to copy the parameters
* @see org.pentaho.di.core.parameters.NamedParams#copyParametersFrom(org.pentaho.di.core.parameters.NamedParams)
*/
public void copyParametersFrom( NamedParams params ) {
namedParams.copyParametersFrom( params );
}
/**
* Gets the parent transformation, which is null if no parent transformation exists.
*
* @return a reference to the parent transformation's Trans object, or null if no parent transformation exists
*/
public Trans getParentTrans() {
return parentTrans;
}
/**
* Sets the parent transformation.
*
* @param parentTrans
* the parentTrans to set
*/
public void setParentTrans( Trans parentTrans ) {
this.logLevel = parentTrans.getLogLevel();
this.log.setLogLevel( logLevel );
this.parentTrans = parentTrans;
transactionId = calculateTransactionId();
}
/**
* Gets the mapping step name.
*
* @return the name of the mapping step that created this transformation
*/
public String getMappingStepName() {
return mappingStepName;
}
/**
* Sets the mapping step name.
*
* @param mappingStepName
* the name of the mapping step that created this transformation
*/
public void setMappingStepName( String mappingStepName ) {
this.mappingStepName = mappingStepName;
}
/**
* Sets the socket repository.
*
* @param socketRepository
* the new socket repository
*/
public void setSocketRepository( SocketRepository socketRepository ) {
this.socketRepository = socketRepository;
}
/**
* Gets the socket repository.
*
* @return the socket repository
*/
public SocketRepository getSocketRepository() {
return socketRepository;
}
/**
* Gets the object name.
*
* @return the object name
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectName()
*/
public String getObjectName() {
return getName();
}
/**
* Gets the object copy. For Trans, this always returns null
*
* @return null
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectCopy()
*/
public String getObjectCopy() {
return null;
}
/**
* Gets the filename of the transformation, or null if no filename exists
*
* @return the filename
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getFilename()
*/
public String getFilename() {
if ( transMeta == null ) {
return null;
}
return transMeta.getFilename();
}
/**
* Gets the log channel ID.
*
* @return the log channel ID
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getLogChannelId()
*/
public String getLogChannelId() {
return log.getLogChannelId();
}
/**
* Gets the object ID.
*
* @return the object ID
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectId()
*/
public ObjectId getObjectId() {
if ( transMeta == null ) {
return null;
}
return transMeta.getObjectId();
}
/**
* Gets the object revision.
*
* @return the object revision
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectRevision()
*/
public ObjectRevision getObjectRevision() {
if ( transMeta == null ) {
return null;
}
return transMeta.getObjectRevision();
}
/**
* Gets the object type. For Trans, this always returns LoggingObjectType.TRANS
*
* @return the object type
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectType()
*/
public LoggingObjectType getObjectType() {
return LoggingObjectType.TRANS;
}
/**
* Gets the parent logging object interface.
*
* @return the parent
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getParent()
*/
public LoggingObjectInterface getParent() {
return parent;
}
/**
* Gets the repository directory.
*
* @return the repository directory
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getRepositoryDirectory()
*/
public RepositoryDirectoryInterface getRepositoryDirectory() {
if ( transMeta == null ) {
return null;
}
return transMeta.getRepositoryDirectory();
}
/**
* Gets the log level.
*
* @return the log level
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getLogLevel()
*/
public LogLevel getLogLevel() {
return logLevel;
}
/**
* Sets the log level.
*
* @param logLevel
* the new log level
*/
public void setLogLevel( LogLevel logLevel ) {
this.logLevel = logLevel;
log.setLogLevel( logLevel );
}
/**
* Gets the logging hierarchy.
*
* @return the logging hierarchy
*/
public List<LoggingHierarchy> getLoggingHierarchy() {
List<LoggingHierarchy> hierarchy = new ArrayList<LoggingHierarchy>();
List<String> childIds = LoggingRegistry.getInstance().getLogChannelChildren( getLogChannelId() );
for ( String childId : childIds ) {
LoggingObjectInterface loggingObject = LoggingRegistry.getInstance().getLoggingObject( childId );
if ( loggingObject != null ) {
hierarchy.add( new LoggingHierarchy( getLogChannelId(), batchId, loggingObject ) );
}
}
return hierarchy;
}
/**
* Gets the active sub-transformations.
*
* @return a map (by name) of the active sub-transformations
*/
public Map<String, Trans> getActiveSubtransformations() {
return activeSubtransformations;
}
/**
* Gets the active sub-jobs.
*
* @return a map (by name) of the active sub-jobs
*/
public Map<String, Job> getActiveSubjobs() {
return activeSubjobs;
}
/**
* Gets the container object ID.
*
* @return the Carte object ID
*/
public String getContainerObjectId() {
return containerObjectId;
}
/**
* Sets the container object ID.
*
* @param containerObjectId
* the Carte object ID to set
*/
public void setContainerObjectId( String containerObjectId ) {
this.containerObjectId = containerObjectId;
}
/**
* Gets the registration date. For Trans, this always returns null
*
* @return null
*/
public Date getRegistrationDate() {
return null;
}
/**
* Sets the servlet print writer.
*
* @param servletPrintWriter
* the new servlet print writer
*/
public void setServletPrintWriter( PrintWriter servletPrintWriter ) {
this.servletPrintWriter = servletPrintWriter;
}
/**
* Gets the servlet print writer.
*
* @return the servlet print writer
*/
public PrintWriter getServletPrintWriter() {
return servletPrintWriter;
}
/**
* Gets the name of the executing server.
*
* @return the executingServer
*/
public String getExecutingServer() {
return executingServer;
}
/**
* Sets the name of the executing server.
*
* @param executingServer
* the executingServer to set
*/
public void setExecutingServer( String executingServer ) {
this.executingServer = executingServer;
}
/**
* Gets the name of the executing user.
*
* @return the executingUser
*/
public String getExecutingUser() {
return executingUser;
}
/**
* Sets the name of the executing user.
*
* @param executingUser
* the executingUser to set
*/
public void setExecutingUser( String executingUser ) {
this.executingUser = executingUser;
}
@Override
public boolean isGatheringMetrics() {
return log != null && log.isGatheringMetrics();
}
@Override
public void setGatheringMetrics( boolean gatheringMetrics ) {
if ( log != null ) {
log.setGatheringMetrics( gatheringMetrics );
}
}
@Override
public boolean isForcingSeparateLogging() {
return log != null && log.isForcingSeparateLogging();
}
@Override
public void setForcingSeparateLogging( boolean forcingSeparateLogging ) {
if ( log != null ) {
log.setForcingSeparateLogging( forcingSeparateLogging );
}
}
public List<ResultFile> getResultFiles() {
return resultFiles;
}
public void setResultFiles( List<ResultFile> resultFiles ) {
this.resultFiles = resultFiles;
}
public List<RowMetaAndData> getResultRows() {
return resultRows;
}
public void setResultRows( List<RowMetaAndData> resultRows ) {
this.resultRows = resultRows;
}
public Result getPreviousResult() {
return previousResult;
}
public void setPreviousResult( Result previousResult ) {
this.previousResult = previousResult;
}
public Hashtable<String, Counter> getCounters() {
return counters;
}
public void setCounters( Hashtable<String, Counter> counters ) {
this.counters = counters;
}
public String[] getArguments() {
return arguments;
}
public void setArguments( String[] arguments ) {
this.arguments = arguments;
}
/**
* Clear the error in the transformation, clear all the rows from all the row sets, to make sure the transformation
* can continue with other data. This is intended for use when running single threaded.
*/
public void clearError() {
stopped.set( false );
errors.set( 0 );
setFinished( false );
for ( StepMetaDataCombi combi : steps ) {
StepInterface step = combi.step;
for ( RowSet rowSet : step.getInputRowSets() ) {
rowSet.clear();
}
step.setStopped( false );
}
}
/**
* Gets the transaction ID for the transformation.
*
* @return the transactionId
*/
public String getTransactionId() {
return transactionId;
}
/**
* Sets the transaction ID for the transformation.
*
* @param transactionId
* the transactionId to set
*/
public void setTransactionId( String transactionId ) {
this.transactionId = transactionId;
}
/**
* Calculates the transaction ID for the transformation.
*
* @return the calculated transaction ID for the transformation.
*/
public String calculateTransactionId() {
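    // With unique connections enabled, reuse the parent job's or parent transformation's transaction ID when
    // available; otherwise request a new one from the connection map. Without unique connections, the current
    // thread name serves as the identifier.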
if ( getTransMeta() != null && getTransMeta().isUsingUniqueConnections() ) {
if ( parentJob != null && parentJob.getTransactionId() != null ) {
return parentJob.getTransactionId();
} else if ( parentTrans != null && parentTrans.getTransMeta().isUsingUniqueConnections() ) {
return parentTrans.getTransactionId();
} else {
return DatabaseConnectionMap.getInstance().getNextTransactionId();
}
} else {
return Thread.currentThread().getName();
}
}
public IMetaStore getMetaStore() {
return metaStore;
}
public void setMetaStore( IMetaStore metaStore ) {
this.metaStore = metaStore;
if ( transMeta != null ) {
transMeta.setMetaStore( metaStore );
}
}
/**
 * Sets the character encoding of the HttpServletResponse from the KETTLE_DEFAULT_SERVLET_ENCODING system
 * property. If that property is set and not blank, it is applied to the response; any error while applying it
 * is written to the log. Throws IllegalArgumentException if the response parameter is null.
*
* @param response
 *          the HttpServletResponse on which to set the encoding; must not be null
*/
public void setServletReponse( HttpServletResponse response ) {
if ( response == null ) {
throw new IllegalArgumentException( "Response is not valid: " + response );
}
String encoding = System.getProperty( "KETTLE_DEFAULT_SERVLET_ENCODING", null );
    // isBlank( encoding ) is true if the encoding is null, empty, or whitespace-only (e.g. " ")
if ( !StringUtils.isBlank( encoding ) ) {
try {
response.setCharacterEncoding( encoding.trim() );
response.setContentType( "text/html; charset=" + encoding );
} catch ( Exception ex ) {
LogChannel.GENERAL.logError( "Unable to encode data with encoding : '" + encoding + "'", ex );
}
}
this.servletResponse = response;
}
public HttpServletResponse getServletResponse() {
return servletResponse;
}
public void setServletRequest( HttpServletRequest request ) {
this.servletRequest = request;
}
public HttpServletRequest getServletRequest() {
return servletRequest;
}
public List<DelegationListener> getDelegationListeners() {
return delegationListeners;
}
public void setDelegationListeners( List<DelegationListener> delegationListeners ) {
this.delegationListeners = delegationListeners;
}
public void addDelegationListener( DelegationListener delegationListener ) {
delegationListeners.add( delegationListener );
}
public synchronized void doTopologySortOfSteps() {
// The bubble sort algorithm in contrast to the QuickSort or MergeSort
// algorithms
// does indeed cover all possibilities.
// Sorting larger transformations with hundreds of steps might be too slow
// though.
// We should consider caching TransMeta.findPrevious() results in that case.
//
transMeta.clearCaches();
//
// Cocktail sort (bi-directional bubble sort)
//
// Original sort was taking 3ms for 30 steps
// cocktail sort takes about 8ms for the same 30, but it works :)
//
int stepsMinSize = 0;
int stepsSize = steps.size();
// Noticed a problem with an immediate shrinking iteration window
// trapping rows that need to be sorted.
// This threshold buys us some time to get the sorting close before
// starting to decrease the window size.
//
// TODO: this could become much smarter by tracking row movement
    // and reacting to that each outer iteration versus
// using a threshold.
//
// After this many iterations enable trimming inner iteration
// window on no change being detected.
//
int windowShrinkThreshold = (int) Math.round( stepsSize * 0.75 );
// give ourselves some room to sort big lists. the window threshold should
// stop us before reaching this anyway.
//
int totalIterations = stepsSize * 2;
boolean isBefore = false;
boolean forwardChange = false;
boolean backwardChange = false;
boolean lastForwardChange = true;
boolean keepSortingForward = true;
StepMetaDataCombi one = null;
StepMetaDataCombi two = null;
for ( int x = 0; x < totalIterations; x++ ) {
// Go forward through the list
//
if ( keepSortingForward ) {
for ( int y = stepsMinSize; y < stepsSize - 1; y++ ) {
one = steps.get( y );
two = steps.get( y + 1 );
if ( one.stepMeta.equals( two.stepMeta ) ) {
isBefore = one.copy > two.copy;
} else {
isBefore = transMeta.findPrevious( one.stepMeta, two.stepMeta );
}
if ( isBefore ) {
// two was found to be positioned BEFORE one so we need to
// switch them...
//
steps.set( y, two );
steps.set( y + 1, one );
forwardChange = true;
}
}
}
// Go backward through the list
//
for ( int z = stepsSize - 1; z > stepsMinSize; z-- ) {
one = steps.get( z );
two = steps.get( z - 1 );
if ( one.stepMeta.equals( two.stepMeta ) ) {
isBefore = one.copy > two.copy;
} else {
isBefore = transMeta.findPrevious( one.stepMeta, two.stepMeta );
}
if ( !isBefore ) {
// two was found NOT to be positioned BEFORE one so we need to
// switch them...
//
steps.set( z, two );
steps.set( z - 1, one );
backwardChange = true;
}
}
// Shrink stepsSize(max) if there was no forward change
//
if ( x > windowShrinkThreshold && !forwardChange ) {
// should we keep going? check the window size
//
stepsSize--;
if ( stepsSize <= stepsMinSize ) {
break;
}
}
// shrink stepsMinSize(min) if there was no backward change
//
if ( x > windowShrinkThreshold && !backwardChange ) {
// should we keep going? check the window size
//
stepsMinSize++;
if ( stepsMinSize >= stepsSize ) {
break;
}
}
// End of both forward and backward traversal.
// Time to see if we should keep going.
//
if ( !forwardChange && !backwardChange ) {
break;
}
//
// if we are past the first iteration and there has been no change twice,
// quit doing it!
//
if ( keepSortingForward && x > 0 && !lastForwardChange && !forwardChange ) {
keepSortingForward = false;
}
lastForwardChange = forwardChange;
forwardChange = false;
backwardChange = false;
} // finished sorting
}
@Override
public Map<String, Object> getExtensionDataMap() {
return extensionDataMap;
}
}
| apache-2.0 |
jsmale/MassTransit | src/MassTransit.RabbitMqTransport.Tests/ConcurrencyFilter_Specs.cs | 2753 | // Copyright 2007-2016 Chris Patterson, Dru Sellers, Travis Smith, et. al.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
namespace MassTransit.RabbitMqTransport.Tests
{
using System.Threading;
using System.Threading.Tasks;
using NUnit.Framework;
[TestFixture]
public class Using_a_consumer_concurrency_limit :
RabbitMqTestFixture
{
[Test]
public async Task Should_limit_the_consumer()
{
_complete = GetTask<bool>();
for (var i = 0; i < _messageCount; i++)
{
Bus.Publish(new A());
}
await _complete.Task;
Assert.AreEqual(1, _consumer.MaxDeliveryCount);
}
Consumer _consumer;
static int _messageCount = 100;
static TaskCompletionSource<bool> _complete;
protected override void ConfigureInputQueueEndpoint(IRabbitMqReceiveEndpointConfigurator configurator)
{
base.ConfigureInputQueueEndpoint(configurator);
_consumer = new Consumer();
configurator.Consumer(() => _consumer, x => x.UseConcurrencyLimit(1));
}
class Consumer :
IConsumer<A>
{
int _currentPendingDeliveryCount;
long _deliveryCount;
int _maxPendingDeliveryCount;
public int MaxDeliveryCount
{
get { return _maxPendingDeliveryCount; }
}
public async Task Consume(ConsumeContext<A> context)
{
Interlocked.Increment(ref _deliveryCount);
var current = Interlocked.Increment(ref _currentPendingDeliveryCount);
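                // Lock-free maximum tracking: retry the compare-exchange until the recorded maximum
                // is at least as large as the current number of pending deliveries.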
while (current > _maxPendingDeliveryCount)
Interlocked.CompareExchange(ref _maxPendingDeliveryCount, current, _maxPendingDeliveryCount);
await Task.Delay(100);
Interlocked.Decrement(ref _currentPendingDeliveryCount);
if (_deliveryCount == _messageCount)
_complete.TrySetResult(true);
}
}
class A
{
}
}
} | apache-2.0 |
derekchiang/keystone | keystone/openstack/common/rpc/zmq_receiver.py | 1154 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
eventlet.monkey_patch()
import contextlib
import sys
from oslo.config import cfg
from keystone.openstack.common import log as logging
from keystone.openstack.common import rpc
from keystone.openstack.common.rpc import impl_zmq
CONF = cfg.CONF
CONF.register_opts(rpc.rpc_opts)
CONF.register_opts(impl_zmq.zmq_opts)
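# Entry point: run the ZeroMQ proxy, consuming messages in a green thread until it exits.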
def main():
CONF(sys.argv[1:], project='oslo')
logging.setup("oslo")
with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor:
reactor.consume_in_thread()
reactor.wait()
| apache-2.0 |
deeplearning4j/deeplearning4j | arbiter/arbiter-deeplearning4j/src/main/java/org/deeplearning4j/arbiter/dropout/GaussianNoiseSpace.java | 2098 | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
package org.deeplearning4j.arbiter.dropout;
import lombok.AllArgsConstructor;
import org.deeplearning4j.arbiter.optimize.api.ParameterSpace;
import org.deeplearning4j.arbiter.optimize.parameter.FixedValue;
import org.deeplearning4j.nn.conf.dropout.GaussianNoise;
import org.deeplearning4j.nn.conf.dropout.IDropout;
import java.util.Collections;
import java.util.List;
import java.util.Map;
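/**
 * {@link ParameterSpace} over {@link GaussianNoise} dropout, parameterized by the noise standard deviation.
 * Illustrative usage (assuming the continuous space class from this module):
 * {@code new GaussianNoiseSpace(new ContinuousParameterSpace(0.05, 0.2))} searches standard deviations
 * between 0.05 and 0.2.
 */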
@AllArgsConstructor
public class GaussianNoiseSpace implements ParameterSpace<IDropout> {
private ParameterSpace<Double> stddev;
public GaussianNoiseSpace(double stddev){
this(new FixedValue<>(stddev));
}
@Override
public IDropout getValue(double[] parameterValues) {
return new GaussianNoise(stddev.getValue(parameterValues));
}
@Override
public int numParameters() {
return stddev.numParameters();
}
@Override
public List<ParameterSpace> collectLeaves() {
return Collections.<ParameterSpace>singletonList(stddev);
}
@Override
public Map<String, ParameterSpace> getNestedSpaces() {
return Collections.<String,ParameterSpace>singletonMap("stddev", stddev);
}
@Override
public boolean isLeaf() {
return false;
}
@Override
public void setIndices(int... indices) {
stddev.setIndices(indices);
}
}
| apache-2.0 |
srowen/oryx | docs/docs/admin.html | 31501 |
<!DOCTYPE html>
<!--
Generated by Apache Maven Doxia at 2018-11-24
Rendered using Reflow Maven Skin 1.1.1 (http://andriusvelykis.github.io/reflow-maven-skin)
-->
<html xml:lang="en" lang="en">
<head>
<meta charset="UTF-8" />
<title>Oryx – Docs: Admin</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta name="description" content="" />
<meta http-equiv="content-language" content="en" />
<link href="http://netdna.bootstrapcdn.com/bootswatch/2.3.2/united/bootstrap.min.css" rel="stylesheet" />
<link href="http://netdna.bootstrapcdn.com/twitter-bootstrap/2.3.1/css/bootstrap-responsive.min.css" rel="stylesheet" />
<link href="../css/bootswatch.css" rel="stylesheet" />
<link href="../css/reflow-skin.css" rel="stylesheet" />
<link href="../css/lightbox.css" rel="stylesheet" />
<link href="../css/site.css" rel="stylesheet" />
<link href="../css/print.css" rel="stylesheet" media="print" />
<!-- Le HTML5 shim, for IE6-8 support of HTML5 elements -->
<!--[if lt IE 9]>
<script src="http://html5shim.googlecode.com/svn/trunk/html5.js"></script>
<![endif]-->
</head>
<body class="page-docs-admin project-oryx" data-spy="scroll" data-offset="60" data-target="#toc-scroll-target">
<div class="navbar navbar-fixed-top">
<div class="navbar-inner">
<div class="container">
<a class="btn btn-navbar" data-toggle="collapse" data-target="#top-nav-collapse">
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</a>
<a class="brand" href="">Oryx 2</a>
<div class="nav-collapse collapse" id="top-nav-collapse">
<ul class="nav pull-right">
<li ><a href="../" title="Overview">Overview</a></li>
<li ><a href="endusers.html" title="Docs: End Users">Docs: End Users</a></li>
<li ><a href="developer.html" title="Docs: Dev">Docs: Dev</a></li>
<li class="active"><a href="" title="Docs: Admin">Docs: Admin</a></li>
<li ><a href="performance.html" title="Performance">Performance</a></li>
<li ><a href="../apidocs/index.html" title="Javadoc">Javadoc</a></li>
<li ><a href="https://github.com/OryxProject/oryx" title="GitHub" class="externalLink">GitHub</a></li>
<li ><a href="https://github.com/OryxProject/oryx/releases" title="Download" class="externalLink">Download</a></li>
</ul>
</div><!--/.nav-collapse -->
</div>
</div>
</div>
<div class="container">
<!-- Masthead
================================================== -->
<header>
</header>
<div class="main-body">
<div class="row">
<div class="span8">
<div class="body-content">
<div class="page-header">
<h1 id="cluster_setup">Cluster Setup</h1>
</div>
<p>The following are required as of Oryx 2.8.0:</p>
<ul>
<li>Java 8 or later (JRE only is required)</li>
<li>Scala 2.11 or later</li>
<li>A <a class="externalLink" href="https://hadoop.apache.org/">Apache Hadoop</a> cluster running the following components:
<ul>
<li><a class="externalLink" href="https://hadoop.apache.org/">Apache Hadoop</a> 3.0 or later</li>
<li><a class="externalLink" href="https://zookeeper.apache.org/">Apache Zookeeper</a> 3.4.5 or later</li>
<li><a class="externalLink" href="https://kafka.apache.org/">Apache Kafka</a> 0.11.0 or later</li>
<li><a class="externalLink" href="https://spark.apache.org/">Apache Spark</a> 2.4.0 or later</li>
</ul></li>
</ul>
<div class="section">
<h2 id="Requirements_Matrix_Summary">Requirements Matrix Summary</h2>
<p>This table summarizes the version of several key components targeted by each version.</p>
<table border="0" class="bodyTable table table-striped table-hover">
<thead>
<tr class="a">
<th>Oryx </th>
<th>Spark </th>
<th>Hadoop </th>
<th>Suggested CDH </th>
</tr>
</thead>
<tbody>
<tr class="b">
<td>2.8.x </td>
<td>2.4.x </td>
<td>3.0+ </td>
<td>6.0+ </td>
</tr>
<tr class="a">
<td>2.7.x </td>
<td>2.3.x </td>
<td>2.7+ </td>
<td>5.15+ </td>
</tr>
</tbody>
</table>
<p>Previous versions supported older versions, though are no longer maintained:</p>
<table border="0" class="bodyTable table table-striped table-hover">
<thead>
<tr class="a">
<th>Oryx </th>
<th>Java </th>
<th>Scala </th>
<th>Spark </th>
<th>Kafka </th>
<th>CDH </th>
</tr>
</thead>
<tbody>
<tr class="b">
<td>2.6.x </td>
<td>8 </td>
<td>2.11 </td>
<td>2.2.x </td>
<td>0.11.x </td>
<td>5.12 </td>
</tr>
<tr class="a">
<td>2.5.x </td>
<td>8 </td>
<td>2.11 </td>
<td>2.1.x </td>
<td>0.10.2 </td>
<td>5.12 </td>
</tr>
<tr class="b">
<td>2.4.x </td>
<td>8 </td>
<td>2.11 </td>
<td>2.1.x </td>
<td>0.10.0 </td>
<td>5.11 </td>
</tr>
<tr class="a">
<td>2.3.x </td>
<td>8 </td>
<td>2.11 </td>
<td>2.0.x </td>
<td>0.9.x </td>
<td>5.10 </td>
</tr>
<tr class="b">
<td>2.2.x </td>
<td>8 </td>
<td>2.10 </td>
<td>1.6.x </td>
<td>0.9.x </td>
<td>5.7 </td>
</tr>
<tr class="a">
<td>2.1.x </td>
<td>7 </td>
<td>2.10 </td>
<td>1.5.x </td>
<td>0.8.x </td>
<td>5.5 </td>
</tr>
<tr class="b">
<td>2.0.x </td>
<td>7 </td>
<td>2.10 </td>
<td>1.3.x </td>
<td>0.8.x </td>
<td>5.4 </td>
</tr>
</tbody>
</table>
</div>
<div class="section">
<h2 id="Deployment_Architecture">Deployment Architecture</h2>
<p>Because the Batch and Speed Layers are Spark applications, they need to run within a cluster. The applications themselves run the drivers for these Spark applications, and these may run on an edge node in the cluster like any other Spark application. That is, the binaries themselves do not need to run on a node that also runs a particular service, but they do need to run on a node within the cluster, because both Layer applications interact extensively with compute and storage within the cluster.</p>
<p>The Serving Layer may be run within the cluster too, and may be run via YARN on any node. However it’s common to consider deploying this Layer, which exposes an API to external services, on a node that is not within the cluster. This is possible. The Serving Layer must be able to communicate with a Kafka broker, at a minimum.</p>
<p>In some applications, the Serving Layer also needs to read large models directly from HDFS. In these cases, it would also have to access HDFS. This is only required in applications that must write large models to HDFS. This is closely related to <tt>oryx.update-topic.message.max-size</tt> and the maximum size message that Kafka can support.</p>
</div>
<div class="section">
<h2 id="Services">Services</h2>
<p>Install and configure the Hadoop cluster normally. The following services need to be enabled:</p>
<ul>
<li>HDFS</li>
<li>YARN</li>
<li>Zookeeper</li>
<li>Kafka</li>
<li>Spark 2</li>
</ul>
<p>Note that for CDH 5.x, Spark 2.3 is available as an <a class="externalLink" href="https://www.cloudera.com/documentation/spark2/latest/topics/spark2.html">add on</a>.</p>
<p>Kafka is available as a parcel from <a class="externalLink" href="https://www.cloudera.com/documentation/kafka/latest/topics/kafka_packaging.html">Cloudera</a>. The Cloudera Kafka 3.x parcel is required, because it contains a distribution of Kafka 0.11. </p>
<p>Determine the (possibly several) Kafka brokers that are configured in the cluster, under Instances, and note their hosts and port. The port is typically 9092. Same for the Zookeeper servers; the default port here is 2181. Default ports will be used in subsequent examples.</p>
<p>Where a Kafka broker or Zookeeper server is called for, you can and should specify a comma-separated list of <tt>host:port</tt> pairs where there are multiple hosts. Example: <tt>your-zk-1:2181,your-zk-2:2181</tt>.</p>
<p>Also note whether your Zookeeper instance is using a chroot path. This is simply a path suffixed to the host:port, like <tt>your-zk:2181/your-chroot</tt>. It is often <tt>/kafka</tt> if it is set, and this is the default on Cloudera’s distribution, in some versions. You can omit this if you are not using a chroot. </p>
<p>Note: if you have multiple Zookeeper servers, and a chroot, only add the chroot once, at the end: <tt>your-zk-1:2181,your-zk-2:2181/kafka</tt></p>
</div>
<div class="section">
<h2 id="Java">Java</h2>
<p>Java 8 (JRE) needs to be installed on all nodes on the cluster. Cluster processes need to use Java 8. Depending on the nature of your Hadoop cluster installation, this may mean updating the default Java version with <tt>update-alternatives --config java</tt> or equivalent, or setting <tt>JAVA_HOME</tt> to point to a Java 8 installation.</p>
<p>For CDH in particular, however, instead see <a class="externalLink" href="https://www.cloudera.com/documentation/enterprise/latest/topics/cm_ig_java_home_location.html">Configuring a Custom Java Home Location</a></p>
</div>
<div class="section">
<h2 id="Configuring_Kafka">Configuring Kafka</h2>
<p>Oryx will use two Kafka topics for data transport. One carries input data to the batch and Speed Layer, and the other carries model updates from there on to the Serving Layer. The default names of these topics are <tt>OryxInput</tt> and <tt>OryxUpdate</tt> respectively. They need to be created before Oryx is started.</p>
<p>The number of partitions for the <i>input</i> topic will affect the number of partitions, and therefore parallelism, of the Spark Streaming jobs that consume them. For example, the Batch Layer reads partitions of historical data from HDFS and from Kafka. If the input topic has just one partition but a large amount of data arrives per interval, then the Kafka-based partition of the input may be relatively very large and take a long time to process. A good rule of thumb may be to choose a number of topic partitions such that the amount of data that arrives in one batch interval, per partition, is expected to be under the size of one HDFS block, which is 128MB by default. So if you have 1.28GB arriving per batch interval, at least 10 partitions is probably a good idea to make sure the data can be processed in reasonably sized chunks, and with enough parallelism.</p>
<p>The provided <tt>oryx-run.sh kafka-setup</tt> script configures a default of 4 partitions, but this can be changed later. Note that there is no purpose in configuring more than 1 partition for the <i>update</i> topic.</p>
<p>Replication factor can be any value, but at least 2 is recommended. Note that the replication factor can’t exceed the number of Kafka brokers in the cluster. The provided script sets replication to 1, by default, for this reason. This can be changed later with, for example, <tt>kafka-topics --zookeeper ... --alter --topic ... --replication-factor N</tt></p>
<p>You may need to configure the retention time for one or both topics. In particular, it’s typically important to limit the retention time for the update topic, since the Speed and Serving Layer read the entire topic from the start on startup to catch up. This is not as important for the input topic, which is not re-read from the beginning.</p>
<p>Setting it to twice the Batch Layer update interval is a good start. For example, to set it to 1 day (24 * 60 * 60 * 1000 = 86400000 ms), set the topic’s <tt>retention.ms</tt> property to 86400000. This is done automatically by the provided <tt>oryx-run.sh kafka-setup</tt> script.</p>
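<p>If you later need to adjust retention by hand, a command along the following lines should work (illustrative only: adapt host, topic and value to your environment; newer Kafka releases prefer <tt>kafka-configs --alter</tt> for per-topic settings):</p>
<div class="source">
<div class="source">
<pre>kafka-topics --zookeeper your-zk:2181 --alter --topic OryxUpdate --config retention.ms=86400000
</pre>
</div>
</div>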
<p>The two topics above may contain large messages; in particular the update topic includes entire serialized PMML models. It’s possible that they exceed Kafka’s default max message size of 1 MiB. If large models are expected, then the topic’s <tt>max.message.bytes</tt> should be configured to allow larger messages. <tt>oryx-run.sh kafka-setup</tt> sets a default of 16 MiB for the update topic. This is also the default maximum size of a model that Oryx will attempt to write to the update topic; larger models will be passed as a reference to the model file’s location on HDFS instead. See setting <tt>oryx.update-topic.message.max-size</tt>.</p>
<p>The Kafka broker’s <tt>message.max.bytes</tt> (note the different name!) property also controls this, but setting it affects all topics managed by the broker, which may be undesirable. See <a class="externalLink" href="https://www.cloudera.com/documentation/kafka/latest/topics/kafka_performance.html">Performance and Resource Considerations</a> for a more complete discussion. In particular, note that <tt>replica.fetch.max.bytes</tt> would have to be set in the broker in order to <i>replicate</i> any very large messages. There is no per-topic equivalent to this.</p>
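<p>As a rough illustration, the equivalent broker-side settings would be placed in the broker configuration (for example <tt>server.properties</tt>, or the corresponding safety valve in Cloudera Manager) and affect every topic on that broker:</p>
<div class="source">
<div class="source">
<pre>message.max.bytes=16777216
replica.fetch.max.bytes=16777216
</pre>
</div>
</div>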
<div class="section">
<h3 id="Automated_Kafka_Configuration">Automated Kafka Configuration</h3>
<p>The provided <tt>oryx-run.sh</tt> script can be used to print current configuration for Zookeeper, list existing topics in Kafka, and optionally create the configured input and update topics if needed. </p>
<p>You will need to create an Oryx configuration file first, which can be cloned from the example at <a class="externalLink" href="https://github.com/OryxProject/oryx/blob/master/app/conf/als-example.conf">conf/als-example.conf</a> as a start. At least change the Kafka and Zookeeper configuration, as well as topic names, as desired.</p>
<p>With this file as <tt>oryx.conf</tt> and any of the layer JAR files in the same directory, run:</p>
<div class="source">
<div class="source">
<pre>./oryx-run.sh kafka-setup
Input ZK: your-zk:2181
Input Kafka: your-kafka:9092
Input topic: OryxInput
Update ZK: your-zk:2181
Update Kafka: your-kafka:9092
Update topic: OryxUpdate
All available topics:
Input topic OryxInput does not exist. Create it? y
Creating topic OryxInput
Created topic "OryxInput".
Status of topic OryxInput:
Topic:OryxInput PartitionCount:4 ReplicationFactor:1 Configs:
Topic: OryxInput Partition: 0 Leader: 120 Replicas: 120,121 Isr: 120,121
Topic: OryxInput Partition: 1 Leader: 121 Replicas: 121,120 Isr: 121,120
Topic: OryxInput Partition: 2 Leader: 120 Replicas: 120,121 Isr: 120,121
Topic: OryxInput Partition: 3 Leader: 121 Replicas: 121,120 Isr: 121,120
Update topic OryxUpdate does not exist. Create it? y
Creating topic OryxUpdate
Created topic "OryxUpdate".
Updated config for topic "OryxUpdate".
Status of topic OryxUpdate:
Topic:OryxUpdate PartitionCount:1 ReplicationFactor:1 Configs:retention.ms=86400000,max.message.bytes=16777216
Topic: OryxUpdate Partition: 0 Leader: 120 Replicas: 120,121 Isr: 120,121
</pre>
</div>
</div>
<p>To watch messages sent to the input and update topics, to monitor action of the application, try:</p>
<div class="source">
<div class="source">
<pre>./oryx-run.sh kafka-tail
Input ZK: your-zk:2181
Input Kafka: your-kafka:9092
Input topic: OryxInput
Update ZK: your-zk:2181
Update Kafka: your-kafka:9092
Update topic: OryxUpdate
...output...
</pre>
</div>
</div>
<p>Then in another window, you can feed input, such as the <tt>data.csv</tt> example from the <a href="endusers.html">end user docs</a>, into the input queue to verify it’s working with:</p>
<div class="source">
<div class="source">
<pre>./oryx-run.sh kafka-input --input-file data.csv
</pre>
</div>
</div>
<p>If all is well, these processes can be terminated. The cluster is ready to run Oryx.</p>
</div>
</div>
<div class="section">
<h2 id="HDFS_and_Data_Layout">HDFS and Data Layout</h2>
<p>Kafka is the data transport mechanism in Oryx, so data is present in Kafka at least temporarily. However input data is also stored persistently in HDFS for later use. Likewise, models and updates are produced to a Kafka update topic, but models are also persisted to HDFS for later reference.</p>
<p>Input data is stored in HDFS under the directory defined by <tt>oryx.batch.storage.data-dir</tt>. Under this directory, subdirectories titled <tt>oryx-[timestamp].data</tt> are created, one for each batch executed by Spark Streaming in the Batch Layer. Here, <tt>timestamp</tt> is the familiar Unix timestamp in milliseconds.</p>
<p>Like most “files” output by distributed processes in Hadoop, this is actually a subdirectory containing many <tt>part-*</tt> files. Each file is a <tt>SequenceFile</tt>, where keys and values from the Kafka input topic have been serialized according to a <tt>Writable</tt> class implementation defined by <tt>oryx.batch.storage.key-writable-class</tt> and <tt>oryx.batch.storage.message-writable-class</tt>. By default, this is <tt>TextWritable</tt> and the string representation of keys and messages are recorded.</p>
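<p>To spot-check what has been written, <tt>hdfs dfs -text</tt> can decode the <tt>SequenceFile</tt> parts directly. The paths below are placeholders for your configured <tt>oryx.batch.storage.data-dir</tt> and an actual batch timestamp:</p>
<div class="source">
<div class="source">
<pre>hdfs dfs -ls /your/data-dir
hdfs dfs -text /your/data-dir/oryx-1500000000000.data/part-00000 | head
</pre>
</div>
</div>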
<p>Data may be deleted from this data directory if desired. It will no longer be used in future Batch Layer computations. In particular, note that setting <tt>oryx.batch.storage.max-age-data-hours</tt> to a nonnegative value will configure the Batch Layer to automatically delete data older than the given number of hours.</p>
<p>Similarly, machine-learning-oriented applications (which extend <tt>MLUpdate</tt>) output the model chosen by the Batch Layer in each batch interval. It is also persisted in a subdirectory of the directory defined by <tt>oryx.batch.storage.model-dir</tt>. Under this directory are subdirectories named <tt>[timestamp]</tt>, where <tt>timestamp</tt> is again the familiar Unix timestamp in milliseconds.</p>
<p>The content of this subdirectory will depend on the application, but typically contains a PMML model called <tt>model.pmml</tt>, and optionally supplementary files that go with the model.</p>
<p>This directory exists to record PMML models for archiving and for use by other tools. Its content may be deleted if desired. Note also that setting <tt>oryx.batch.storage.max-age-model-hours</tt> to a nonnegative value will cause models older than the given number of hours to be deleted automatically.</p>
<h1 id="handling_failure">Handling Failure</h1>
<p>Eventually, you’ll want to stop one or more of the Layers running, or restart it. Or maybe a server decides to die. What happens then? What’s the worst that can happen?</p>
</div>
<div class="section">
<h2 id="Data_Loss">Data Loss</h2>
<p>Historical data is saved in HDFS, which should be configured for replication. HDFS ensures data is stored reliably. Kafka is also designed to cope with failure when configured to use replication.</p>
<p>That is, there is nothing special to do here in order to ensure that data is never completely lost. It is the job of HDFS and Kafka to always be available and not lose data.</p>
</div>
<div class="section">
<h2 id="Server_Failure">Server Failure</h2>
<p>In general, all three Layer server processes should run continuously, and can and should be restarted immediately if they have to be stopped, or in case of a failure. This can be accomplished with an init script or similar mechanism (not included, yet).</p>
<div class="section">
<h3 id="Serving_Layer">Serving Layer</h3>
<p>The Serving Layer has no state. On startup, it reads all models and updates available on the update topic. It begins answering queries as soon as any first, valid model is available. For this reason, it’s desirable to limit the retention time for the update topic.</p>
<p>The operation of the Serving Layer is not distributed. Each instance is independent, and may stop or start without affecting others.</p>
</div>
<div class="section">
<h3 id="Speed_Layer">Speed Layer</h3>
<p>The Speed Layer also has no state, and also reads all models and updates available on the update topic. It begins producing updates as soon as it has a valid model. It also begins reading from the input topic, and at the moment, always reads from the latest offset.</p>
<p>The Speed Layer uses Spark Streaming and Spark for some of its computation. Spark has the responsibility of dealing with failures during computation in the cluster and retrying tasks.</p>
<p>Spark Streaming’s Kafka integration can in some cases recover from failure of the receiver that is reading from Kafka. If the entire process dies and is restarted, and <tt>oryx.id</tt> has been set, then reading will be able to resume from the last offset recorded by Kafka. (Otherwise, it will resume reading from the latest offset. This means data that arrived while no Speed Layer was running will not have produced any update.) Also, data that arrives before the Speed Layer has a model is ignored too. It effectively adopts “at most once” semantics.</p>
<p>Because the role of the Speed Layer is to provide an approximate, “best effort” update to the last published model, this behavior is generally no problem, and desirable because of its simplicity.</p>
</div>
<div class="section">
<h3 id="Batch_Layer">Batch Layer</h3>
<p>The Batch Layer is the most complex, since it does generate some state:</p>
<ul>
<li>Historical data, is always persisted to HDFS</li>
<li>If the app chooses to, additional state like models can be persisted to HDFS as well as topics</li>
</ul>
<p>It also is most sensitive to reading data multiple times or not at all, since it is the component that creates the “official” next model.</p>
<p>As with the Speed Layer, Spark and Spark Streaming handles many of the failure scenarios during computation. It also manages storing data to HDFS and is responsible for avoiding writing the same data twice.</p>
<p>Applications are responsible for recovering their own ‘state’; currently, applications built on the Oryx ML tier write state into unique subdirectories, and will simply produce a new set of state in a new directory when restarted. Previous state, if it exists, will have been completely written or not at all.</p>
<p>The Batch Layer also currently adopts the same “at most once” semantics as the Speed Layer. As above, if the entire process dies and is restarted, and <tt>oryx.id</tt> has been set, then reading will be able to resume from the last offset recorded by Kafka, and otherwise, it will resume reading from the latest offset.</p>
<h1 id="troubleshooting__faq">Troubleshooting / FAQ</h1>
</div>
</div>
<div class="section">
<h2 id="Unsupported_majorminor_version_520">Unsupported major.minor version 52.0</h2>
<p>This means you are running Java 7 or earlier somewhere. Oryx requires Java 8 or later. See section above on installing Java 8 and making it available everywhere on the cluster.</p>
<p>If you believe that Java 8 is installed, then try setting <tt>JAVA_HOME</tt> explicitly to the location of the Java 8 JRE/JDK home directory before running the Oryx daemons.</p>
</div>
<div class="section">
<h2 id="Initial_job_has_not_accepted_any_resources">Initial job has not accepted any resources</h2>
<p>The error usually means that your YARN cluster can’t allocate the resources (memory, cores) that your application is requesting. You’ll have to check and increase what YARN can allocate, free up room, or decrease the amount that your app asks for.</p>
<p>The relevant YARN settings are:</p>
<ul>
<li>Container Memory (<tt>yarn.nodemanager.resource.memory-mb</tt>) - the maximum memory that one YARN node has to allocate to containers</li>
<li>Container Virtual CPU Cores (<tt>yarn.nodemanager.resource.cpu-vcores</tt>) - same, for cores</li>
<li>Container Memory Maximum (<tt>yarn.scheduler.maximum-allocation-mb</tt>) - maximum memory for one container</li>
<li>Container Virtual CPU Cores Maximum (<tt>yarn.scheduler.maximum-allocation-vcores</tt>) - maximum cores for one container</li>
</ul>
<p>The relevant app settings are:</p>
<ul>
<li><tt>oryx.{batch,speed}.streaming.num-executors</tt> - number of executors (YARN containers) to allocate</li>
<li><tt>oryx.{batch,speed}.streaming.executor-cores</tt> - cores to allocate per executor</li>
<li><tt>oryx.{batch,speed}.streaming.executor-memory</tt> - memory to allocate per executor</li>
</ul>
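<p>For example, a minimal fragment of <tt>oryx.conf</tt> that reduces the Batch Layer's resource request might look like the following (values are illustrative only; tune them to your cluster and apply the same <tt>speed</tt> settings if needed):</p>
<div class="source">
<div class="source">
<pre>oryx.batch.streaming.num-executors = 2
oryx.batch.streaming.executor-cores = 2
oryx.batch.streaming.executor-memory = "2g"
</pre>
</div>
</div>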
</div>
<div class="section">
<h2 id="Required_executor_memory__MB_is_above_the_max_threshold__MB_of_this_cluster">Required executor memory (… MB) is above the max threshold (… MB) of this cluster</h2>
<p>This means your YARN configuration limits the maximum container size that can be requested. Increase the Container Memory Maximum (<tt>yarn.scheduler.maximum-allocation-mb</tt>) to something larger. For Spark, it generally makes sense to allow large containers.</p>
</div>
<div class="section">
<h2 id="IllegalArgumentException_Wrong_FS">IllegalArgumentException: Wrong FS</h2>
<div class="source">
<div class="source">
<pre>java.lang.IllegalArgumentException: Wrong FS: hdfs:..., expected: file:///
at org.apache.hadoop.fs.FileSystem.checkPath(FileSystem.java:645)
</pre>
</div>
</div>
<p>This typically means you are using HDFS, but your Hadoop config (e.g. <tt>core-site.xml</tt>, typically in <tt>/etc/hadoop/conf</tt> is not on the classpath. If you’re building a custom <tt>compute-classpath.sh</tt> script make sure to include this directory along with JARs.</p>
</div>
<div class="section">
<h2 id="I_need_to_purge_all_previous_data_and_start_again">I need to purge all previous data and start again</h2>
<p>Input data exists in the input Kafka topic for a time before being copied into HDFS. So, input potentially exists as unread messages in this topic as well as in the HDFS directory defined by <tt>oryx.batch.storage.data-dir</tt>. It’s easy to delete the data in HDFS; it’s harder to ensure older data in the input topic is not read.</p>
<p>The simplest solution is to create a new input topic and change configuration to use it. Then, also delete any pre-existing data in HDFS (or use a new directory). Similarly, since the update topic is read from the beginning, it’s easiest to make a new update topic instead.</p>
<p>It’s possible to reuse an existing topic name, by removing all its data (difficult, not recommended) or simply deleting and recreating it. If recreating the topic, it’s necessary to reset the consumer offset Oryx will use. This can be done by directly manipulating offsets stored in Zookeeper, to delete them (somewhat hard, not recommended), or by simply switching <tt>oryx.id</tt> to another value.</p>
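<p>For example, switching the instance ID is a one-line change in <tt>oryx.conf</tt> (the new value is arbitrary, as long as it differs from the one used before):</p>
<div class="source">
<div class="source">
<pre>oryx.id = "MyAppV2"
</pre>
</div>
</div>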
</div>
<div class="section">
<h2 id="Speed_Layer_isnt_producing_updates_but_is_running">Speed Layer isn’t producing updates, but is running</h2>
<p>The Speed Layer won’t produce updates until it has loaded a model. Also, check if the Speed Layer’s batches are queued up. If batches are being created faster than they’re processed, then each is waiting longer and longer to start processing, delaying their updates.</p>
<h1 id="performance">Performance</h1>
<p>See <a href="performance.html">the performance doc</a>.</p>
</div>
</div>
</div>
<div class="span4">
<div id="toc-sidebar">
<div class="well">
<ul class="nav nav-list">
<li class="nav-header">Table of Contents</li>
<li class="dropdown"><a href="#cluster_setup" title="Cluster Setup">Cluster Setup <b class="caret"></b></a>
<ul class="nav nav-list">
<li><a href="#Requirements_Matrix_Summary" title="Requirements Matrix Summary">Requirements Matrix Summary</a>
<li><a href="#Deployment_Architecture" title="Deployment Architecture">Deployment Architecture</a>
<li><a href="#Services" title="Services">Services</a>
<li><a href="#Java" title="Java">Java</a>
<li class="dropdown"><a href="#Configuring_Kafka" title="Configuring Kafka">Configuring Kafka <b class="caret"></b></a>
<ul class="nav nav-list">
<li><a href="#Automated_Kafka_Configuration" title="Automated Kafka Configuration">Automated Kafka Configuration</a>
<li class="divider"></li>
</ul>
</li>
<li><a href="#HDFS_and_Data_Layout" title="HDFS and Data Layout">HDFS and Data Layout</a>
<li class="divider"></li>
</ul>
</li>
<li class="dropdown"><a href="#handling_failure" title="Handling Failure">Handling Failure <b class="caret"></b></a>
<ul class="nav nav-list">
<li><a href="#Data_Loss" title="Data Loss">Data Loss</a>
<li class="dropdown"><a href="#Server_Failure" title="Server Failure">Server Failure <b class="caret"></b></a>
<ul class="nav nav-list">
<li><a href="#Serving_Layer" title="Serving Layer">Serving Layer</a>
<li><a href="#Speed_Layer" title="Speed Layer">Speed Layer</a>
<li><a href="#Batch_Layer" title="Batch Layer">Batch Layer</a>
<li class="divider"></li>
</ul>
</li>
</ul>
</li>
<li class="dropdown"><a href="#troubleshooting__faq" title="Troubleshooting / FAQ">Troubleshooting / FAQ <b class="caret"></b></a>
<ul class="nav nav-list">
<li><a href="#Unsupported_majorminor_version_520" title="Unsupported major.minor version 52.0">Unsupported major.minor version 52.0</a>
<li><a href="#Initial_job_has_not_accepted_any_resources" title="Initial job has not accepted any resources">Initial job has not accepted any resources</a>
<li><a href="#Required_executor_memory__MB_is_above_the_max_threshold__MB_of_this_cluster" title="Required executor memory (… MB) is above the max threshold (… MB) of this cluster">Required executor memory (… MB) is above the max threshold (… MB) of this cluster</a>
<li><a href="#IllegalArgumentException_Wrong_FS" title="IllegalArgumentException: Wrong FS">IllegalArgumentException: Wrong FS</a>
<li><a href="#I_need_to_purge_all_previous_data_and_start_again" title="I need to purge all previous data and start again">I need to purge all previous data and start again</a>
<li><a href="#Speed_Layer_isnt_producing_updates_but_is_running" title="Speed Layer isn’t producing updates, but is running">Speed Layer isn’t producing updates, but is running</a>
<li class="divider"></li>
</ul>
</li>
<li><a href="#performance" title="Performance">Performance</a>
</ul>
</div>
</div>
</div>
</div>
</div>
</div><!-- /container -->
<!-- Footer
================================================== -->
<footer class="well">
<div class="container">
<div class="row">
<div class="span9 bottom-nav">
<ul class="nav nav-list">
</ul>
</div>
</div>
</div>
</footer>
<div class="container subfooter">
<div class="row">
<div class="span12">
<p class="pull-right"><a href="#">Back to top</a></p>
<p class="copyright">Copyright ©2014-2018. All Rights Reserved.</p>
<p><a href="http://github.com/andriusvelykis/reflow-maven-skin" title="Reflow Maven skin">Reflow Maven skin</a> by <a href="http://andrius.velykis.lt" target="_blank" title="Andrius Velykis">Andrius Velykis</a>.</p>
</div>
</div>
</div>
<!-- Le javascript
================================================== -->
<!-- Placed at the end of the document so the pages load faster -->
<script src="http://ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js"></script>
<script src="http://netdna.bootstrapcdn.com/twitter-bootstrap/2.3.2/js/bootstrap.min.js"></script>
<script src="../js/lightbox.min.js"></script>
<script src="../js/reflow-scroll.js"></script>
<script src="../js/reflow-skin.js"></script>
</body>
</html>
| apache-2.0 |
apache/commons-compress | src/main/java/org/apache/commons/compress/utils/ServiceLoaderIterator.java | 2727 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.commons.compress.utils;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.ServiceConfigurationError;
import java.util.ServiceLoader;
/**
* Iterates all services for a given class through the standard
* {@link ServiceLoader} mechanism.
*
* @param <E>
* The service to load
* @since 1.13
*/
public class ServiceLoaderIterator<E> implements Iterator<E> {
private E nextServiceLoader;
private final Class<E> service;
private final Iterator<E> serviceLoaderIterator;
public ServiceLoaderIterator(final Class<E> service) {
this(service, ClassLoader.getSystemClassLoader());
}
public ServiceLoaderIterator(final Class<E> service, final ClassLoader classLoader) {
this.service = service;
this.serviceLoaderIterator = ServiceLoader.load(service, classLoader).iterator();
}
@Override
public boolean hasNext() {
while (nextServiceLoader == null) {
try {
if (!serviceLoaderIterator.hasNext()) {
return false;
}
nextServiceLoader = serviceLoaderIterator.next();
} catch (final ServiceConfigurationError e) {
if (e.getCause() instanceof SecurityException) {
// Ignore security exceptions
// TODO Log?
continue;
}
throw e;
}
}
return true;
}
@Override
public E next() {
if (!hasNext()) {
throw new NoSuchElementException("No more elements for service " + service.getName());
}
final E tempNext = nextServiceLoader;
nextServiceLoader = null;
return tempNext;
}
@Override
public void remove() {
throw new UnsupportedOperationException("service=" + service.getName());
}
}
| apache-2.0 |
gobjapan/LambdaOkeiko | src/answers/chapter3/ForEach06.java | 1304 | package answers.chapter3;
import java.util.stream.IntStream;
public class ForEach06 {
public ForEach06() {
        // Written with a plain for loop
        printEvens();
        // Generate a stream with the IntStream#range method and use it
        printEvensStream1();
        // Add the filter method
        printEvensStream2();
        // Use a method reference
        printEvensStream3();
}
private void printEvens() {
for (int i = 0; i < 20; i++) {
if (i % 2 == 0) {
System.out.println(i);
}
}
}
private void printEvensStream1() {
IntStream.range(0, 20)
.forEach(i -> {
if (i % 2 == 0) {
System.out.println(i);
}
});
}
private void printEvensStream2() {
IntStream.range(0, 20)
.filter(i -> i % 2 == 0)
.forEach(i -> System.out.println(i));
}
private void printEvensStream3() {
IntStream.range(0, 20)
.filter(i -> i % 2 == 0)
.forEach(System.out::println);
}
public static void main(String... args) {
new ForEach06();
}
}
| apache-2.0 |
SeleniumHQ/selenium | rb/spec/integration/selenium/webdriver/element_spec.rb | 19292 | # frozen_string_literal: true
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
require_relative 'spec_helper'
module Selenium
module WebDriver
describe Element do
it 'should click' do
driver.navigate.to url_for('formPage.html')
expect { driver.find_element(id: 'imageButton').click }.not_to raise_error
reset_driver!(1) if %i[safari safari_preview].include? GlobalTestEnv.browser
end
# Safari returns "click intercepted" error instead of "element click intercepted"
it 'should raise if different element receives click', except: {browser: %i[safari safari_preview]} do
driver.navigate.to url_for('click_tests/overlapping_elements.html')
expect { driver.find_element(id: 'contents').click }.to raise_error(Error::ElementClickInterceptedError)
end
# Safari returns "click intercepted" error instead of "element click intercepted"
it 'should raise if element is partially covered', except: {browser: %i[safari safari_preview]} do
driver.navigate.to url_for('click_tests/overlapping_elements.html')
expect { driver.find_element(id: 'other_contents').click }.to raise_error(Error::ElementClickInterceptedError)
end
it 'should submit' do
driver.navigate.to url_for('formPage.html')
wait_for_element(id: 'submitButton')
expect { driver.find_element(id: 'submitButton').submit }.not_to raise_error
reset_driver!
end
it 'should send string keys' do
driver.navigate.to url_for('formPage.html')
wait_for_element(id: 'working')
expect { driver.find_element(id: 'working').send_keys('foo', 'bar') }.not_to raise_error
end
it 'should send key presses' do
driver.navigate.to url_for('javascriptPage.html')
key_reporter = driver.find_element(id: 'keyReporter')
key_reporter.send_keys('Tet', :arrow_left, 's')
expect(key_reporter.attribute('value')).to eq('Test')
end
# https://github.com/mozilla/geckodriver/issues/245
it 'should send key presses chords', except: {browser: %i[firefox firefox_nightly safari safari_preview]} do
driver.navigate.to url_for('javascriptPage.html')
key_reporter = driver.find_element(id: 'keyReporter')
key_reporter.send_keys([:shift, 'h'], 'ello')
expect(key_reporter.attribute('value')).to eq('Hello')
end
it 'should handle file uploads' do
driver.navigate.to url_for('formPage.html')
element = driver.find_element(id: 'upload')
expect(element.attribute('value')).to be_empty
path = WebDriver::Platform.windows? ? WebDriver::Platform.windows_path(__FILE__) : __FILE__
element.send_keys path
expect(element.attribute('value')).to include(File.basename(path))
end
describe 'properties and attributes' do
before { driver.navigate.to url_for('formPage.html') }
context 'string type' do
let(:element) { driver.find_element(id: 'checky') }
let(:prop_or_attr) { 'type' }
it '#dom_attribute returns attribute value' do
expect(element.dom_attribute(prop_or_attr)).to eq 'checkbox'
end
it '#property returns property value' do
expect(element.property(prop_or_attr)).to eq 'checkbox'
end
it '#attribute returns value' do
expect(element.attribute(prop_or_attr)).to eq 'checkbox'
end
end
context 'numeric type' do
let(:element) { driver.find_element(id: 'withText') }
let(:prop_or_attr) { 'rows' }
it '#dom_attribute String' do
expect(element.dom_attribute(prop_or_attr)).to eq '5'
end
it '#property returns Number' do
expect(element.property(prop_or_attr)).to eq 5
end
it '#attribute returns String' do
expect(element.attribute(prop_or_attr)).to eq '5'
end
end
context 'boolean type of true' do
let(:element) { driver.find_element(id: 'checkedchecky') }
let(:prop_or_attr) { 'checked' }
it '#dom_attribute returns String', except: {browser: :safari} do
expect(element.dom_attribute(prop_or_attr)).to eq 'true'
end
it '#property returns true' do
expect(element.property(prop_or_attr)).to eq true
end
it '#attribute returns String' do
expect(element.attribute(prop_or_attr)).to eq 'true'
end
it '#dom_attribute does not update after click', except: {browser: :safari} do
element.click
expect(element.dom_attribute(prop_or_attr)).to eq 'true'
end
it '#property updates to false after click' do
element.click
expect(element.property(prop_or_attr)).to eq false
end
it '#attribute updates to nil after click' do
element.click
expect(element.attribute(prop_or_attr)).to eq nil
end
end
context 'boolean type of false' do
let(:element) { driver.find_element(id: 'checky') }
let(:prop_or_attr) { 'checked' }
it '#dom_attribute returns nil' do
expect(element.dom_attribute(prop_or_attr)).to be_nil
end
it '#property returns false' do
expect(element.property(prop_or_attr)).to eq false
end
it '#attribute returns nil' do
expect(element.attribute(prop_or_attr)).to be_nil
end
it '#dom_attribute does not update after click' do
element.click
expect(element.dom_attribute(prop_or_attr)).to eq nil
end
it '#property updates to true after click' do
element.click
expect(element.property(prop_or_attr)).to eq true
end
it '#attribute updates to String after click' do
element.click
expect(element.attribute(prop_or_attr)).to eq 'true'
end
end
context 'property exists but attribute does not' do
let(:element) { driver.find_element(id: 'withText') }
let(:prop_or_attr) { 'value' }
it '#dom_attribute returns nil' do
expect(element.dom_attribute(prop_or_attr)).to be_nil
end
it '#property returns default property' do
expect(element.property(prop_or_attr)).to eq 'Example text'
end
it '#attribute returns default property' do
expect(element.attribute(prop_or_attr)).to eq 'Example text'
end
it '#property returns updated property' do
element.clear
expect(element.property(prop_or_attr)).to be_empty
end
it '#attribute returns updated property' do
element.clear
expect(element.attribute(prop_or_attr)).to be_empty
end
end
context 'attribute exists but property does not' do
let(:element) { driver.find_element(id: 'vsearchGadget') }
let(:prop_or_attr) { 'accesskey' }
it '#dom_attribute returns attribute' do
expect(element.dom_attribute(prop_or_attr)).to eq '4'
end
it '#property returns nil' do
expect(element.property(prop_or_attr)).to be_nil
end
it '#attribute returns attribute' do
expect(element.attribute(prop_or_attr)).to eq '4'
end
end
context 'neither attribute nor property exists' do
let(:element) { driver.find_element(id: 'checky') }
let(:prop_or_attr) { 'nonexistent' }
it '#dom_attribute returns nil' do
expect(element.dom_attribute(prop_or_attr)).to be_nil
end
it '#property returns nil' do
expect(element.property(prop_or_attr)).to be_nil
end
it '#attribute returns nil' do
expect(element.attribute(prop_or_attr)).to be_nil
end
end
context 'style' do
before { driver.navigate.to url_for('clickEventPage.html') }
let(:element) { driver.find_element(id: 'result') }
let(:prop_or_attr) { 'style' }
it '#dom_attribute attribute with no formatting' do
expect(element.dom_attribute(prop_or_attr)).to eq 'width:300;height:60'
end
# TODO: This might not be correct behavior
it '#property returns object',
except: [{browser: :firefox,
reason: 'https://github.com/mozilla/geckodriver/issues/1846'},
{browser: :safari}] do
expect(element.property(prop_or_attr)).to eq %w[width height]
end
it '#attribute returns attribute with formatting' do
expect(element.attribute(prop_or_attr)).to eq 'width: 300px; height: 60px;'
end
end
context 'incorrect casing' do
let(:element) { driver.find_element(id: 'checky') }
let(:prop_or_attr) { 'nAme' }
it '#dom_attribute returns correctly cased attribute' do
expect(element.dom_attribute(prop_or_attr)).to eq 'checky'
end
it '#property returns nil' do
expect(element.property(prop_or_attr)).to be_nil
end
it '#attribute returns correctly cased attribute' do
expect(element.attribute(prop_or_attr)).to eq 'checky'
end
end
context 'property attribute case difference with attribute casing' do
let(:element) { driver.find_element(name: 'readonly') }
let(:prop_or_attr) { 'readonly' }
it '#dom_attribute returns a String', except: {browser: :safari} do
expect(element.dom_attribute(prop_or_attr)).to eq 'true'
end
it '#property returns nil' do
expect(element.property(prop_or_attr)).to be_nil
end
it '#attribute returns a String' do
expect(element.attribute(prop_or_attr)).to eq 'true'
end
end
context 'property attribute case difference with property casing' do
let(:element) { driver.find_element(name: 'readonly') }
let(:prop_or_attr) { 'readOnly' }
it '#dom_attribute returns a String',
except: [{browser: :firefox,
reason: 'https://github.com/mozilla/geckodriver/issues/1850'},
{browser: :safari}] do
expect(element.dom_attribute(prop_or_attr)).to eq 'true'
end
it '#property returns property as true' do
expect(element.property(prop_or_attr)).to eq true
end
it '#attribute returns property as String' do
expect(element.attribute(prop_or_attr)).to eq 'true'
end
end
context 'property attribute name difference with attribute naming' do
let(:element) { driver.find_element(id: 'wallace') }
let(:prop_or_attr) { 'class' }
it '#dom_attribute returns attribute value' do
expect(element.dom_attribute(prop_or_attr)).to eq 'gromit'
end
it '#property returns nil' do
expect(element.property(prop_or_attr)).to be_nil
end
it '#attribute returns attribute value' do
expect(element.attribute(prop_or_attr)).to eq 'gromit'
end
end
context 'property attribute name difference with property naming' do
let(:element) { driver.find_element(id: 'wallace') }
let(:prop_or_attr) { 'className' }
it '#dom_attribute returns nil' do
expect(element.dom_attribute(prop_or_attr)).to be_nil
end
it '#property returns property value' do
expect(element.property(prop_or_attr)).to eq 'gromit'
end
it '#attribute returns property value' do
expect(element.attribute(prop_or_attr)).to eq 'gromit'
end
end
context 'property attribute value difference' do
let(:element) { driver.find_element(tag_name: 'form') }
let(:prop_or_attr) { 'action' }
it '#dom_attribute returns attribute value' do
expect(element.dom_attribute(prop_or_attr)).to eq 'resultPage.html'
end
it '#property returns property value' do
expect(element.property(prop_or_attr)).to match(%r{http://(.+)/resultPage\.html})
end
it '#attribute returns property value' do
expect(element.attribute(prop_or_attr)).to match(%r{http://(.+)/resultPage\.html})
end
end
end
it 'returns ARIA role', only: {browser: %i[chrome edge]} do
driver.navigate.to "data:text/html," \
"<div role='heading' aria-level='1'>Level 1 Header</div>" \
"<h1>Level 1 Header</h1>" \
"<h2 role='alert'>Level 2 Header</h2>"
expect(driver.find_element(tag_name: 'div').aria_role).to eq('heading')
expect(driver.find_element(tag_name: 'h1').aria_role).to eq('heading')
expect(driver.find_element(tag_name: 'h2').aria_role).to eq('alert')
end
it 'returns accessible name', only: {browser: %i[chrome edge]} do
driver.navigate.to "data:text/html,<h1>Level 1 Header</h1>"
expect(driver.find_element(tag_name: 'h1').accessible_name).to eq('Level 1 Header')
end
it 'should clear' do
driver.navigate.to url_for('formPage.html')
expect { driver.find_element(id: 'withText').clear }.not_to raise_error
end
it 'should get and set selected' do
driver.navigate.to url_for('formPage.html')
cheese = driver.find_element(id: 'cheese')
peas = driver.find_element(id: 'peas')
cheese.click
expect(cheese).to be_selected
expect(peas).not_to be_selected
peas.click
expect(peas).to be_selected
expect(cheese).not_to be_selected
end
it 'should get enabled' do
driver.navigate.to url_for('formPage.html')
expect(driver.find_element(id: 'notWorking')).not_to be_enabled
end
it 'should get text' do
driver.navigate.to url_for('xhtmlTest.html')
expect(driver.find_element(class: 'header').text).to eq('XHTML Might Be The Future')
end
it 'should get displayed' do
driver.navigate.to url_for('xhtmlTest.html')
expect(driver.find_element(class: 'header')).to be_displayed
end
context 'size and location' do
it 'should get current location' do
driver.navigate.to url_for('xhtmlTest.html')
loc = driver.find_element(class: 'header').location
expect(loc.x).to be >= 1
expect(loc.y).to be >= 1
end
it 'should get location once scrolled into view' do
driver.navigate.to url_for('javascriptPage.html')
loc = driver.find_element(id: 'keyUp').location_once_scrolled_into_view
expect(loc.x).to be >= 1
expect(loc.y).to be >= 0 # can be 0 if scrolled to the top
end
it 'should get size' do
driver.navigate.to url_for('xhtmlTest.html')
size = driver.find_element(class: 'header').size
expect(size.width).to be_positive
expect(size.height).to be_positive
end
it 'should get rect' do
driver.navigate.to url_for('xhtmlTest.html')
rect = driver.find_element(class: 'header').rect
expect(rect.x).to be_positive
expect(rect.y).to be_positive
expect(rect.width).to be_positive
expect(rect.height).to be_positive
end
end
# IE - https://github.com/SeleniumHQ/selenium/pull/4043
it 'should drag and drop', except: {browser: :ie} do
driver.navigate.to url_for('dragAndDropTest.html')
img1 = driver.find_element(id: 'test1')
img2 = driver.find_element(id: 'test2')
driver.action.drag_and_drop_by(img1, 100, 100)
.drag_and_drop(img2, img1)
.perform
expect(img1.location).to eq(img2.location)
end
it 'should get css property' do
driver.navigate.to url_for('javascriptPage.html')
element = driver.find_element(id: 'green-parent')
style1 = element.css_value('background-color')
style2 = element.style('background-color') # backwards compatibility
acceptable = ['rgb(0, 128, 0)', '#008000', 'rgba(0,128,0,1)', 'rgba(0, 128, 0, 1)']
expect(acceptable).to include(style1, style2)
end
it 'should know when two elements are equal' do
driver.navigate.to url_for('simpleTest.html')
body = driver.find_element(tag_name: 'body')
xbody = driver.find_element(xpath: '//body')
jsbody = driver.execute_script('return document.getElementsByTagName("body")[0]')
expect(body).to eq(xbody)
expect(body).to eq(jsbody)
expect(body).to eql(xbody)
expect(body).to eql(jsbody)
end
it 'should know when element arrays are equal' do
driver.navigate.to url_for('simpleTest.html')
tags = driver.find_elements(tag_name: 'div')
jstags = driver.execute_script('return document.getElementsByTagName("div")')
expect(tags).to eq(jstags)
end
it 'should know when two elements are not equal' do
driver.navigate.to url_for('simpleTest.html')
elements = driver.find_elements(tag_name: 'p')
p1 = elements.fetch(0)
p2 = elements.fetch(1)
expect(p1).not_to eq(p2)
expect(p1).not_to eql(p2)
end
it 'should return the same #hash for equal elements when found by Driver#find_element' do
driver.navigate.to url_for('simpleTest.html')
body = driver.find_element(tag_name: 'body')
xbody = driver.find_element(xpath: '//body')
expect(body.hash).to eq(xbody.hash)
end
it 'should return the same #hash for equal elements when found by Driver#find_elements' do
driver.navigate.to url_for('simpleTest.html')
body = driver.find_elements(tag_name: 'body').fetch(0)
xbody = driver.find_elements(xpath: '//body').fetch(0)
expect(body.hash).to eq(xbody.hash)
end
end
end # WebDriver
end # Selenium
| apache-2.0 |
turbokongen/home-assistant | homeassistant/components/hunterdouglas_powerview/__init__.py | 5873 | """The Hunter Douglas PowerView integration."""
import asyncio
from datetime import timedelta
import logging
from aiopvapi.helpers.aiorequest import AioRequest
from aiopvapi.helpers.constants import ATTR_ID
from aiopvapi.helpers.tools import base64_to_unicode
from aiopvapi.rooms import Rooms
from aiopvapi.scenes import Scenes
from aiopvapi.shades import Shades
from aiopvapi.userdata import UserData
import async_timeout
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
COORDINATOR,
DEVICE_FIRMWARE,
DEVICE_INFO,
DEVICE_MAC_ADDRESS,
DEVICE_MODEL,
DEVICE_NAME,
DEVICE_REVISION,
DEVICE_SERIAL_NUMBER,
DOMAIN,
FIRMWARE_BUILD,
FIRMWARE_IN_USERDATA,
FIRMWARE_SUB_REVISION,
HUB_EXCEPTIONS,
HUB_NAME,
LEGACY_DEVICE_BUILD,
LEGACY_DEVICE_MODEL,
LEGACY_DEVICE_REVISION,
LEGACY_DEVICE_SUB_REVISION,
MAC_ADDRESS_IN_USERDATA,
MAINPROCESSOR_IN_USERDATA_FIRMWARE,
MODEL_IN_MAINPROCESSOR,
PV_API,
PV_ROOM_DATA,
PV_SCENE_DATA,
PV_SHADE_DATA,
PV_SHADES,
REVISION_IN_MAINPROCESSOR,
ROOM_DATA,
SCENE_DATA,
SERIAL_NUMBER_IN_USERDATA,
SHADE_DATA,
USER_DATA,
)
PARALLEL_UPDATES = 1
CONFIG_SCHEMA = cv.deprecated(DOMAIN)
PLATFORMS = ["cover", "scene", "sensor"]
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, hass_config: dict):
"""Set up the Hunter Douglas PowerView component."""
hass.data.setdefault(DOMAIN, {})
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Hunter Douglas PowerView from a config entry."""
config = entry.data
hub_address = config.get(CONF_HOST)
websession = async_get_clientsession(hass)
pv_request = AioRequest(hub_address, loop=hass.loop, websession=websession)
try:
async with async_timeout.timeout(10):
device_info = await async_get_device_info(pv_request)
async with async_timeout.timeout(10):
rooms = Rooms(pv_request)
room_data = _async_map_data_by_id((await rooms.get_resources())[ROOM_DATA])
async with async_timeout.timeout(10):
scenes = Scenes(pv_request)
scene_data = _async_map_data_by_id(
(await scenes.get_resources())[SCENE_DATA]
)
async with async_timeout.timeout(10):
shades = Shades(pv_request)
shade_data = _async_map_data_by_id(
(await shades.get_resources())[SHADE_DATA]
)
except HUB_EXCEPTIONS as err:
_LOGGER.error("Connection error to PowerView hub: %s", hub_address)
raise ConfigEntryNotReady from err
if not device_info:
_LOGGER.error("Unable to initialize PowerView hub: %s", hub_address)
raise ConfigEntryNotReady
async def async_update_data():
"""Fetch data from shade endpoint."""
async with async_timeout.timeout(10):
shade_entries = await shades.get_resources()
if not shade_entries:
raise UpdateFailed("Failed to fetch new shade data.")
return _async_map_data_by_id(shade_entries[SHADE_DATA])
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="powerview hub",
update_method=async_update_data,
update_interval=timedelta(seconds=60),
)
hass.data[DOMAIN][entry.entry_id] = {
PV_API: pv_request,
PV_ROOM_DATA: room_data,
PV_SCENE_DATA: scene_data,
PV_SHADES: shades,
PV_SHADE_DATA: shade_data,
COORDINATOR: coordinator,
DEVICE_INFO: device_info,
}
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_get_device_info(pv_request):
"""Determine device info."""
userdata = UserData(pv_request)
resources = await userdata.get_resources()
userdata_data = resources[USER_DATA]
if FIRMWARE_IN_USERDATA in userdata_data:
main_processor_info = userdata_data[FIRMWARE_IN_USERDATA][
MAINPROCESSOR_IN_USERDATA_FIRMWARE
]
else:
# Legacy devices
main_processor_info = {
REVISION_IN_MAINPROCESSOR: LEGACY_DEVICE_REVISION,
FIRMWARE_SUB_REVISION: LEGACY_DEVICE_SUB_REVISION,
FIRMWARE_BUILD: LEGACY_DEVICE_BUILD,
MODEL_IN_MAINPROCESSOR: LEGACY_DEVICE_MODEL,
}
return {
DEVICE_NAME: base64_to_unicode(userdata_data[HUB_NAME]),
DEVICE_MAC_ADDRESS: userdata_data[MAC_ADDRESS_IN_USERDATA],
DEVICE_SERIAL_NUMBER: userdata_data[SERIAL_NUMBER_IN_USERDATA],
DEVICE_REVISION: main_processor_info[REVISION_IN_MAINPROCESSOR],
DEVICE_FIRMWARE: main_processor_info,
DEVICE_MODEL: main_processor_info[MODEL_IN_MAINPROCESSOR],
}
@callback
def _async_map_data_by_id(data):
"""Return a dict with the key being the id for a list of entries."""
return {entry[ATTR_ID]: entry for entry in data}
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
| apache-2.0 |
kkarsnia/expunge | docs/commands.md | 458 | There are many commands available to help you build and test sites. Here are a few highlights to get started with.
## Watch For Changes & Automatically Refresh Across Devices
```sh
$ npm start
```
This outputs an IP address you can use for local testing and another that can be used on devices connected to your network.
## Deploying
The project is hooked up to Travis CI and any push to master will build and publish the site to http://expungetexas.org/
| apache-2.0 |
kawamon/hue | docs/gethue/content/jp/posts/2021-04-23-s3-file-access-without-any-credentials-and-signed-urls.md | 10261 | ---
title: Provide proper S3 file access to users without giving out any credential keys
author: Hue Team
type: post
date: 2021-04-23T00:00:00+00:00
url: /blog/2021-04-23-s3-file-access-without-any-credentials-and-signed-urls/
sf_thumbnail_type:
- none
sf_thumbnail_link_type:
- link_to_post
sf_detail_type:
- none
sf_page_title:
- 1
sf_page_title_style:
- standard
sf_no_breadcrumbs:
- 1
sf_page_title_bg:
- none
sf_page_title_text_style:
- light
sf_background_image_size:
- cover
sf_social_sharing:
- 1
sf_related_articles:
- 1
sf_sidebar_config:
- left-sidebar
sf_left_sidebar:
- Sidebar-2
sf_right_sidebar:
- Sidebar-1
sf_caption_position:
- caption-right
sf_remove_promo_bar:
- 1
ampforwp-amp-on-off:
- default
categories:
- Version 4.10
- Development
---
Use AWS S3 signed URLs to list buckets and keys and to manage files.
[SQL queries](https://medium.com/data-querying/interactively-querying-hbase-via-sql-273013e5b3cc) open up data and help users make decisions backed by solid facts. But what if the data a user needs is not in the data warehouse yet?
To provide more of a self-service experience, users can [query their own data](https://gethue.com/querying-exploring-the-instacart-dataset-part-1-ingesting-the-data/), join it, and export and share query result files.
Here is a real-world scenario from the [Hue Editor](http://gethue.com/) project showing how signed URLs were introduced.

<br>
*Hue provides the same consistent file browsing interface whether the storage is HDFS, S3 or ADLS*
Hue’s SQL Editor has been providing transparent access to cloud storage via the [File Browser](https://gethue.com/introducing-s3-support-in-hue/). The app is very popular because going directly to the native S3 or ADLS Web UIs is cumbersome, and the File Browser is cloud agnostic (the S3 and ADLS interfaces are also not designed for simplicity or for non-engineers). On top of that, most SQL users are already familiar with the identical-looking HDFS browser.
However, this is usually a headache for cluster administrators, as it requires providing a set of S3 credential keys to the Hue Server and sharing them with everybody. Not very secure. Restricting the File Browser to administrators only would cripple the functionality and put us back to square one.
This is where [S3 signed URLs](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-presigned-urls.html) come in and take away this hassle.

<br>
*Users interacting with S3 via Hue with shared credential keys (not the best)*

<br>
*Users get individual temporary URLs that give access to any S3 resource (secure and fine-grained access)*
<br><br> These URLs are typically set to expire (e.g. after 5 minutes) and are “signed”: users cannot modify them without automatically invalidating them, so they are safe to hand out.
Another advantage is that the Hue Web Server does not need to hold any S3 credentials. In this example, Hue simply asks the [RAZ Server](https://blog.cloudera.com/access-control-for-azure-adls-cloud-object-storage/) for a URL equivalent to the S3 call the user wants to make.

<br>
*Before (top): the AWS Python SDK is called directly to list the buckets, and Python objects representing each bucket are returned. After (bottom): a special URL is requested and an HTTP request is made directly to S3, without any additional authentication, to retrieve the XML data.*
<hr />
This section goes into more detail about the internal implementation and is aimed more at developers.

<br>
*The native (Boto) and signed URL (RAZ) access paths in more detail. Most importantly, the Boto S3 classes need to be overridden so that signed URLs are requested or generated, and HTTP calls have to be made instead of letting the Boto library do everything. The S3 credential keys are gone, and the S3 connection object is no longer used (it is set to None to guarantee this).*
One of the magic pieces is the RAZ server, which can convert S3 calls into signed URLs. RAZ also leverages [Apache Ranger](https://ranger.apache.org/), which provides authorization and fine-grained permissions (i.e. who can access this bucket, who can upload files to this directory).
RAZ is not open source, but the basic logic of the URL generation looks like the following.
Here are some pieces of code demonstrating how a call such as “listing the buckets” can be replaced by a signed S3 URL (a small illustrative sketch follows the list below):
* Boto3: [create_presigned_url()](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-presigned-urls.html)
* Boto2: [generate_url()](http://boto.cloudhackers.com/en/latest/ref/s3.html#boto.s3.connection.S3Connection.generate_url), e.g. connection.generate_url(3600, ‘GET’)
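As a rough sketch (not taken from the original implementation), generating such a pre-signed URL with Boto3 could look like the following; the bucket-listing call and the one-hour expiry are illustrative assumptions, and the signing here uses whatever credentials Boto3 resolves locally, whereas with RAZ it happens server side:
```python
import boto3
# Illustrative client; in the RAZ scenario no local S3 credentials are needed on the web server.
s3_client = boto3.client("s3")
# Pre-signed URL equivalent to the "list the buckets" call, valid for one hour.
url = s3_client.generate_presigned_url(ClientMethod="list_buckets", ExpiresIn=3600)
print(url)
```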
And here is how to call it and unmarshal the XML back into Python objects:
```python
import boto
import xml.sax
import requests
from boto.resultset import ResultSet
from boto.s3.bucket import Bucket
tmp_url = 'https://s3-us-west-1.amazonaws.com/?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIA23E77ZX2HVY76YGL%2F20210422%2Fus-west-1%2Fs3%2Faws4_request&X-Amz-Date=20210422T213700Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=2efc90228ec9255636de27dab661e071a931f0aea7b51a09027f4747d0a78f6e'
response = requests.get(tmp_url)
print(response)
rs = ResultSet([('Bucket', Bucket)])
h = boto.handler.XmlHandler(rs, None)
xml.sax.parseString(response.content, h)
print(rs)
print([k for k in rs[0].list(prefix='data/')])
```
This shows the same bucket objects as when using native Boto directly:
```
> <Response [200]>
[<Bucket: demo-gethue>, <Bucket: gethue-test>]
```
*The big picture: on the left is mostly the Hue File System library, which is generic and provides file browsing on top of any storage system (HDFS, native S3, S3 via signed URLs, ADLS...). On the right is how to build a client that interacts with a service able to generate a signed URL for each call.*
It certainly looks complex, but this is at least one of the pieces that has to be figured out in order to offer true self-service querying in the cloud world :)
Hue is [open source](https://github.com/cloudera/hue/) and this capability is planned for the upcoming 4.10 release.
Until then, happy data querying to everyone!
Romain from the Hue Team
| apache-2.0 |
working-wa/whats-my-wage-app | www/js/AddressInSeattleView.js | 2780 | var AddressInSeattleView = function (answerService) {
this.initialize = function () {
// Define a div wrapper for the view (used to attach events)
this.$el = $('<div/>');
var queryAddress = function(evt) {
var geocodeDeferred = $.Deferred();
var geocoder = new google.maps.Geocoder();
geocoder.geocode({ address: $("#employer-address").val() },function(results, status) {
      if(!results || results.length == 0) {
geocodeDeferred.reject("Error geocoding");
} else {
geocodeDeferred.resolve(results);
}
});
var loadCityLimitsDeferred = $.Deferred();
$.ajax({
dataType: "json",
url: "data/city-limits.json",
success: function(cityLimits) {
loadCityLimitsDeferred.resolve(cityLimits);
},
error: function(response, status, errorThrown) {
loadCityLimitsDeferred.reject("Error loading city limits");
}
});
var onGeocodeAndLoad = function(results, cityLimits) {
var ww = Wherewolf();
ww.add("Seattle", cityLimits);
var lngLat, inSeattle;
//For each geocoder result
for (var i = 0; i < results.length; i++) {
lngLat = {
          lng: results[i].geometry.location.lng(),
          lat: results[i].geometry.location.lat()
};
inSeattle = ww.find(lngLat,{
layer:"Seattle",
wholeFeature: true
});
//If it's a match, stop
if (inSeattle) {
answerService.saveAnswer("work-seattle","yes");
var resultDiv = $(this.$el.find(".result")).html("In Seattle");
var continueButton = $(this.$el.find("a.btn"));
continueButton.attr("href","#question/number-employees");
continueButton.removeClass("hidden");
return;
}
}
answerService.saveAnswer("work-seattle","no");
var resultDiv = $(this.$el.find(".result")).html("Not In Seattle");
var continueButton = $(this.$el.find("a.btn"));
continueButton.attr("href","#results");
continueButton.removeClass("hidden");
}
var onFailedGeocodeOrLoad = function(err1, err2) {
$(this.$el.find(".result")).html("Unable to Determine");
};
$.when(geocodeDeferred, loadCityLimitsDeferred).done(onGeocodeAndLoad.bind(this)).fail( onFailedGeocodeOrLoad.bind(this));
};
this.$el.on("click",".query", queryAddress.bind(this));
this.render();
};
this.render = function() {
this.$el.html(this.template());
return this;
};
this.initialize();
}
| apache-2.0 |
dsyang/buck | test/com/facebook/buck/parser/BuildFileSpecTest.java | 11992 | /*
* Copyright 2014-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.facebook.buck.parser;
import static org.junit.Assert.assertEquals;
import com.facebook.buck.config.Config;
import com.facebook.buck.config.ConfigBuilder;
import com.facebook.buck.io.FakeWatchmanClient;
import com.facebook.buck.io.ProjectFilesystem;
import com.facebook.buck.io.ProjectWatch;
import com.facebook.buck.io.Watchman;
import com.facebook.buck.rules.Cell;
import com.facebook.buck.rules.TestCellBuilder;
import com.facebook.buck.testutil.FakeProjectFilesystem;
import com.facebook.buck.util.HumanReadableException;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.rules.TemporaryFolder;
public class BuildFileSpecTest {
@Rule public ExpectedException thrown = ExpectedException.none();
@Rule public TemporaryFolder tmp = new TemporaryFolder();
@Test
public void recursiveVsNonRecursive() throws IOException, InterruptedException {
FakeProjectFilesystem filesystem = new FakeProjectFilesystem();
Path buildFile = Paths.get("a", "BUCK");
filesystem.mkdirs(buildFile.getParent());
filesystem.touch(buildFile);
Path nestedBuildFile = Paths.get("a", "b", "BUCK");
filesystem.mkdirs(nestedBuildFile.getParent());
filesystem.touch(nestedBuildFile);
// Test a non-recursive spec.
BuildFileSpec nonRecursiveSpec =
BuildFileSpec.fromPath(buildFile.getParent(), filesystem.getRootPath());
ImmutableSet<Path> expectedBuildFiles = ImmutableSet.of(filesystem.resolve(buildFile));
Cell cell = new TestCellBuilder().setFilesystem(filesystem).build();
ImmutableSet<Path> actualBuildFiles =
nonRecursiveSpec.findBuildFiles(cell, ParserConfig.BuildFileSearchMethod.FILESYSTEM_CRAWL);
assertEquals(expectedBuildFiles, actualBuildFiles);
// Test a recursive spec.
BuildFileSpec recursiveSpec =
BuildFileSpec.fromRecursivePath(buildFile.getParent(), filesystem.getRootPath());
expectedBuildFiles =
ImmutableSet.of(filesystem.resolve(buildFile), filesystem.resolve(nestedBuildFile));
actualBuildFiles =
recursiveSpec.findBuildFiles(cell, ParserConfig.BuildFileSearchMethod.FILESYSTEM_CRAWL);
assertEquals(expectedBuildFiles, actualBuildFiles);
}
@Test
public void recursiveIgnorePaths() throws IOException, InterruptedException {
Path ignoredBuildFile = Paths.get("a", "b", "BUCK");
Config config = ConfigBuilder.createFromText("[project]", "ignore = a/b");
ProjectFilesystem filesystem = new ProjectFilesystem(tmp.getRoot().toPath(), config);
Path buildFile = Paths.get("a", "BUCK");
filesystem.mkdirs(buildFile.getParent());
filesystem.writeContentsToPath("", buildFile);
filesystem.mkdirs(ignoredBuildFile.getParent());
filesystem.writeContentsToPath("", ignoredBuildFile);
// Test a recursive spec with an ignored dir.
BuildFileSpec recursiveSpec =
BuildFileSpec.fromRecursivePath(buildFile.getParent(), filesystem.getRootPath());
ImmutableSet<Path> expectedBuildFiles = ImmutableSet.of(filesystem.resolve(buildFile));
Cell cell = new TestCellBuilder().setFilesystem(filesystem).build();
ImmutableSet<Path> actualBuildFiles =
recursiveSpec.findBuildFiles(cell, ParserConfig.BuildFileSearchMethod.FILESYSTEM_CRAWL);
assertEquals(expectedBuildFiles, actualBuildFiles);
}
@Test
public void findWithWatchmanSucceeds() throws IOException, InterruptedException {
Path watchRoot = Paths.get(".").toAbsolutePath().normalize();
FakeProjectFilesystem filesystem = new FakeProjectFilesystem(watchRoot.resolve("project-name"));
Path buildFile = Paths.get("a", "BUCK");
BuildFileSpec recursiveSpec =
BuildFileSpec.fromRecursivePath(buildFile.getParent(), filesystem.getRootPath());
ImmutableSet<Path> expectedBuildFiles = ImmutableSet.of(filesystem.resolve(buildFile));
FakeWatchmanClient fakeWatchmanClient =
new FakeWatchmanClient(
0,
ImmutableMap.of(
ImmutableList.of(
"query",
watchRoot.toString(),
ImmutableMap.of(
"relative_root", "project-name",
"sync_timeout", 0,
"path", ImmutableList.of("a"),
"fields", ImmutableList.of("name"),
"expression",
ImmutableList.of(
"allof",
"exists",
ImmutableList.of("name", "BUCK"),
ImmutableList.of("type", "f")))),
ImmutableMap.of("files", ImmutableList.of("a/BUCK"))));
Cell cell =
new TestCellBuilder()
.setFilesystem(filesystem)
.setWatchman(
new Watchman(
ImmutableMap.of(
filesystem.getRootPath(),
ProjectWatch.of(watchRoot.toString(), Optional.of("project-name"))),
ImmutableSet.of(
Watchman.Capability.SUPPORTS_PROJECT_WATCH,
Watchman.Capability.DIRNAME,
Watchman.Capability.WILDMATCH_GLOB),
ImmutableMap.of(),
Optional.of(Paths.get(".watchman-sock")),
Optional.of(fakeWatchmanClient)))
.build();
ImmutableSet<Path> actualBuildFiles =
recursiveSpec.findBuildFiles(cell, ParserConfig.BuildFileSearchMethod.WATCHMAN);
assertEquals(expectedBuildFiles, actualBuildFiles);
}
@Test
public void findWithWatchmanThrowsOnFailure() throws IOException, InterruptedException {
Path watchRoot = Paths.get(".").toAbsolutePath().normalize();
FakeProjectFilesystem filesystem = new FakeProjectFilesystem(watchRoot.resolve("project-name"));
Path buildFile = Paths.get("a", "BUCK");
BuildFileSpec recursiveSpec =
BuildFileSpec.fromRecursivePath(buildFile.getParent(), filesystem.getRootPath());
FakeWatchmanClient fakeWatchmanClient =
new FakeWatchmanClient(
0,
ImmutableMap.of(
ImmutableList.of(
"query",
watchRoot.toString(),
ImmutableMap.of(
"relative_root", "project-name",
"sync_timeout", 0,
"path", ImmutableList.of("a"),
"fields", ImmutableList.of("name"),
"expression",
ImmutableList.of(
"allof",
"exists",
ImmutableList.of("name", "BUCK"),
ImmutableList.of("type", "f")))),
ImmutableMap.of("files", ImmutableList.of("a/BUCK"))),
new IOException("Whoopsie!"));
Cell cell =
new TestCellBuilder()
.setFilesystem(filesystem)
.setWatchman(
new Watchman(
ImmutableMap.of(
filesystem.getRootPath(),
ProjectWatch.of(watchRoot.toString(), Optional.of("project-name"))),
ImmutableSet.of(
Watchman.Capability.SUPPORTS_PROJECT_WATCH,
Watchman.Capability.DIRNAME,
Watchman.Capability.WILDMATCH_GLOB),
ImmutableMap.of(),
Optional.of(Paths.get(".watchman-sock")),
Optional.of(fakeWatchmanClient)))
.build();
thrown.expect(IOException.class);
thrown.expectMessage("Whoopsie!");
recursiveSpec.findBuildFiles(cell, ParserConfig.BuildFileSearchMethod.WATCHMAN);
}
@Test
public void findWithWatchmanFallsBackToFilesystemOnTimeout()
throws IOException, InterruptedException {
Path watchRoot = Paths.get(".").toAbsolutePath().normalize();
FakeProjectFilesystem filesystem = new FakeProjectFilesystem(watchRoot.resolve("project-name"));
Path buildFile = Paths.get("a", "BUCK");
filesystem.mkdirs(buildFile.getParent());
filesystem.touch(buildFile);
Path nestedBuildFile = Paths.get("a", "b", "BUCK");
filesystem.mkdirs(nestedBuildFile.getParent());
filesystem.touch(nestedBuildFile);
BuildFileSpec recursiveSpec =
BuildFileSpec.fromRecursivePath(buildFile.getParent(), filesystem.getRootPath());
FakeWatchmanClient timingOutWatchmanClient =
new FakeWatchmanClient(
// Pretend the query takes a very very long time.
TimeUnit.SECONDS.toNanos(Long.MAX_VALUE),
ImmutableMap.of(
ImmutableList.of(
"query",
watchRoot.toString(),
ImmutableMap.of(
"relative_root", "project-name",
"sync_timeout", 0,
"path", ImmutableList.of("a"),
"fields", ImmutableList.of("name"),
"expression",
ImmutableList.of(
"allof",
"exists",
ImmutableList.of("name", "BUCK"),
ImmutableList.of("type", "f")))),
ImmutableMap.of("files", ImmutableList.of("a/BUCK", "a/b/BUCK"))));
Cell cell =
new TestCellBuilder()
.setFilesystem(filesystem)
.setWatchman(
new Watchman(
ImmutableMap.of(
filesystem.getRootPath(),
ProjectWatch.of(watchRoot.toString(), Optional.of("project-name"))),
ImmutableSet.of(
Watchman.Capability.SUPPORTS_PROJECT_WATCH,
Watchman.Capability.DIRNAME,
Watchman.Capability.WILDMATCH_GLOB),
ImmutableMap.of(),
Optional.of(Paths.get(".watchman-sock")),
Optional.of(timingOutWatchmanClient)))
.build();
ImmutableSet<Path> expectedBuildFiles =
ImmutableSet.of(filesystem.resolve(buildFile), filesystem.resolve(nestedBuildFile));
ImmutableSet<Path> actualBuildFiles =
recursiveSpec.findBuildFiles(cell, ParserConfig.BuildFileSearchMethod.WATCHMAN);
assertEquals(expectedBuildFiles, actualBuildFiles);
}
@Test
public void testWildcardFolderNotFound() throws IOException, InterruptedException {
FakeProjectFilesystem filesystem = new FakeProjectFilesystem();
Cell cell = new TestCellBuilder().setFilesystem(filesystem).build();
BuildFileSpec recursiveSpec =
BuildFileSpec.fromRecursivePath(filesystem.resolve("foo/bar"), filesystem.getRootPath());
thrown.expect(HumanReadableException.class);
thrown.expectMessage("could not be found");
recursiveSpec.findBuildFiles(cell, ParserConfig.BuildFileSearchMethod.FILESYSTEM_CRAWL);
}
}
| apache-2.0 |
n9/SaltarelleWeb | Web/Generated/Html/DOMRectList.cs | 642 | using System.Collections.Generic;
using System.Runtime.CompilerServices;
namespace System.Html {
[IgnoreNamespace, Imported(ObeysTypeSystem = true)]
public partial class DOMRectList {
internal DOMRectList() {
}
[IndexerName("__Item"), IntrinsicProperty]
public DOMRect this[int index] {
get {
return default(DOMRect);
}
}
[EnumerateAsArray, InlineCode("new {$System.ArrayEnumerator}({this})")]
public IEnumerator<DOMRect> GetEnumerator() {
return null;
}
public DOMRect Item(int index) {
return default(DOMRect);
}
[IntrinsicProperty]
public int Length {
get {
return 0;
}
}
}
}
| apache-2.0 |
jk1/intellij-community | platform/lang-api/src/com/intellij/execution/runners/ExecutionEnvironmentBuilder.java | 8057 | /*
* Copyright 2000-2017 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.execution.runners;
import com.intellij.execution.*;
import com.intellij.execution.configurations.ConfigurationPerRunnerSettings;
import com.intellij.execution.configurations.RunConfiguration;
import com.intellij.execution.configurations.RunProfile;
import com.intellij.execution.configurations.RunnerSettings;
import com.intellij.execution.ui.RunContentDescriptor;
import com.intellij.openapi.actionSystem.DataContext;
import com.intellij.openapi.components.ServiceManager;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.UserDataHolderBase;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
public final class ExecutionEnvironmentBuilder {
@NotNull private RunProfile myRunProfile;
@NotNull private ExecutionTarget myTarget = DefaultExecutionTarget.INSTANCE;
@NotNull private final Project myProject;
@Nullable private RunnerSettings myRunnerSettings;
@Nullable private ConfigurationPerRunnerSettings myConfigurationSettings;
@Nullable private RunContentDescriptor myContentToReuse;
@Nullable private RunnerAndConfigurationSettings myRunnerAndConfigurationSettings;
@Nullable private String myRunnerId;
private ProgramRunner<?> myRunner;
private boolean myAssignNewId;
@NotNull private Executor myExecutor;
@Nullable private DataContext myDataContext;
private final UserDataHolderBase myUserData = new UserDataHolderBase();
public ExecutionEnvironmentBuilder(@NotNull Project project, @NotNull Executor executor) {
myProject = project;
myExecutor = executor;
}
@NotNull
public static ExecutionEnvironmentBuilder create(@NotNull Project project, @NotNull Executor executor, @NotNull RunProfile runProfile) throws ExecutionException {
ExecutionEnvironmentBuilder builder = createOrNull(project, executor, runProfile);
if (builder == null) {
throw new ExecutionException("Cannot find runner for " + runProfile.getName());
}
return builder;
}
@Nullable
public static ExecutionEnvironmentBuilder createOrNull(@NotNull Project project, @NotNull Executor executor, @NotNull RunProfile runProfile) {
ProgramRunner runner = RunnerRegistry.getInstance().getRunner(executor.getId(), runProfile);
if (runner == null) {
return null;
}
return new ExecutionEnvironmentBuilder(project, executor).runner(runner).runProfile(runProfile);
}
@Nullable
public static ExecutionEnvironmentBuilder createOrNull(@NotNull Executor executor, @NotNull RunnerAndConfigurationSettings settings) {
ExecutionEnvironmentBuilder builder = createOrNull(settings.getConfiguration().getProject(), executor, settings.getConfiguration());
return builder == null ? null : builder.runnerAndSettings(builder.myRunner, settings);
}
@NotNull
public static ExecutionEnvironmentBuilder create(@NotNull Executor executor, @NotNull RunnerAndConfigurationSettings settings) throws ExecutionException {
RunConfiguration configuration = settings.getConfiguration();
ExecutionEnvironmentBuilder builder = create(configuration.getProject(), executor, configuration);
return builder.runnerAndSettings(builder.myRunner, settings);
}
@NotNull
public static ExecutionEnvironmentBuilder create(@NotNull Executor executor, @NotNull RunConfiguration configuration) {
return new ExecutionEnvironmentBuilder(configuration.getProject(), executor).runProfile(configuration);
}
@NotNull
Executor getExecutor() {
return myExecutor;
}
/**
* Creates an execution environment builder initialized with a copy of the specified environment.
*
* @param copySource the environment to copy from.
*/
public ExecutionEnvironmentBuilder(@NotNull ExecutionEnvironment copySource) {
myTarget = copySource.getExecutionTarget();
myProject = copySource.getProject();
myRunnerAndConfigurationSettings = copySource.getRunnerAndConfigurationSettings();
myRunProfile = copySource.getRunProfile();
myRunnerSettings = copySource.getRunnerSettings();
myConfigurationSettings = copySource.getConfigurationSettings();
myRunner = copySource.getRunner();
myContentToReuse = copySource.getContentToReuse();
myExecutor = copySource.getExecutor();
copySource.copyUserDataTo(myUserData);
}
public ExecutionEnvironmentBuilder target(@Nullable ExecutionTarget target) {
if (target != null) {
myTarget = target;
}
return this;
}
public ExecutionEnvironmentBuilder activeTarget() {
myTarget = ExecutionTargetManager.getActiveTarget(myProject);
return this;
}
public ExecutionEnvironmentBuilder runnerAndSettings(@NotNull ProgramRunner runner,
@NotNull RunnerAndConfigurationSettings settings) {
myRunnerAndConfigurationSettings = settings;
myRunProfile = settings.getConfiguration();
myRunnerSettings = settings.getRunnerSettings(runner);
myConfigurationSettings = settings.getConfigurationSettings(runner);
myRunner = runner;
return this;
}
public ExecutionEnvironmentBuilder runnerSettings(@Nullable RunnerSettings runnerSettings) {
myRunnerSettings = runnerSettings;
return this;
}
public ExecutionEnvironmentBuilder contentToReuse(@Nullable RunContentDescriptor contentToReuse) {
myContentToReuse = contentToReuse;
return this;
}
public ExecutionEnvironmentBuilder runProfile(@NotNull RunProfile runProfile) {
myRunProfile = runProfile;
return this;
}
public ExecutionEnvironmentBuilder runner(@NotNull ProgramRunner<?> runner) {
myRunner = runner;
return this;
}
public ExecutionEnvironmentBuilder dataContext(@Nullable DataContext dataContext) {
myDataContext = dataContext;
return this;
}
public ExecutionEnvironmentBuilder executor(@NotNull Executor executor) {
myExecutor = executor;
return this;
}
@NotNull
public ExecutionEnvironment build() {
ExecutionEnvironment environment = null;
ExecutionEnvironmentProvider environmentProvider = ServiceManager.getService(myProject, ExecutionEnvironmentProvider.class);
if (environmentProvider != null) {
environment = environmentProvider.createExecutionEnvironment(
myProject, myRunProfile, myExecutor, myTarget, myRunnerSettings, myConfigurationSettings, myRunnerAndConfigurationSettings);
}
if (environment == null && myRunner == null) {
if (myRunnerId == null) {
myRunner = RunnerRegistry.getInstance().getRunner(myExecutor.getId(), myRunProfile);
}
else {
myRunner = RunnerRegistry.getInstance().findRunnerById(myRunnerId);
}
}
if (environment == null && myRunner == null) {
throw new IllegalStateException("Runner must be specified");
}
if (environment == null) {
environment = new ExecutionEnvironment(myRunProfile, myExecutor, myTarget, myProject, myRunnerSettings,
myConfigurationSettings, myContentToReuse, myRunnerAndConfigurationSettings, myRunner);
}
if (myAssignNewId) {
environment.assignNewExecutionId();
}
if (myDataContext != null) {
environment.setDataContext(myDataContext);
}
myUserData.copyUserDataTo(environment);
return environment;
}
public void buildAndExecute() throws ExecutionException {
ExecutionEnvironment environment = build();
myRunner.execute(environment);
}
}
| apache-2.0 |
cedricporter/everlost | frameworks/cocos2d-x/cocos/scripting/lua-bindings/auto/api/GLProgram.lua | 4147 |
--------------------------------
-- @module GLProgram
-- @extend Ref
--------------------------------
-- @function [parent=#GLProgram] getFragmentShaderLog
-- @param self
-- @return string#string ret (return value: string)
--------------------------------
-- @function [parent=#GLProgram] initWithByteArrays
-- @param self
-- @param #char char
-- @param #char char
-- @return bool#bool ret (return value: bool)
--------------------------------
-- @function [parent=#GLProgram] setUniformLocationWithMatrix4fv
-- @param self
-- @param #int int
-- @param #float float
-- @param #unsigned int int
--------------------------------
-- @function [parent=#GLProgram] initWithFilenames
-- @param self
-- @param #string str
-- @param #string str
-- @return bool#bool ret (return value: bool)
--------------------------------
-- @function [parent=#GLProgram] getUniformLocationForName
-- @param self
-- @param #char char
-- @return int#int ret (return value: int)
--------------------------------
-- @function [parent=#GLProgram] use
-- @param self
--------------------------------
-- @function [parent=#GLProgram] getVertexShaderLog
-- @param self
-- @return string#string ret (return value: string)
--------------------------------
-- overload function: setUniformsForBuiltins(kmMat4)
--
-- overload function: setUniformsForBuiltins()
--
-- @function [parent=#GLProgram] setUniformsForBuiltins
-- @param self
-- @param #kmMat4 kmmat4
--------------------------------
-- @function [parent=#GLProgram] setUniformLocationWith3i
-- @param self
-- @param #int int
-- @param #int int
-- @param #int int
-- @param #int int
--------------------------------
-- @function [parent=#GLProgram] setUniformLocationWith3iv
-- @param self
-- @param #int int
-- @param #int int
-- @param #unsigned int int
--------------------------------
-- @function [parent=#GLProgram] updateUniforms
-- @param self
--------------------------------
-- @function [parent=#GLProgram] setUniformLocationWith4iv
-- @param self
-- @param #int int
-- @param #int int
-- @param #unsigned int int
--------------------------------
-- @function [parent=#GLProgram] getUniformLocation
-- @param self
-- @param #char char
-- @return int#int ret (return value: int)
--------------------------------
-- @function [parent=#GLProgram] setUniformLocationWith1i
-- @param self
-- @param #int int
-- @param #int int
--------------------------------
-- @function [parent=#GLProgram] setUniformLocationWith2iv
-- @param self
-- @param #int int
-- @param #int int
-- @param #unsigned int int
--------------------------------
-- @function [parent=#GLProgram] setUniformLocationWithMatrix3fv
-- @param self
-- @param #int int
-- @param #float float
-- @param #unsigned int int
--------------------------------
-- @function [parent=#GLProgram] reset
-- @param self
--------------------------------
-- @function [parent=#GLProgram] bindAttribLocation
-- @param self
-- @param #char char
-- @param #unsigned int int
--------------------------------
-- @function [parent=#GLProgram] getAttribLocation
-- @param self
-- @param #char char
-- @return int#int ret (return value: int)
--------------------------------
-- @function [parent=#GLProgram] setUniformLocationWithMatrix2fv
-- @param self
-- @param #int int
-- @param #float float
-- @param #unsigned int int
--------------------------------
-- @function [parent=#GLProgram] setUniformLocationWith4i
-- @param self
-- @param #int int
-- @param #int int
-- @param #int int
-- @param #int int
-- @param #int int
--------------------------------
-- @function [parent=#GLProgram] link
-- @param self
-- @return bool#bool ret (return value: bool)
--------------------------------
-- @function [parent=#GLProgram] setUniformLocationWith2i
-- @param self
-- @param #int int
-- @param #int int
-- @param #int int
--------------------------------
-- @function [parent=#GLProgram] GLProgram
-- @param self
return nil
| apache-2.0 |
ahmed-mahran/hue | apps/beeswax/src/beeswax/server/hive_server2_lib.py | 38037 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import itertools
import json
import re
from itertools import imap
from operator import itemgetter
from django.utils.translation import ugettext as _
from desktop.lib import thrift_util
from desktop.conf import DEFAULT_USER
from hadoop import cluster
from TCLIService import TCLIService
from TCLIService.ttypes import TOpenSessionReq, TGetTablesReq, TFetchResultsReq,\
TStatusCode, TGetResultSetMetadataReq, TGetColumnsReq, TTypeId,\
TExecuteStatementReq, TGetOperationStatusReq, TFetchOrientation,\
TCloseSessionReq, TGetSchemasReq, TGetLogReq, TCancelOperationReq,\
TCloseOperationReq, TFetchResultsResp, TRowSet, TProtocolVersion
from beeswax import conf as beeswax_conf
from beeswax import hive_site
from beeswax.hive_site import hiveserver2_use_ssl
from beeswax.models import Session, HiveServerQueryHandle, HiveServerQueryHistory
from beeswax.server.dbms import Table, NoSuchObjectException, DataTable,\
QueryServerException
LOG = logging.getLogger(__name__)
IMPALA_RESULTSET_CACHE_SIZE = 'impala.resultset.cache.size'
DEFAULT_USER = DEFAULT_USER.get()
class HiveServerTable(Table):
"""
We get the table details from a DESCRIBE FORMATTED.
"""
def __init__(self, table_results, table_schema, desc_results, desc_schema):
if beeswax_conf.THRIFT_VERSION.get() >= 7:
if not table_results.columns:
raise NoSuchObjectException()
self.table = table_results.columns
else: # Deprecated. To remove in Hue 4.
if not table_results.rows:
raise NoSuchObjectException()
self.table = table_results.rows and table_results.rows[0] or ''
self.table_schema = table_schema
self.desc_results = desc_results
self.desc_schema = desc_schema
self.describe = HiveServerTTableSchema(self.desc_results, self.desc_schema).cols()
@property
def name(self):
return HiveServerTRow(self.table, self.table_schema).col('TABLE_NAME')
@property
def is_view(self):
return HiveServerTRow(self.table, self.table_schema).col('TABLE_TYPE') == 'VIEW'
@property
def partition_keys(self):
try:
return [PartitionKeyCompatible(row['col_name'], row['data_type'], row['comment']) for row in self._get_partition_column()]
except:
LOG.exception('failed to get partition keys')
return []
@property
def path_location(self):
try:
rows = self.describe
rows = [row for row in rows if row['col_name'].startswith('Location:')]
if rows:
return rows[0]['data_type']
except:
LOG.exception('failed to get path location')
return None
@property
def cols(self):
rows = self.describe
col_row_index = 2
try:
end_cols_index = map(itemgetter('col_name'), rows[col_row_index:]).index('')
return rows[col_row_index:][:end_cols_index] + self._get_partition_column()
except ValueError: # DESCRIBE on columns and nested columns does not contain add'l rows beyond cols
return rows[col_row_index:]
except:
# Impala does not have it
return rows
def _get_partition_column(self):
rows = self.describe
try:
col_row_index = map(itemgetter('col_name'), rows).index('# Partition Information') + 3
end_cols_index = map(itemgetter('col_name'), rows[col_row_index:]).index('')
return rows[col_row_index:][:end_cols_index]
except:
# Impala does not have it
return []
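# Sketch of the DESCRIBE FORMATTED layout that cols() and
# _get_partition_column() rely on (illustrative only; the exact rows vary by
# Hive version):
#
#   rows 0-1  header rows, skipped via col_row_index = 2
#   rows 2..  one row per column, ended by a row whose col_name is ''
#   ...       a '# Partition Information' marker, followed by a header row and
#             a blank row (hence the '+ 3'), then one row per partition
#             column, again ended by an empty col_name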
@property
def comment(self):
return HiveServerTRow(self.table, self.table_schema).col('REMARKS')
@property
def properties(self):
rows = self.describe
col_row_index = 2
end_cols_index = map(itemgetter('col_name'), rows[col_row_index:]).index('')
return [{
'col_name': prop['col_name'].strip() if prop['col_name'] else prop['col_name'],
'data_type': prop['data_type'].strip() if prop['data_type'] else prop['data_type'],
'comment': prop['comment'].strip() if prop['comment'] else prop['comment']
} for prop in rows[col_row_index + end_cols_index + 1:]
]
@property
def stats(self):
rows = self.properties
col_row_index = map(itemgetter('col_name'), rows).index('Table Parameters:') + 1
end_cols_index = map(itemgetter('data_type'), rows[col_row_index:]).index(None)
return rows[col_row_index:][:end_cols_index]
@property
def has_complex(self):
has_complex = False
complex_types = ["struct", "array", "map", "uniontype"]
patterns = [re.compile(typ) for typ in complex_types]
for column in self.cols:
if isinstance(column, dict) and 'data_type' in column:
column_type = column['data_type']
else: # Col object
column_type = column.type
if column_type and any(p.match(column_type.lower()) for p in patterns):
has_complex = True
break
return has_complex
class HiveServerTRowSet2:
def __init__(self, row_set, schema):
self.row_set = row_set
self.rows = row_set.rows
self.schema = schema
self.startRowOffset = row_set.startRowOffset
def is_empty(self):
return not self.row_set.columns or not HiveServerTColumnValue2(self.row_set.columns[0]).val
def cols(self, col_names):
cols_rows = []
rs = HiveServerTRow2(self.row_set.columns, self.schema)
cols = [rs.full_col(name) for name in col_names]
for cols_row in itertools.izip(*cols):
cols_rows.append(dict(itertools.izip(col_names, cols_row)))
return cols_rows
def __iter__(self):
return self
def next(self):
if self.row_set.columns:
return HiveServerTRow2(self.row_set.columns, self.schema)
else:
raise StopIteration
class HiveServerTRow2:
def __init__(self, cols, schema):
self.cols = cols
self.schema = schema
def col(self, colName):
pos = self._get_col_position(colName)
return HiveServerTColumnValue2(self.cols[pos]).val[0] # Return only first element
def full_col(self, colName):
pos = self._get_col_position(colName)
return HiveServerTColumnValue2(self.cols[pos]).val # Return the full column and its values
def _get_col_position(self, column_name):
return filter(lambda (i, col): col.columnName == column_name, enumerate(self.schema.columns))[0][0]
def fields(self):
try:
return [HiveServerTColumnValue2(field).val.pop(0) for field in self.cols]
except IndexError:
raise StopIteration
class HiveServerTColumnValue2:
def __init__(self, tcolumn_value):
self.column_value = tcolumn_value
@property
def val(self):
# Could directly get index from schema but would need to cache the schema
if self.column_value.stringVal:
return self._get_val(self.column_value.stringVal)
elif self.column_value.i16Val is not None:
return self._get_val(self.column_value.i16Val)
elif self.column_value.i32Val is not None:
return self._get_val(self.column_value.i32Val)
elif self.column_value.i64Val is not None:
return self._get_val(self.column_value.i64Val)
elif self.column_value.doubleVal is not None:
return self._get_val(self.column_value.doubleVal)
elif self.column_value.boolVal is not None:
return self._get_val(self.column_value.boolVal)
elif self.column_value.byteVal is not None:
return self._get_val(self.column_value.byteVal)
elif self.column_value.binaryVal is not None:
return self._get_val(self.column_value.binaryVal)
@classmethod
def _get_val(cls, column):
column.values = cls.set_nulls(column.values, column.nulls)
column.nulls = '' # Clear the null mask so the column is not re-marked with nulls on the next call
return column.values
@classmethod
def mark_nulls(cls, values, bytestring):
mask = bytearray(bytestring)
for n in mask:
yield n & 0x01
yield n & 0x02
yield n & 0x04
yield n & 0x08
yield n & 0x10
yield n & 0x20
yield n & 0x40
yield n & 0x80
@classmethod
def set_nulls(cls, values, bytestring):
if bytestring == '' or re.match('^(\x00)+$', bytestring): # HS2 has just \x00 or '', Impala can have \x00\x00...
return values
else:
_values = [None if is_null else value for value, is_null in itertools.izip(values, cls.mark_nulls(values, bytestring))]
if len(values) != len(_values): # HS2 can have just \x00\x01 instead of \x00\x01\x00...
_values.extend(values[len(_values):])
return _values
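# Worked example of the null-mask handling above (values are illustrative, not
# part of the API): with values ['a', 'b', 'c', 'd'] and nulls '\x05'
# (binary 00000101), mark_nulls() yields the flags 1, 0, 4, 0, ... (eight per
# mask byte, least-significant bit first), so set_nulls() returns
# [None, 'b', None, 'd']. izip() stops at len(values), so surplus flags from
# the last mask byte are simply ignored.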
class HiveServerDataTable(DataTable):
def __init__(self, results, schema, operation_handle, query_server):
self.schema = schema and schema.schema
self.row_set = HiveServerTRowSet(results.results, schema)
self.operation_handle = operation_handle
if query_server['server_name'] == 'impala':
self.has_more = results.hasMoreRows
else:
self.has_more = not self.row_set.is_empty() # Should be results.hasMoreRows but always True in HS2
self.startRowOffset = self.row_set.startRowOffset # Always 0 in HS2
@property
def ready(self):
return True
def cols(self):
if self.schema:
return [HiveServerTColumnDesc(col) for col in self.schema.columns]
else:
return []
def rows(self):
for row in self.row_set:
yield row.fields()
class HiveServerTTableSchema:
def __init__(self, columns, schema):
self.columns = columns
self.schema = schema
def cols(self):
try:
return HiveServerTRowSet(self.columns, self.schema).cols(('col_name', 'data_type', 'comment'))
except:
# Impala API is different
cols = HiveServerTRowSet(self.columns, self.schema).cols(('name', 'type', 'comment'))
for col in cols:
col['col_name'] = col.pop('name')
col['data_type'] = col.pop('type')
return cols
def col(self, colName):
pos = self._get_col_position(colName)
return HiveServerTColumnDesc(self.columns[pos]).val
def _get_col_position(self, column_name):
return filter(lambda (i, col): col.columnName == column_name, enumerate(self.schema.columns))[0][0]
if beeswax_conf.THRIFT_VERSION.get() >= 7:
HiveServerTRow = HiveServerTRow2
HiveServerTRowSet = HiveServerTRowSet2
else:
# Deprecated. To remove in Hue 4.
class HiveServerTRow:
def __init__(self, row, schema):
self.row = row
self.schema = schema
def col(self, colName):
pos = self._get_col_position(colName)
return HiveServerTColumnValue(self.row.colVals[pos]).val
def _get_col_position(self, column_name):
return filter(lambda (i, col): col.columnName == column_name, enumerate(self.schema.columns))[0][0]
def fields(self):
return [HiveServerTColumnValue(field).val for field in self.row.colVals]
class HiveServerTRowSet:
def __init__(self, row_set, schema):
self.row_set = row_set
self.rows = row_set.rows
self.schema = schema
self.startRowOffset = row_set.startRowOffset
def is_empty(self):
return len(self.rows) == 0
def cols(self, col_names):
cols_rows = []
for row in self.rows:
row = HiveServerTRow(row, self.schema)
cols = {}
for col_name in col_names:
cols[col_name] = row.col(col_name)
cols_rows.append(cols)
return cols_rows
def __iter__(self):
return self
def next(self):
if self.rows:
return HiveServerTRow(self.rows.pop(0), self.schema)
else:
raise StopIteration
class HiveServerTColumnValue:
def __init__(self, tcolumn_value):
self.column_value = tcolumn_value
@property
def val(self):
if self.column_value.boolVal is not None:
return self.column_value.boolVal.value
elif self.column_value.byteVal is not None:
return self.column_value.byteVal.value
elif self.column_value.i16Val is not None:
return self.column_value.i16Val.value
elif self.column_value.i32Val is not None:
return self.column_value.i32Val.value
elif self.column_value.i64Val is not None:
return self.column_value.i64Val.value
elif self.column_value.doubleVal is not None:
return self.column_value.doubleVal.value
elif self.column_value.stringVal is not None:
return self.column_value.stringVal.value
class HiveServerTColumnDesc:
def __init__(self, column):
self.column = column
@property
def name(self):
return self.column.columnName
@property
def comment(self):
return self.column.comment
@property
def type(self):
return self.get_type(self.column.typeDesc)
@classmethod
def get_type(self, typeDesc):
for ttype in typeDesc.types:
if ttype.primitiveEntry is not None:
return TTypeId._VALUES_TO_NAMES[ttype.primitiveEntry.type]
elif ttype.mapEntry is not None:
return ttype.mapEntry
elif ttype.unionEntry is not None:
return ttype.unionEntry
elif ttype.arrayEntry is not None:
return ttype.arrayEntry
elif ttype.structEntry is not None:
return ttype.structEntry
elif ttype.userDefinedTypeEntry is not None:
return ttype.userDefinedTypeEntry
class HiveServerClient:
HS2_MECHANISMS = {
'KERBEROS': 'GSSAPI',
'NONE': 'PLAIN',
'NOSASL': 'NOSASL',
'LDAP': 'PLAIN',
'PAM': 'PLAIN'
}
def __init__(self, query_server, user):
self.query_server = query_server
self.user = user
use_sasl, mechanism, kerberos_principal_short_name, impersonation_enabled, auth_username, auth_password = self.get_security()
LOG.info('use_sasl=%s, mechanism=%s, kerberos_principal_short_name=%s, impersonation_enabled=%s, auth_username=%s' % (
use_sasl, mechanism, kerberos_principal_short_name, impersonation_enabled, auth_username))
self.use_sasl = use_sasl
self.kerberos_principal_short_name = kerberos_principal_short_name
self.impersonation_enabled = impersonation_enabled
if self.query_server['server_name'] == 'impala':
from impala import conf as impala_conf
ssl_enabled = impala_conf.SSL.ENABLED.get()
ca_certs = impala_conf.SSL.CACERTS.get()
keyfile = impala_conf.SSL.KEY.get()
certfile = impala_conf.SSL.CERT.get()
validate = impala_conf.SSL.VALIDATE.get()
timeout = impala_conf.SERVER_CONN_TIMEOUT.get()
else:
ssl_enabled = hiveserver2_use_ssl()
ca_certs = beeswax_conf.SSL.CACERTS.get()
keyfile = beeswax_conf.SSL.KEY.get()
certfile = beeswax_conf.SSL.CERT.get()
validate = beeswax_conf.SSL.VALIDATE.get()
timeout = beeswax_conf.SERVER_CONN_TIMEOUT.get()
if auth_username:
username = auth_username
password = auth_password
else:
username = user.username
password = None
self._client = thrift_util.get_client(TCLIService.Client,
query_server['server_host'],
query_server['server_port'],
service_name=query_server['server_name'],
kerberos_principal=kerberos_principal_short_name,
use_sasl=use_sasl,
mechanism=mechanism,
username=username,
password=password,
timeout_seconds=timeout,
use_ssl=ssl_enabled,
ca_certs=ca_certs,
keyfile=keyfile,
certfile=certfile,
validate=validate,
transport_mode=query_server.get('transport_mode', 'socket'),
http_url=query_server.get('http_url', '')
)
def get_security(self):
principal = self.query_server['principal']
impersonation_enabled = False
auth_username = self.query_server['auth_username'] # Pass-through LDAP/PAM authentication
auth_password = self.query_server['auth_password']
if principal:
kerberos_principal_short_name = principal.split('/', 1)[0]
else:
kerberos_principal_short_name = None
if self.query_server['server_name'] == 'impala':
if auth_password: # Force LDAP/PAM.. auth if auth_password is provided
use_sasl = True
mechanism = HiveServerClient.HS2_MECHANISMS['NONE']
else:
cluster_conf = cluster.get_cluster_conf_for_job_submission()
use_sasl = cluster_conf is not None and cluster_conf.SECURITY_ENABLED.get()
mechanism = HiveServerClient.HS2_MECHANISMS['KERBEROS']
impersonation_enabled = self.query_server['impersonation_enabled']
else:
hive_mechanism = hive_site.get_hiveserver2_authentication()
if hive_mechanism not in HiveServerClient.HS2_MECHANISMS:
raise Exception(_('%s server authentication not supported. Valid are %s.') % (hive_mechanism, HiveServerClient.HS2_MECHANISMS.keys()))
use_sasl = hive_mechanism in ('KERBEROS', 'NONE', 'LDAP', 'PAM')
mechanism = HiveServerClient.HS2_MECHANISMS[hive_mechanism]
impersonation_enabled = hive_site.hiveserver2_impersonation_enabled()
return use_sasl, mechanism, kerberos_principal_short_name, impersonation_enabled, auth_username, auth_password
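# Example outcome (illustrative): with hive.server2.authentication=LDAP and no
# Kerberos principal configured, this returns use_sasl=True, mechanism='PLAIN',
# kerberos_principal_short_name=None, plus the impersonation flag and any
# pass-through auth_username/auth_password configured for the server.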
def open_session(self, user):
kwargs = {
'client_protocol': beeswax_conf.THRIFT_VERSION.get() - 1,
'username': user.username, # If SASL or LDAP, it gets the username from the authentication mechanism since it depends on it.
'configuration': {},
}
if self.impersonation_enabled:
kwargs.update({'username': DEFAULT_USER})
if self.query_server['server_name'] == 'impala': # Only when Impala accepts it
kwargs['configuration'].update({'impala.doas.user': user.username})
if self.query_server['server_name'] == 'beeswax': # All the time
kwargs['configuration'].update({'hive.server2.proxy.user': user.username})
if self.query_server['server_name'] == 'sparksql': # All the time
kwargs['configuration'].update({'hive.server2.proxy.user': user.username})
req = TOpenSessionReq(**kwargs)
res = self._client.OpenSession(req)
if res.status is not None and res.status.statusCode not in (TStatusCode.SUCCESS_STATUS,):
if hasattr(res.status, 'errorMessage') and res.status.errorMessage:
message = res.status.errorMessage
else:
message = ''
raise QueryServerException(Exception('Bad status for request %s:\n%s' % (req, res)), message=message)
sessionId = res.sessionHandle.sessionId
LOG.info('Opening session %s' % sessionId)
encoded_status, encoded_guid = HiveServerQueryHandle(secret=sessionId.secret, guid=sessionId.guid).get()
properties = json.dumps(res.configuration)
return Session.objects.create(owner=user,
application=self.query_server['server_name'],
status_code=res.status.statusCode,
secret=encoded_status,
guid=encoded_guid,
server_protocol_version=res.serverProtocolVersion,
properties=properties)
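# Illustration (assumed user name): with impersonation enabled on a 'beeswax'
# server, opening a session for user 'alice' authenticates as DEFAULT_USER and
# sends {'hive.server2.proxy.user': 'alice'} in the configuration, so
# HiveServer2 executes the queries as 'alice'.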
def call(self, fn, req, status=TStatusCode.SUCCESS_STATUS):
session = Session.objects.get_session(self.user, self.query_server['server_name'])
if session is None:
session = self.open_session(self.user)
if hasattr(req, 'sessionHandle') and req.sessionHandle is None:
req.sessionHandle = session.get_handle()
res = fn(req)
# Not supported currently in HS2 and Impala: TStatusCode.INVALID_HANDLE_STATUS
if res.status.statusCode == TStatusCode.ERROR_STATUS and \
re.search('Invalid SessionHandle|Invalid session|Client session expired', res.status.errorMessage or '', re.I):
LOG.info('Retrying with a new session for %s because of %s' % (self.user, res))
session = self.open_session(self.user)
req.sessionHandle = session.get_handle()
# Get back the name of the function to call
res = getattr(self._client, fn.attr)(req)
if status is not None and res.status.statusCode not in (
TStatusCode.SUCCESS_STATUS, TStatusCode.SUCCESS_WITH_INFO_STATUS, TStatusCode.STILL_EXECUTING_STATUS):
if hasattr(res.status, 'errorMessage') and res.status.errorMessage:
message = res.status.errorMessage
else:
message = ''
raise QueryServerException(Exception('Bad status for request %s:\n%s' % (req, res)), message=message)
else:
return res
def close_session(self, sessionHandle):
req = TCloseSessionReq(sessionHandle=sessionHandle)
return self._client.CloseSession(req)
def get_databases(self):
# GetCatalogs() is not implemented in HS2
req = TGetSchemasReq()
res = self.call(self._client.GetSchemas, req)
results, schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=5000)
self.close_operation(res.operationHandle)
col = 'TABLE_SCHEM'
return HiveServerTRowSet(results.results, schema.schema).cols((col,))
def get_database(self, database):
if self.query_server['server_name'] == 'impala':
raise NotImplementedError(_("Impala has not implemented the 'DESCRIBE DATABASE' command: %(issue_ref)s") % {
'issue_ref': "https://issues.cloudera.org/browse/IMPALA-2196"
})
query = 'DESCRIBE DATABASE EXTENDED `%s`' % (database)
(desc_results, desc_schema), operation_handle = self.execute_statement(query, max_rows=5000, orientation=TFetchOrientation.FETCH_NEXT)
self.close_operation(operation_handle)
cols = ('db_name', 'comment', 'location')
if len(HiveServerTRowSet(desc_results.results, desc_schema.schema).cols(cols)) != 1:
raise ValueError(_("%(query)s returned more than 1 row") % {'query': query})
return HiveServerTRowSet(desc_results.results, desc_schema.schema).cols(cols)[0] # Should only contain one row
def get_tables_meta(self, database, table_names):
req = TGetTablesReq(schemaName=database, tableName=table_names)
res = self.call(self._client.GetTables, req)
results, schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=5000)
self.close_operation(res.operationHandle)
cols = ('TABLE_NAME', 'TABLE_TYPE', 'REMARKS')
return HiveServerTRowSet(results.results, schema.schema).cols(cols)
def get_tables(self, database, table_names):
req = TGetTablesReq(schemaName=database, tableName=table_names)
res = self.call(self._client.GetTables, req)
results, schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=5000)
self.close_operation(res.operationHandle)
return HiveServerTRowSet(results.results, schema.schema).cols(('TABLE_NAME',))
def get_table(self, database, table_name, partition_spec=None):
req = TGetTablesReq(schemaName=database, tableName=table_name)
res = self.call(self._client.GetTables, req)
table_results, table_schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT)
self.close_operation(res.operationHandle)
if partition_spec:
query = 'DESCRIBE FORMATTED `%s`.`%s` PARTITION(%s)' % (database, table_name, partition_spec)
else:
query = 'DESCRIBE FORMATTED `%s`.`%s`' % (database, table_name)
(desc_results, desc_schema), operation_handle = self.execute_statement(query, max_rows=5000, orientation=TFetchOrientation.FETCH_NEXT)
self.close_operation(operation_handle)
return HiveServerTable(table_results.results, table_schema.schema, desc_results.results, desc_schema.schema)
def execute_query(self, query, max_rows=1000):
configuration = self._get_query_configuration(query)
return self.execute_query_statement(statement=query.query['query'], max_rows=max_rows, configuration=configuration)
def execute_query_statement(self, statement, max_rows=1000, configuration={}, orientation=TFetchOrientation.FETCH_FIRST):
(results, schema), operation_handle = self.execute_statement(statement=statement, max_rows=max_rows, configuration=configuration, orientation=orientation)
return HiveServerDataTable(results, schema, operation_handle, self.query_server)
def execute_async_query(self, query, statement=0):
if statement == 0:
# Impala just has settings currently
if self.query_server['server_name'] == 'beeswax':
for resource in query.get_configuration_statements():
self.execute_statement(resource.strip())
configuration = {}
if self.query_server['server_name'] == 'impala' and self.query_server['querycache_rows'] > 0:
configuration[IMPALA_RESULTSET_CACHE_SIZE] = str(self.query_server['querycache_rows'])
# The query can override the default configuration
configuration.update(self._get_query_configuration(query))
query_statement = query.get_query_statement(statement)
return self.execute_async_statement(statement=query_statement, confOverlay=configuration)
def execute_statement(self, statement, max_rows=1000, configuration={}, orientation=TFetchOrientation.FETCH_NEXT):
if self.query_server['server_name'] == 'impala' and self.query_server['QUERY_TIMEOUT_S'] > 0:
configuration['QUERY_TIMEOUT_S'] = str(self.query_server['QUERY_TIMEOUT_S'])
req = TExecuteStatementReq(statement=statement.encode('utf-8'), confOverlay=configuration)
res = self.call(self._client.ExecuteStatement, req)
return self.fetch_result(res.operationHandle, max_rows=max_rows, orientation=orientation), res.operationHandle
def execute_async_statement(self, statement, confOverlay):
if self.query_server['server_name'] == 'impala' and self.query_server['QUERY_TIMEOUT_S'] > 0:
confOverlay['QUERY_TIMEOUT_S'] = str(self.query_server['QUERY_TIMEOUT_S'])
req = TExecuteStatementReq(statement=statement.encode('utf-8'), confOverlay=confOverlay, runAsync=True)
res = self.call(self._client.ExecuteStatement, req)
return HiveServerQueryHandle(secret=res.operationHandle.operationId.secret,
guid=res.operationHandle.operationId.guid,
operation_type=res.operationHandle.operationType,
has_result_set=res.operationHandle.hasResultSet,
modified_row_count=res.operationHandle.modifiedRowCount)
def fetch_data(self, operation_handle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=1000):
# Fetch until the result is empty due to a HS2 bug, instead of looking at hasMoreRows
results, schema = self.fetch_result(operation_handle, orientation, max_rows)
return HiveServerDataTable(results, schema, operation_handle, self.query_server)
def cancel_operation(self, operation_handle):
req = TCancelOperationReq(operationHandle=operation_handle)
return self.call(self._client.CancelOperation, req)
def close_operation(self, operation_handle):
req = TCloseOperationReq(operationHandle=operation_handle)
return self.call(self._client.CloseOperation, req)
def get_columns(self, database, table):
req = TGetColumnsReq(schemaName=database, tableName=table)
res = self.call(self._client.GetColumns, req)
res, schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT)
self.close_operation(res.operationHandle)
return res, schema
def fetch_result(self, operation_handle, orientation=TFetchOrientation.FETCH_FIRST, max_rows=1000):
if operation_handle.hasResultSet:
fetch_req = TFetchResultsReq(operationHandle=operation_handle, orientation=orientation, maxRows=max_rows)
res = self.call(self._client.FetchResults, fetch_req)
else:
res = TFetchResultsResp(results=TRowSet(startRowOffset=0, rows=[], columns=[]))
if operation_handle.hasResultSet and TFetchOrientation.FETCH_FIRST: # Only fetch for the first call that should be with start_over
meta_req = TGetResultSetMetadataReq(operationHandle=operation_handle)
schema = self.call(self._client.GetResultSetMetadata, meta_req)
else:
schema = None
return res, schema
def fetch_log(self, operation_handle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=1000):
req = TFetchResultsReq(operationHandle=operation_handle, orientation=orientation, maxRows=max_rows, fetchType=1)
res = self.call(self._client.FetchResults, req)
if beeswax_conf.THRIFT_VERSION.get() >= 7:
lines = res.results.columns[0].stringVal.values
else:
lines = imap(lambda r: r.colVals[0].stringVal.value, res.results.rows)
return '\n'.join(lines)
def get_operation_status(self, operation_handle):
req = TGetOperationStatusReq(operationHandle=operation_handle)
return self.call(self._client.GetOperationStatus, req)
def explain(self, query):
query_statement = query.get_query_statement(0)
configuration = self._get_query_configuration(query)
return self.execute_query_statement(statement='EXPLAIN %s' % query_statement, configuration=configuration, orientation=TFetchOrientation.FETCH_NEXT)
def get_log(self, operation_handle):
try:
req = TGetLogReq(operationHandle=operation_handle)
res = self.call(self._client.GetLog, req)
return res.log
except:
LOG.exception('server does not support GetLog')
return 'Server does not support GetLog()'
def get_partitions(self, database, table_name, partition_spec=None, max_parts=None, reverse_sort=True):
table = self.get_table(database, table_name)
if max_parts is None or max_parts <= 0:
max_rows = 10000
else:
max_rows = 1000 if max_parts <= 250 else max_parts
query = 'SHOW PARTITIONS `%s`.`%s`' % (database, table_name)
if partition_spec:
query += ' PARTITION(%s)' % partition_spec
partition_table = self.execute_query_statement(query, max_rows=max_rows)
partitions = [PartitionValueCompatible(partition, table) for partition in partition_table.rows()]
if reverse_sort:
partitions.reverse()
return partitions[:max_parts]
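# For example (illustrative names): get_partitions('default', 'web_logs',
# partition_spec="ds='2015-01-01'") issues
#   SHOW PARTITIONS `default`.`web_logs` PARTITION(ds='2015-01-01')
# and returns the matching partitions, most recent first when reverse_sort is
# True.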
def _get_query_configuration(self, query):
return dict([(setting['key'], setting['value']) for setting in query.settings])
class HiveServerTableCompatible(HiveServerTable):
"""Same API as Beeswax"""
def __init__(self, hive_table):
self.table = hive_table.table
self.table_schema = hive_table.table_schema
self.desc_results = hive_table.desc_results
self.desc_schema = hive_table.desc_schema
self.describe = HiveServerTTableSchema(self.desc_results, self.desc_schema).cols()
@property
def cols(self):
return [
type('Col', (object,), {
'name': col.get('col_name', '').strip() if col.get('col_name') else '',
'type': col.get('data_type', '').strip() if col.get('data_type') else '',
'comment': col.get('comment', '').strip() if col.get('comment') else ''
}) for col in HiveServerTable.cols.fget(self)
]
class ResultCompatible:
def __init__(self, data_table):
self.data_table = data_table
self.rows = data_table.rows
self.has_more = data_table.has_more
self.start_row = data_table.startRowOffset
self.ready = True
@property
def columns(self):
return self.cols()
def cols(self):
return [col.name for col in self.data_table.cols()]
class PartitionKeyCompatible:
def __init__(self, name, type, comment):
self.name = name
self.type = type
self.comment = comment
def __eq__(self, other):
return isinstance(other, PartitionKeyCompatible) and \
self.name == other.name and \
self.type == other.type and \
self.comment == other.comment
def __repr__(self):
return 'PartitionKey(name:%s, type:%s, comment:%s)' % (self.name, self.type, self.comment)
class PartitionValueCompatible:
def __init__(self, partition_row, table, properties=None):
if properties is None:
properties = {}
# Parses: ['datehour=2013022516'] or ['month=2011-07/dt=2011-07-01/hr=12']
partition = partition_row[0]
parts = partition.split('/')
self.partition_spec = ','.join(["%s='%s'" % (pv[0], pv[1]) for pv in [part.split('=') for part in parts]])
self.values = [pv[1] for pv in [part.split('=') for part in parts]]
self.sd = type('Sd', (object,), properties,)
class ExplainCompatible:
def __init__(self, data_table):
self.textual = '\n'.join([line[0] for line in data_table.rows()])
class ResultMetaCompatible:
def __init__(self):
self.in_tablename = True
class HiveServerClientCompatible(object):
"""Same API as Beeswax"""
def __init__(self, client):
self._client = client
self.user = client.user
self.query_server = client.query_server
def query(self, query, statement=0):
return self._client.execute_async_query(query, statement)
def get_state(self, handle):
operationHandle = handle.get_rpc_handle()
res = self._client.get_operation_status(operationHandle)
return HiveServerQueryHistory.STATE_MAP[res.operationState]
def get_operation_status(self, handle):
operationHandle = handle.get_rpc_handle()
return self._client.get_operation_status(operationHandle)
def use(self, query):
data = self._client.execute_query(query)
self._client.close_operation(data.operation_handle)
return data
def explain(self, query):
data_table = self._client.explain(query)
data = ExplainCompatible(data_table)
self._client.close_operation(data_table.operation_handle)
return data
def fetch(self, handle, start_over=False, max_rows=None):
operationHandle = handle.get_rpc_handle()
if max_rows is None:
max_rows = 1000
if start_over and not (self.query_server['server_name'] == 'impala' and self.query_server['querycache_rows'] == 0): # Backward compatibility for impala
orientation = TFetchOrientation.FETCH_FIRST
else:
orientation = TFetchOrientation.FETCH_NEXT
data_table = self._client.fetch_data(operationHandle, orientation=orientation, max_rows=max_rows)
return ResultCompatible(data_table)
def cancel_operation(self, handle):
operationHandle = handle.get_rpc_handle()
return self._client.cancel_operation(operationHandle)
def close(self, handle):
return self.close_operation(handle)
def close_operation(self, handle):
operationHandle = handle.get_rpc_handle()
return self._client.close_operation(operationHandle)
def close_session(self, session):
operationHandle = session.get_handle()
return self._client.close_session(operationHandle)
def dump_config(self):
return 'Does not exist in HS2'
def get_log(self, handle, start_over=True):
operationHandle = handle.get_rpc_handle()
if beeswax_conf.USE_GET_LOG_API.get() or self.query_server['server_name'] == 'impala':
return self._client.get_log(operationHandle)
else:
if start_over:
orientation = TFetchOrientation.FETCH_FIRST
else:
orientation = TFetchOrientation.FETCH_NEXT
return self._client.fetch_log(operationHandle, orientation=orientation, max_rows=-1)
def get_databases(self):
col = 'TABLE_SCHEM'
return [table[col] for table in self._client.get_databases()]
def get_database(self, database):
return self._client.get_database(database)
def get_tables_meta(self, database, table_names):
tables = self._client.get_tables_meta(database, table_names)
massaged_tables = []
for table in tables:
massaged_tables.append({
'name': table['TABLE_NAME'],
'comment': table['REMARKS'],
'type': table['TABLE_TYPE'].capitalize()}
)
return massaged_tables
def get_tables(self, database, table_names):
tables = [table['TABLE_NAME'] for table in self._client.get_tables(database, table_names)]
tables.sort()
return tables
def get_table(self, database, table_name, partition_spec=None):
table = self._client.get_table(database, table_name, partition_spec)
return HiveServerTableCompatible(table)
def get_columns(self, database, table):
return self._client.get_columns(database, table)
def get_default_configuration(self, *args, **kwargs):
return {}
def get_results_metadata(self, handle):
# We just need to mock
return ResultMetaCompatible()
def create_database(self, name, description): raise NotImplementedError()
def alter_table(self, dbname, tbl_name, new_tbl): raise NotImplementedError()
def open_session(self, user):
return self._client.open_session(user)
def add_partition(self, new_part): raise NotImplementedError()
def get_partition(self, *args, **kwargs): raise NotImplementedError()
def get_partitions(self, database, table_name, partition_spec, max_parts, reverse_sort=True):
return self._client.get_partitions(database, table_name, partition_spec, max_parts, reverse_sort)
def alter_partition(self, db_name, tbl_name, new_part): raise NotImplementedError()
| apache-2.0 |
mohan82/myblog-api | persistence/blog.js | 2864 | "use strict";
var CONST = require('../persistence/sqlconst');
var util = require("util");
var DEFAULT_LIMIT = 50;
var MAX_THRESHOLD = 500;
var knexModule = require("knex");
/**
* Blog persistence layer backed by Knex.
* @param knexConfig Knex configuration object
* @constructor
*/
function Blog(knexConfig) {
this.knex = knexModule(knexConfig);
}
//DDL Functions
Blog.prototype.dropPostTable = function () {
console.info("Dropping table if exist");
return this.knex.schema.dropTableIfExists(CONST.POST.TABLE);
};
/**
* Creates the Post table.
*/
Blog.prototype.createPostTable = function () {
console.info("Creating %s table if exist", CONST.POST.TABLE);
return this.knex.schema.createTable(CONST.POST.TABLE, function (table) {
table.increments(CONST.POST.PK);
table.string(CONST.POST.GUID).unique();
table.string(CONST.POST.TITLE).unique()
.notNullable();
table.binary(CONST.POST.CONTENT)
.notNullable();
table.datetime(CONST.POST.PUB_DATE).index(CONST.POST.IDX_PUBDATE)
.notNullable();
});
};
Blog.prototype.cleanUp = function () {
console.log("Cleaning up Knex");
this.knex.destroy();
};
Blog.prototype.savePost = function (post) {
var record = {
"title": post.title,
"content": post.content,
"guid": post.guid,
"publication_date": post.publicationDate
};
return this.knex.insert(record).into(CONST.POST.TABLE);
};
Blog.prototype.deletePost = function (postId) {
console.info("Deleting post :%d", postId);
return this.knex(CONST.POST.TABLE).where(CONST.POST.PK, postId).del();
};
//Limit Helper functions
function checkLowerBoundLimit(limit) {
if (util.isNullOrUndefined(limit) || limit === 0) {
return DEFAULT_LIMIT;
} else {
return limit;
}
}
function checkUpperBoundLimit(value) {
if (!util.isNullOrUndefined(value) && value >= MAX_THRESHOLD) {
return MAX_THRESHOLD;
}
else {
return value;
}
}
Blog.prototype._determineDefaultLimit = function (limit) {
var result = checkLowerBoundLimit(limit);
result = checkUpperBoundLimit(result);
return result;
};
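// Illustrative behaviour of the clamping above:
//   _determineDefaultLimit(undefined) -> 50 (DEFAULT_LIMIT)
//   _determineDefaultLimit(0)         -> 50
//   _determineDefaultLimit(100)       -> 100
//   _determineDefaultLimit(600)       -> 500 (MAX_THRESHOLD)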
// Query functions
function selectAllColumns(knex) {
return knex.
select(CONST.POST.PK, CONST.POST.TITLE, CONST.POST.CONTENT,CONST.POST.PUB_DATE,CONST.POST.GUID).
from(CONST.POST.TABLE);
}
Blog.prototype.findPostById = function (postId) {
return selectAllColumns(this.knex).
where(CONST.POST.PK, postId);
};
Blog.prototype.getAllPosts = function (limit) {
return this.knex.select(CONST.POST.PK,CONST.POST.TITLE,CONST.POST.GUID,CONST.POST.PUB_DATE).
from(CONST.POST.TABLE).limit(this._determineDefaultLimit(limit));
};
Blog.prototype.findPostByTitle = function (title) {
return selectAllColumns(this.knex).
where(CONST.POST.TITLE, title);
};
module.exports = Blog;
| apache-2.0 |
wikimedia/phabricator | src/applications/diffusion/editor/DiffusionRepositoryEditEngine.php | 19968 | <?php
final class DiffusionRepositoryEditEngine
extends PhabricatorEditEngine {
const ENGINECONST = 'diffusion.repository';
private $versionControlSystem;
public function setVersionControlSystem($version_control_system) {
$this->versionControlSystem = $version_control_system;
return $this;
}
public function getVersionControlSystem() {
return $this->versionControlSystem;
}
public function isEngineConfigurable() {
return false;
}
public function isDefaultQuickCreateEngine() {
return true;
}
public function getQuickCreateOrderVector() {
return id(new PhutilSortVector())->addInt(300);
}
public function getEngineName() {
return pht('Repositories');
}
public function getSummaryHeader() {
return pht('Edit Repositories');
}
public function getSummaryText() {
return pht('Creates and edits repositories.');
}
public function getEngineApplicationClass() {
return 'PhabricatorDiffusionApplication';
}
protected function newEditableObject() {
$viewer = $this->getViewer();
$repository = PhabricatorRepository::initializeNewRepository($viewer);
$repository->setDetail('newly-initialized', true);
$vcs = $this->getVersionControlSystem();
if ($vcs) {
$repository->setVersionControlSystem($vcs);
}
// Pick a random open service to allocate this repository on, if any exist.
// If there are no services, we aren't in cluster mode and will allocate
// locally. If there are services but none permit allocations, we fail.
// Eventually we can make this more flexible, but this rule is a reasonable
// starting point as we begin to deploy cluster services.
$services = id(new AlmanacServiceQuery())
->setViewer(PhabricatorUser::getOmnipotentUser())
->withServiceTypes(
array(
AlmanacClusterRepositoryServiceType::SERVICETYPE,
))
->needProperties(true)
->execute();
if ($services) {
// Filter out services which do not permit new allocations.
foreach ($services as $key => $possible_service) {
if ($possible_service->getAlmanacPropertyValue('closed')) {
unset($services[$key]);
}
}
if (!empty($services)) {
shuffle($services);
$service = head($services);
$repository->setAlmanacServicePHID($service->getPHID());
}
}
return $repository;
}
protected function newObjectQuery() {
return new PhabricatorRepositoryQuery();
}
protected function getObjectCreateTitleText($object) {
return pht('Create Repository');
}
protected function getObjectCreateButtonText($object) {
return pht('Create Repository');
}
protected function getObjectEditTitleText($object) {
return pht('Edit Repository: %s', $object->getName());
}
protected function getObjectEditShortText($object) {
return $object->getDisplayName();
}
protected function getObjectCreateShortText() {
return pht('Create Repository');
}
protected function getObjectName() {
return pht('Repository');
}
protected function getObjectViewURI($object) {
return $object->getPathURI('manage/');
}
protected function getCreateNewObjectPolicy() {
return $this->getApplication()->getPolicy(
DiffusionCreateRepositoriesCapability::CAPABILITY);
}
protected function newPages($object) {
$panels = DiffusionRepositoryManagementPanel::getAllPanels();
$pages = array();
$uris = array();
foreach ($panels as $panel_key => $panel) {
$panel->setRepository($object);
$uris[$panel_key] = $panel->getPanelURI();
$page = $panel->newEditEnginePage();
if (!$page) {
continue;
}
$pages[] = $page;
}
$basics_key = DiffusionRepositoryBasicsManagementPanel::PANELKEY;
$basics_uri = $uris[$basics_key];
$more_pages = array(
id(new PhabricatorEditPage())
->setKey('encoding')
->setLabel(pht('Text Encoding'))
->setViewURI($basics_uri)
->setFieldKeys(
array(
'encoding',
)),
id(new PhabricatorEditPage())
->setKey('extensions')
->setLabel(pht('Extensions'))
->setIsDefault(true),
);
foreach ($more_pages as $page) {
$pages[] = $page;
}
return $pages;
}
protected function willConfigureFields($object, array $fields) {
// Change the default field order so related fields are adjacent.
$after = array(
'policy.edit' => array('policy.push'),
);
$result = array();
foreach ($fields as $key => $value) {
$result[$key] = $value;
if (!isset($after[$key])) {
continue;
}
foreach ($after[$key] as $next_key) {
if (!isset($fields[$next_key])) {
continue;
}
unset($result[$next_key]);
$result[$next_key] = $fields[$next_key];
unset($fields[$next_key]);
}
}
return $result;
}
protected function buildCustomEditFields($object) {
$viewer = $this->getViewer();
$policies = id(new PhabricatorPolicyQuery())
->setViewer($viewer)
->setObject($object)
->execute();
$fetch_value = $object->getFetchRules();
$track_value = $object->getTrackOnlyRules();
$permanent_value = $object->getPermanentRefRules();
$automation_instructions = pht(
"Configure **Repository Automation** to allow Phabricator to ".
"write to this repository.".
"\n\n".
"IMPORTANT: This feature is new, experimental, and not supported. ".
"Use it at your own risk.");
$staging_instructions = pht(
"To make it easier to run integration tests and builds on code ".
"under review, you can configure a **Staging Area**. When `arc` ".
"creates a diff, it will push a copy of the changes to the ".
"configured staging area with a corresponding tag.".
"\n\n".
"IMPORTANT: This feature is new, experimental, and not supported. ".
"Use it at your own risk.");
$subpath_instructions = pht(
'If you want to import only part of a repository, like `trunk/`, '.
'you can set a path in **Import Only**. Phabricator will ignore '.
'commits which do not affect this path.');
$filesize_warning = null;
if ($object->isGit()) {
$git_binary = PhutilBinaryAnalyzer::getForBinary('git');
$git_version = $git_binary->getBinaryVersion();
$filesize_version = '1.8.4';
if (version_compare($git_version, $filesize_version, '<')) {
$filesize_warning = pht(
'(WARNING) {icon exclamation-triangle} The version of "git" ("%s") '.
'installed on this server does not support '.
'"--batch-check=<format>", a feature required to enforce filesize '.
'limits. Upgrade to "git" %s or newer to use this feature.',
$git_version,
$filesize_version);
}
}
$track_instructions = pht(
'WARNING: The "Track Only" feature is deprecated. Use "Fetch Refs" '.
'and "Permanent Refs" instead. This feature will be removed in a '.
'future version of Phabricator.');
return array(
id(new PhabricatorSelectEditField())
->setKey('vcs')
->setLabel(pht('Version Control System'))
->setTransactionType(
PhabricatorRepositoryVCSTransaction::TRANSACTIONTYPE)
->setIsFormField(false)
->setIsCopyable(true)
->setOptions(PhabricatorRepositoryType::getAllRepositoryTypes())
->setDescription(pht('Underlying repository version control system.'))
->setConduitDescription(
pht(
'Choose which version control system to use when creating a '.
'repository.'))
->setConduitTypeDescription(pht('Version control system selection.'))
->setValue($object->getVersionControlSystem()),
id(new PhabricatorTextEditField())
->setKey('name')
->setLabel(pht('Name'))
->setIsRequired(true)
->setTransactionType(
PhabricatorRepositoryNameTransaction::TRANSACTIONTYPE)
->setDescription(pht('The repository name.'))
->setConduitDescription(pht('Rename the repository.'))
->setConduitTypeDescription(pht('New repository name.'))
->setValue($object->getName()),
id(new PhabricatorTextEditField())
->setKey('callsign')
->setLabel(pht('Callsign'))
->setTransactionType(
PhabricatorRepositoryCallsignTransaction::TRANSACTIONTYPE)
->setDescription(pht('The repository callsign.'))
->setConduitDescription(pht('Change the repository callsign.'))
->setConduitTypeDescription(pht('New repository callsign.'))
->setValue($object->getCallsign()),
id(new PhabricatorTextEditField())
->setKey('shortName')
->setLabel(pht('Short Name'))
->setTransactionType(
PhabricatorRepositorySlugTransaction::TRANSACTIONTYPE)
->setDescription(pht('Short, unique repository name.'))
->setConduitDescription(pht('Change the repository short name.'))
->setConduitTypeDescription(pht('New short name for the repository.'))
->setValue($object->getRepositorySlug()),
id(new PhabricatorRemarkupEditField())
->setKey('description')
->setLabel(pht('Description'))
->setTransactionType(
PhabricatorRepositoryDescriptionTransaction::TRANSACTIONTYPE)
->setDescription(pht('Repository description.'))
->setConduitDescription(pht('Change the repository description.'))
->setConduitTypeDescription(pht('New repository description.'))
->setValue($object->getDetail('description')),
id(new PhabricatorTextEditField())
->setKey('encoding')
->setLabel(pht('Text Encoding'))
->setIsCopyable(true)
->setTransactionType(
PhabricatorRepositoryEncodingTransaction::TRANSACTIONTYPE)
->setDescription(pht('Default text encoding.'))
->setConduitDescription(pht('Change the default text encoding.'))
->setConduitTypeDescription(pht('New text encoding.'))
->setValue($object->getDetail('encoding')),
id(new PhabricatorBoolEditField())
->setKey('allowDangerousChanges')
->setLabel(pht('Allow Dangerous Changes'))
->setIsCopyable(true)
->setIsFormField(false)
->setOptions(
pht('Prevent Dangerous Changes'),
pht('Allow Dangerous Changes'))
->setTransactionType(
PhabricatorRepositoryDangerousTransaction::TRANSACTIONTYPE)
->setDescription(pht('Permit dangerous changes to be made.'))
->setConduitDescription(pht('Allow or prevent dangerous changes.'))
->setConduitTypeDescription(pht('New protection setting.'))
->setValue($object->shouldAllowDangerousChanges()),
id(new PhabricatorBoolEditField())
->setKey('allowEnormousChanges')
->setLabel(pht('Allow Enormous Changes'))
->setIsCopyable(true)
->setIsFormField(false)
->setOptions(
pht('Prevent Enormous Changes'),
pht('Allow Enormous Changes'))
->setTransactionType(
PhabricatorRepositoryEnormousTransaction::TRANSACTIONTYPE)
->setDescription(pht('Permit enormous changes to be made.'))
->setConduitDescription(pht('Allow or prevent enormous changes.'))
->setConduitTypeDescription(pht('New protection setting.'))
->setValue($object->shouldAllowEnormousChanges()),
id(new PhabricatorSelectEditField())
->setKey('status')
->setLabel(pht('Status'))
->setTransactionType(
PhabricatorRepositoryActivateTransaction::TRANSACTIONTYPE)
->setIsFormField(false)
->setOptions(PhabricatorRepository::getStatusNameMap())
->setDescription(pht('Active or inactive status.'))
->setConduitDescription(pht('Active or deactivate the repository.'))
->setConduitTypeDescription(pht('New repository status.'))
->setValue($object->getStatus()),
id(new PhabricatorTextEditField())
->setKey('defaultBranch')
->setLabel(pht('Default Branch'))
->setTransactionType(
PhabricatorRepositoryDefaultBranchTransaction::TRANSACTIONTYPE)
->setIsCopyable(true)
->setDescription(pht('Default branch name.'))
->setConduitDescription(pht('Set the default branch name.'))
->setConduitTypeDescription(pht('New default branch name.'))
->setValue($object->getDetail('default-branch')),
id(new PhabricatorTextAreaEditField())
->setIsStringList(true)
->setKey('fetchRefs')
->setLabel(pht('Fetch Refs'))
->setTransactionType(
PhabricatorRepositoryFetchRefsTransaction::TRANSACTIONTYPE)
->setIsCopyable(true)
->setDescription(pht('Fetch only these refs.'))
->setConduitDescription(pht('Set the fetched refs.'))
->setConduitTypeDescription(pht('New fetched refs.'))
->setValue($fetch_value),
id(new PhabricatorTextAreaEditField())
->setIsStringList(true)
->setKey('permanentRefs')
->setLabel(pht('Permanent Refs'))
->setTransactionType(
PhabricatorRepositoryPermanentRefsTransaction::TRANSACTIONTYPE)
->setIsCopyable(true)
->setDescription(pht('Only these refs are considered permanent.'))
->setConduitDescription(pht('Set the permanent refs.'))
->setConduitTypeDescription(pht('New permanent ref rules.'))
->setValue($permanent_value),
id(new PhabricatorTextAreaEditField())
->setIsStringList(true)
->setKey('trackOnly')
->setLabel(pht('Track Only'))
->setTransactionType(
PhabricatorRepositoryTrackOnlyTransaction::TRANSACTIONTYPE)
->setIsCopyable(true)
->setControlInstructions($track_instructions)
->setDescription(pht('Track only these branches.'))
->setConduitDescription(pht('Set the tracked branches.'))
->setConduitTypeDescription(pht('New tracked branches.'))
->setValue($track_value),
id(new PhabricatorTextEditField())
->setKey('importOnly')
->setLabel(pht('Import Only'))
->setTransactionType(
PhabricatorRepositorySVNSubpathTransaction::TRANSACTIONTYPE)
->setIsCopyable(true)
->setDescription(pht('Subpath to selectively import.'))
->setConduitDescription(pht('Set the subpath to import.'))
->setConduitTypeDescription(pht('New subpath to import.'))
->setValue($object->getDetail('svn-subpath'))
->setControlInstructions($subpath_instructions),
id(new PhabricatorTextEditField())
->setKey('stagingAreaURI')
->setLabel(pht('Staging Area URI'))
->setTransactionType(
PhabricatorRepositoryStagingURITransaction::TRANSACTIONTYPE)
->setIsCopyable(true)
->setDescription(pht('Staging area URI.'))
->setConduitDescription(pht('Set the staging area URI.'))
->setConduitTypeDescription(pht('New staging area URI.'))
->setValue($object->getStagingURI())
->setControlInstructions($staging_instructions),
id(new PhabricatorDatasourceEditField())
->setKey('automationBlueprintPHIDs')
->setLabel(pht('Use Blueprints'))
->setTransactionType(
PhabricatorRepositoryBlueprintsTransaction::TRANSACTIONTYPE)
->setIsCopyable(true)
->setDatasource(new DrydockBlueprintDatasource())
->setDescription(pht('Automation blueprints.'))
->setConduitDescription(pht('Change automation blueprints.'))
->setConduitTypeDescription(pht('New blueprint PHIDs.'))
->setValue($object->getAutomationBlueprintPHIDs())
->setControlInstructions($automation_instructions),
id(new PhabricatorStringListEditField())
->setKey('symbolLanguages')
->setLabel(pht('Languages'))
->setTransactionType(
PhabricatorRepositorySymbolLanguagesTransaction::TRANSACTIONTYPE)
->setIsCopyable(true)
->setDescription(
pht('Languages which define symbols in this repository.'))
->setConduitDescription(
pht('Change symbol languages for this repository.'))
->setConduitTypeDescription(
pht('New symbol languages.'))
->setValue($object->getSymbolLanguages()),
id(new PhabricatorDatasourceEditField())
->setKey('symbolRepositoryPHIDs')
->setLabel(pht('Uses Symbols From'))
->setTransactionType(
PhabricatorRepositorySymbolSourcesTransaction::TRANSACTIONTYPE)
->setIsCopyable(true)
->setDatasource(new DiffusionRepositoryDatasource())
->setDescription(pht('Repositories to link symbols from.'))
->setConduitDescription(pht('Change symbol source repositories.'))
->setConduitTypeDescription(pht('New symbol repositories.'))
->setValue($object->getSymbolSources()),
id(new PhabricatorBoolEditField())
->setKey('publish')
->setLabel(pht('Publish/Notify'))
->setTransactionType(
PhabricatorRepositoryNotifyTransaction::TRANSACTIONTYPE)
->setIsCopyable(true)
->setOptions(
pht('Disable Notifications, Feed, and Herald'),
pht('Enable Notifications, Feed, and Herald'))
->setDescription(pht('Configure how changes are published.'))
->setConduitDescription(pht('Change publishing options.'))
->setConduitTypeDescription(pht('New notification setting.'))
->setValue(!$object->isPublishingDisabled()),
id(new PhabricatorPolicyEditField())
->setKey('policy.push')
->setLabel(pht('Push Policy'))
->setAliases(array('push'))
->setIsCopyable(true)
->setCapability(DiffusionPushCapability::CAPABILITY)
->setPolicies($policies)
->setTransactionType(
PhabricatorRepositoryPushPolicyTransaction::TRANSACTIONTYPE)
->setDescription(
pht('Controls who can push changes to the repository.'))
->setConduitDescription(
pht('Change the push policy of the repository.'))
->setConduitTypeDescription(pht('New policy PHID or constant.'))
->setValue($object->getPolicy(DiffusionPushCapability::CAPABILITY)),
id(new PhabricatorTextEditField())
->setKey('filesizeLimit')
->setLabel(pht('Filesize Limit'))
->setTransactionType(
PhabricatorRepositoryFilesizeLimitTransaction::TRANSACTIONTYPE)
->setDescription(pht('Maximum permitted file size.'))
->setConduitDescription(pht('Change the filesize limit.'))
->setConduitTypeDescription(pht('New repository filesize limit.'))
->setControlInstructions($filesize_warning)
->setValue($object->getFilesizeLimit()),
id(new PhabricatorTextEditField())
->setKey('copyTimeLimit')
->setLabel(pht('Clone/Fetch Timeout'))
->setTransactionType(
PhabricatorRepositoryCopyTimeLimitTransaction::TRANSACTIONTYPE)
->setDescription(
pht('Maximum permitted duration of internal clone/fetch.'))
->setConduitDescription(pht('Change the copy time limit.'))
->setConduitTypeDescription(pht('New repository copy time limit.'))
->setValue($object->getCopyTimeLimit()),
id(new PhabricatorTextEditField())
->setKey('touchLimit')
->setLabel(pht('Touched Paths Limit'))
->setTransactionType(
PhabricatorRepositoryTouchLimitTransaction::TRANSACTIONTYPE)
->setDescription(pht('Maximum permitted paths touched per commit.'))
->setConduitDescription(pht('Change the touch limit.'))
->setConduitTypeDescription(pht('New repository touch limit.'))
->setValue($object->getTouchLimit()),
);
}
}
| apache-2.0 |
tejksat/docker-java | docker-java-core/src/main/java/com/github/dockerjava/core/command/AttachContainerResultCallback.java | 695 | /*
* Created on 21.07.2015
*/
package com.github.dockerjava.core.command;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.dockerjava.api.model.Frame;
import com.github.dockerjava.core.async.ResultCallbackTemplate;
/**
*
* @author Marcus Linke
*
* @deprecated use {@link com.github.dockerjava.api.async.ResultCallback.Adapter}
*/
@Deprecated
public class AttachContainerResultCallback extends ResultCallbackTemplate<AttachContainerResultCallback, Frame> {
private static final Logger LOGGER = LoggerFactory.getLogger(AttachContainerResultCallback.class);
@Override
public void onNext(Frame item) {
LOGGER.debug(item.toString());
}
}
| apache-2.0 |
imdatcandan/wasp | wasp/src/main/java/com/orhanobut/wasp/InternalImageHandler.java | 5127 | package com.orhanobut.wasp;
import android.graphics.Bitmap;
import android.os.Looper;
import android.text.TextUtils;
import android.view.ViewGroup;
import android.widget.ImageView;
import com.orhanobut.wasp.utils.StringUtils;
/**
* This class is responsible for loading images. It automatically handles canceling and
* loading images for recycled views as well.
*
* @author Orhan Obut
*/
final class InternalImageHandler implements ImageHandler {
/**
* It is used to determine which url is current for the ImageView
*/
private static final int KEY_TAG = 0x7f070006;
/**
* Stores the cached images
*/
private final ImageCache imageCache;
/**
* It is used to create network request for the bitmap
*/
private final ImageNetworkHandler imageNetworkHandler;
InternalImageHandler(ImageCache cache, ImageNetworkHandler handler) {
this.imageCache = cache;
this.imageNetworkHandler = handler;
}
@Override
public void load(ImageCreator imageCreator) {
checkMain();
loadImage(imageCreator);
}
private void loadImage(final ImageCreator imageCreator) {
final String url = imageCreator.getUrl();
final ImageView imageView = imageCreator.getImageView();
// clear the target
initImageView(imageCreator);
// if there is any old request. cancel it
String tag = (String) imageView.getTag(KEY_TAG);
if (tag != null) {
imageNetworkHandler.cancelRequest(tag);
}
// update the current url
imageView.setTag(KEY_TAG, url);
int width = imageView.getWidth();
int height = imageView.getHeight();
boolean wrapWidth = false;
boolean wrapHeight = false;
if (imageView.getLayoutParams() != null) {
ViewGroup.LayoutParams params = imageView.getLayoutParams();
wrapWidth = params.width == ViewGroup.LayoutParams.WRAP_CONTENT;
wrapHeight = params.height == ViewGroup.LayoutParams.WRAP_CONTENT;
}
// if the view's bounds aren't known yet, and this is not a wrap-content/wrap-content
// view, hold off on loading the image.
boolean isFullyWrapContent = wrapWidth && wrapHeight;
if (width == 0 && height == 0 && !isFullyWrapContent) {
Logger.d("ImageHandler : width == 0 && height == 0 && !isFullyWrapContent");
// return;
}
// Calculate the max image width / height to use while ignoring WRAP_CONTENT dimens.
int maxWidth = wrapWidth ? 0 : width;
int maxHeight = wrapHeight ? 0 : height;
// check if it is already in cache
final String cacheKey = StringUtils.getCacheKey(url, maxWidth, maxHeight);
final Bitmap bitmap = imageCache.getBitmap(cacheKey);
if (bitmap != null) {
imageView.setImageBitmap(bitmap);
Logger.d("CACHE IMAGE : " + url);
return;
}
// make a new request
imageNetworkHandler.requestImage(imageCreator, maxWidth, maxHeight, new InternalCallback<Container>() {
@Override
public void onSuccess(final Container container) {
Bitmap bitmap = container.bitmap;
if (bitmap == null) {
return;
}
container.waspImageCreator.logSuccess(bitmap);
// cache the image
imageCache.putBitmap(container.cacheKey, container.bitmap);
ImageView imageView = container.waspImageCreator.getImageView();
// if it is the current url, set the image
String tag = (String) imageView.getTag(KEY_TAG);
if (TextUtils.equals(tag, container.waspImageCreator.getUrl())) {
imageView.setImageBitmap(container.bitmap);
imageView.setTag(KEY_TAG, null);
}
}
@Override
public void onError(WaspError error) {
int errorImage = imageCreator.getErrorImage();
if (errorImage != 0) {
imageView.setImageResource(errorImage);
}
error.log();
}
});
imageCreator.logRequest();
}
// clear the target by setting null or default placeholder
private void initImageView(ImageCreator waspImageCreator) {
int defaultImage = waspImageCreator.getDefaultImage();
ImageView imageView = waspImageCreator.getImageView();
if (defaultImage != 0) {
imageView.setImageResource(defaultImage);
return;
}
imageView.setImageBitmap(null);
}
@Override
public void clearCache() {
if (imageCache == null) {
return;
}
imageCache.clearCache();
}
// the call should be done in main thread
private void checkMain() {
if (Looper.myLooper() != Looper.getMainLooper()) {
throw new IllegalStateException("Wasp.Image.load() must be invoked from the main thread.");
}
}
/**
* Simple cache adapter interface.
*/
interface ImageCache {
Bitmap getBitmap(String url);
void putBitmap(String url, Bitmap bitmap);
void clearCache();
}
interface ImageNetworkHandler {
void requestImage(ImageCreator waspImageCreator, int maxWidth, int maxHeight, InternalCallback<Container> waspCallback);
void cancelRequest(String tag);
}
static class Container {
String cacheKey;
Bitmap bitmap;
ImageCreator waspImageCreator;
}
}
| apache-2.0 |
camallen/Panoptes | docs/source/includes/_json_api.md | 1853 | # JSON-API conventions
## Resource(s) themselves
```json
{
"users": [{
"id": 123,
...
}],
...
}
```
If you request a resource, you will find the results under a top-level key with
the plural name of the resource. So for instance, if you request a single specific user,
you will find the user record under the `users` key.
<aside class="notice">
Resources will always be returned an array. If you requested a single specific
resource (usually by passing in its <code>id</code>), you can rely on this being an array
with at most one element.
</aside>
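As a concrete illustration (a sketch only, not part of the API itself; it assumes the
`org.json` library), a Java client could read the single record back out of the
`users` array like so:

```java
import org.json.JSONObject;

// responseBody is the JSON text returned by e.g. GET /users/123
static long firstUserId(String responseBody) {
    JSONObject body = new JSONObject(responseBody);
    // Even a single-resource request comes back as an array under the plural key.
    return body.getJSONArray("users").getJSONObject(0).getLong("id");
}
```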
## Links
```json
{
"projects": [{
...,
"links": {
"workflows": [123],
"avatar": {"href": "/projects/123/avatar", "type": "avatars"},
...
}
}],
"links": {
"projects.workflows": {
"href": "/workflows?project_id={projects.id}",
"type": "workflows"
},
"projects.avatar": {
"href": "/projects/{projects.id}/avatar",
"type": "media"
},
...
}
}
```
Any resource returned will specify a list of linked resources under its `links`
attribute. Definitions of where to request those linked resources can be found
under the top-level `links` key (as opposed to the per-resource `links`).
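To make that concrete, here is a small sketch (again assuming `org.json`; the helper
name is ours, not part of the API) that resolves the `projects.workflows` template
for the first project in a response shaped like the example above:

```java
import org.json.JSONObject;

// Resolve the "projects.workflows" link template for a concrete project.
static String workflowsHref(String responseBody) {
    JSONObject body = new JSONObject(responseBody);
    JSONObject project = body.getJSONArray("projects").getJSONObject(0);
    String template = body.getJSONObject("links")
                          .getJSONObject("projects.workflows")
                          .getString("href"); // "/workflows?project_id={projects.id}"
    return template.replace("{projects.id}", project.get("id").toString());
}
```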
## Pagination
```json
{
"page": 1,
"page_size": 2,
"count": 28,
"include": [],
"page_count": 14,
"previous_page": 14,
"next_page": 2,
"first_href": "/users?page_size=2",
"previous_href": "/users?page=14page_size=2",
"next_href": "/users?page=2&page_size=2",
"last_href": "/users?page=14&page_size=2"
}
```
When requesting a list of resources, rather than a single resource, the
response will include a top-level `meta` key. For performance reasons, results
are returned in pages. You can use the data under the `meta` key to
automatically navigate these paginated results.
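For example, a client can walk every page by following `next_href` until it is no
longer present. The sketch below is illustrative only: it uses Java's built-in
`java.net.http` client together with `org.json`, and it assumes the pagination
fields shown above sit directly under the top-level `meta` key.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import org.json.JSONObject;

// Follow next_href from page to page until there are no more pages.
static void walkUsers(String baseUrl) throws Exception {
    HttpClient client = HttpClient.newHttpClient();
    String path = "/users?page_size=2";
    while (path != null) {
        HttpRequest request = HttpRequest.newBuilder(URI.create(baseUrl + path)).build();
        JSONObject body = new JSONObject(
                client.send(request, HttpResponse.BodyHandlers.ofString()).body());
        // ... process body.getJSONArray("users") here ...
        JSONObject meta = body.getJSONObject("meta");
        path = (meta.has("next_href") && !meta.isNull("next_href"))
                ? meta.getString("next_href") : null;
    }
}
```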
## Including linked resources
TODO
| apache-2.0 |
rewardStyle/apache.thrift | lib/cpp/src/thrift/transport/TSocket.cpp | 23335 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <thrift/thrift-config.h>
#include <cstring>
#include <sstream>
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef HAVE_SYS_UN_H
#include <sys/un.h>
#endif
#ifdef HAVE_SYS_POLL_H
#include <sys/poll.h>
#endif
#include <sys/types.h>
#ifdef HAVE_NETINET_IN_H
#include <netinet/in.h>
#include <netinet/tcp.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <fcntl.h>
#include <thrift/concurrency/Monitor.h>
#include <thrift/transport/TSocket.h>
#include <thrift/transport/TTransportException.h>
#include <thrift/transport/PlatformSocket.h>
#ifndef SOCKOPT_CAST_T
# ifndef _WIN32
# define SOCKOPT_CAST_T void
# else
# define SOCKOPT_CAST_T char
# endif // _WIN32
#endif
template<class T>
inline const SOCKOPT_CAST_T* const_cast_sockopt(const T* v) {
return reinterpret_cast<const SOCKOPT_CAST_T*>(v);
}
template<class T>
inline SOCKOPT_CAST_T* cast_sockopt(T* v) {
return reinterpret_cast<SOCKOPT_CAST_T*>(v);
}
namespace apache { namespace thrift { namespace transport {
using namespace std;
// Global var to track total socket sys calls
uint32_t g_socket_syscalls = 0;
/**
* TSocket implementation.
*
*/
TSocket::TSocket(string host, int port) :
host_(host),
port_(port),
path_(""),
socket_(THRIFT_INVALID_SOCKET),
connTimeout_(0),
sendTimeout_(0),
recvTimeout_(0),
keepAlive_(false),
lingerOn_(1),
lingerVal_(0),
noDelay_(1),
maxRecvRetries_(5) {
cachedPeerAddr_.ipv4.sin_family = AF_UNSPEC;
}
TSocket::TSocket(string path) :
host_(""),
port_(0),
path_(path),
socket_(THRIFT_INVALID_SOCKET),
connTimeout_(0),
sendTimeout_(0),
recvTimeout_(0),
keepAlive_(false),
lingerOn_(1),
lingerVal_(0),
noDelay_(1),
maxRecvRetries_(5) {
cachedPeerAddr_.ipv4.sin_family = AF_UNSPEC;
}
TSocket::TSocket() :
host_(""),
port_(0),
path_(""),
socket_(THRIFT_INVALID_SOCKET),
connTimeout_(0),
sendTimeout_(0),
recvTimeout_(0),
keepAlive_(false),
lingerOn_(1),
lingerVal_(0),
noDelay_(1),
maxRecvRetries_(5) {
cachedPeerAddr_.ipv4.sin_family = AF_UNSPEC;
}
TSocket::TSocket(THRIFT_SOCKET socket) :
host_(""),
port_(0),
path_(""),
socket_(socket),
connTimeout_(0),
sendTimeout_(0),
recvTimeout_(0),
keepAlive_(false),
lingerOn_(1),
lingerVal_(0),
noDelay_(1),
maxRecvRetries_(5) {
cachedPeerAddr_.ipv4.sin_family = AF_UNSPEC;
#ifdef SO_NOSIGPIPE
{
int one = 1;
setsockopt(socket_, SOL_SOCKET, SO_NOSIGPIPE, &one, sizeof(one));
}
#endif
}
TSocket::~TSocket() {
close();
}
bool TSocket::isOpen() {
return (socket_ != THRIFT_INVALID_SOCKET);
}
bool TSocket::peek() {
if (!isOpen()) {
return false;
}
uint8_t buf;
int r = static_cast<int>(recv(socket_, cast_sockopt(&buf), 1, MSG_PEEK));
if (r == -1) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
#if defined __FreeBSD__ || defined __MACH__
/* shigin:
* freebsd returns -1 and THRIFT_ECONNRESET if socket was closed by
* the other side
*/
if (errno_copy == THRIFT_ECONNRESET)
{
close();
return false;
}
#endif
GlobalOutput.perror("TSocket::peek() recv() " + getSocketInfo(), errno_copy);
throw TTransportException(TTransportException::UNKNOWN, "recv()", errno_copy);
}
return (r > 0);
}
void TSocket::openConnection(struct addrinfo *res) {
if (isOpen()) {
return;
}
if (! path_.empty()) {
socket_ = socket(PF_UNIX, SOCK_STREAM, IPPROTO_IP);
} else {
socket_ = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
}
if (socket_ == THRIFT_INVALID_SOCKET) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TSocket::open() socket() " + getSocketInfo(), errno_copy);
throw TTransportException(TTransportException::NOT_OPEN, "socket()", errno_copy);
}
// Send timeout
if (sendTimeout_ > 0) {
setSendTimeout(sendTimeout_);
}
// Recv timeout
if (recvTimeout_ > 0) {
setRecvTimeout(recvTimeout_);
}
if(keepAlive_) {
setKeepAlive(keepAlive_);
}
// Linger
setLinger(lingerOn_, lingerVal_);
// No delay
setNoDelay(noDelay_);
#ifdef SO_NOSIGPIPE
{
int one = 1;
setsockopt(socket_, SOL_SOCKET, SO_NOSIGPIPE, &one, sizeof(one));
}
#endif
// Uses a low min RTO if asked to.
#ifdef TCP_LOW_MIN_RTO
if (getUseLowMinRto()) {
int one = 1;
setsockopt(socket_, IPPROTO_TCP, TCP_LOW_MIN_RTO, &one, sizeof(one));
}
#endif
// Set the socket to be non blocking for connect if a timeout exists
int flags = THRIFT_FCNTL(socket_, THRIFT_F_GETFL, 0);
if (connTimeout_ > 0) {
if (-1 == THRIFT_FCNTL(socket_, THRIFT_F_SETFL, flags | THRIFT_O_NONBLOCK)) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TSocket::open() THRIFT_FCNTL() " + getSocketInfo(), errno_copy);
throw TTransportException(TTransportException::NOT_OPEN, "THRIFT_FCNTL() failed", errno_copy);
}
} else {
if (-1 == THRIFT_FCNTL(socket_, THRIFT_F_SETFL, flags & ~THRIFT_O_NONBLOCK)) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TSocket::open() THRIFT_FCNTL " + getSocketInfo(), errno_copy);
throw TTransportException(TTransportException::NOT_OPEN, "THRIFT_FCNTL() failed", errno_copy);
}
}
// Connect the socket
int ret;
if (! path_.empty()) {
#ifndef _WIN32
size_t len = path_.size() + 1;
if (len > sizeof(sockaddr_un::sun_path)) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TSocket::open() Unix Domain socket path too long", errno_copy);
throw TTransportException(TTransportException::NOT_OPEN, " Unix Domain socket path too long");
}
struct sockaddr_un address;
address.sun_family = AF_UNIX;
memcpy(address.sun_path, path_.c_str(), len);
socklen_t structlen = static_cast<socklen_t>(sizeof(address));
ret = connect(socket_, (struct sockaddr *) &address, structlen);
#else
GlobalOutput.perror("TSocket::open() Unix Domain socket path not supported on windows", -99);
throw TTransportException(TTransportException::NOT_OPEN, " Unix Domain socket path not supported");
#endif
} else {
ret = connect(socket_, res->ai_addr, static_cast<int>(res->ai_addrlen));
}
// success case
if (ret == 0) {
goto done;
}
if ((THRIFT_GET_SOCKET_ERROR != THRIFT_EINPROGRESS) && (THRIFT_GET_SOCKET_ERROR != THRIFT_EWOULDBLOCK)) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TSocket::open() connect() " + getSocketInfo(), errno_copy);
throw TTransportException(TTransportException::NOT_OPEN, "connect() failed", errno_copy);
}
struct THRIFT_POLLFD fds[1];
std::memset(fds, 0 , sizeof(fds));
fds[0].fd = socket_;
fds[0].events = THRIFT_POLLOUT;
ret = THRIFT_POLL(fds, 1, connTimeout_);
if (ret > 0) {
// Ensure the socket is connected and that there are no errors set
int val;
socklen_t lon;
lon = sizeof(int);
int ret2 = getsockopt(socket_, SOL_SOCKET, SO_ERROR, cast_sockopt(&val), &lon);
if (ret2 == -1) {
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TSocket::open() getsockopt() " + getSocketInfo(), errno_copy);
throw TTransportException(TTransportException::NOT_OPEN, "getsockopt()", errno_copy);
}
// no errors on socket, go to town
if (val == 0) {
goto done;
}
GlobalOutput.perror("TSocket::open() error on socket (after THRIFT_POLL) " + getSocketInfo(), val);
throw TTransportException(TTransportException::NOT_OPEN, "socket open() error", val);
} else if (ret == 0) {
// socket timed out
string errStr = "TSocket::open() timed out " + getSocketInfo();
GlobalOutput(errStr.c_str());
throw TTransportException(TTransportException::NOT_OPEN, "open() timed out");
} else {
// error on THRIFT_POLL()
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TSocket::open() THRIFT_POLL() " + getSocketInfo(), errno_copy);
throw TTransportException(TTransportException::NOT_OPEN, "THRIFT_POLL() failed", errno_copy);
}
done:
// Set socket back to normal mode (blocking)
THRIFT_FCNTL(socket_, THRIFT_F_SETFL, flags);
if (path_.empty()) {
setCachedAddress(res->ai_addr, static_cast<socklen_t>(res->ai_addrlen));
}
}
void TSocket::open() {
if (isOpen()) {
return;
}
if (! path_.empty()) {
unix_open();
} else {
local_open();
}
}
void TSocket::unix_open(){
if (! path_.empty()) {
// Unix Domain Socket does not need an addrinfo struct, so we pass NULL
openConnection(NULL);
}
}
void TSocket::local_open(){
#ifdef _WIN32
TWinsockSingleton::create();
#endif // _WIN32
if (isOpen()) {
return;
}
// Validate port number
if (port_ < 0 || port_ > 0xFFFF) {
throw TTransportException(TTransportException::NOT_OPEN, "Specified port is invalid");
}
struct addrinfo hints, *res, *res0;
res = NULL;
res0 = NULL;
int error;
char port[sizeof("65535")];
std::memset(&hints, 0, sizeof(hints));
hints.ai_family = PF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = AI_PASSIVE | AI_ADDRCONFIG;
sprintf(port, "%d", port_);
error = getaddrinfo(host_.c_str(), port, &hints, &res0);
#ifdef _WIN32
if (error == WSANO_DATA) {
hints.ai_flags &= ~AI_ADDRCONFIG;
error = getaddrinfo(host_.c_str(), port, &hints, &res0);
}
#endif
if (error) {
string errStr = "TSocket::open() getaddrinfo() " + getSocketInfo() + string(THRIFT_GAI_STRERROR(error));
GlobalOutput(errStr.c_str());
close();
throw TTransportException(TTransportException::NOT_OPEN, "Could not resolve host for client socket.");
}
// Cycle through all the returned addresses until one
// connects, or propagate the exception from the last attempt.
for (res = res0; res; res = res->ai_next) {
try {
openConnection(res);
break;
} catch (TTransportException&) {
if (res->ai_next) {
close();
} else {
close();
freeaddrinfo(res0); // cleanup on failure
throw;
}
}
}
// Free address structure memory
freeaddrinfo(res0);
}
void TSocket::close() {
if (socket_ != THRIFT_INVALID_SOCKET) {
shutdown(socket_, THRIFT_SHUT_RDWR);
::THRIFT_CLOSESOCKET(socket_);
}
socket_ = THRIFT_INVALID_SOCKET;
}
void TSocket::setSocketFD(THRIFT_SOCKET socket) {
if (socket_ != THRIFT_INVALID_SOCKET) {
close();
}
socket_ = socket;
}
uint32_t TSocket::read(uint8_t* buf, uint32_t len) {
if (socket_ == THRIFT_INVALID_SOCKET) {
throw TTransportException(TTransportException::NOT_OPEN, "Called read on non-open socket");
}
int32_t retries = 0;
// THRIFT_EAGAIN can be signalled both when a timeout has occurred and when
// the system is out of resources (an awesome undocumented feature).
// The following is an approximation of the time interval under which
// THRIFT_EAGAIN is taken to indicate an out of resources error.
uint32_t eagainThresholdMicros = 0;
if (recvTimeout_) {
// if a readTimeout is specified along with a max number of recv retries, then
// the threshold will ensure that the read timeout is not exceeded even in the
// case of resource errors
eagainThresholdMicros = (recvTimeout_*1000)/ ((maxRecvRetries_>0) ? maxRecvRetries_ : 2);
}
try_again:
// Read from the socket
struct timeval begin;
if (recvTimeout_ > 0) {
THRIFT_GETTIMEOFDAY(&begin, NULL);
} else {
// if there is no read timeout we don't need the TOD to determine whether
// an THRIFT_EAGAIN is due to a timeout or an out-of-resource condition.
begin.tv_sec = begin.tv_usec = 0;
}
int got = static_cast<int>(recv(socket_, cast_sockopt(buf), len, 0));
int errno_copy = THRIFT_GET_SOCKET_ERROR; //THRIFT_GETTIMEOFDAY can change THRIFT_GET_SOCKET_ERROR
++g_socket_syscalls;
// Check for error on read
if (got < 0) {
if (errno_copy == THRIFT_EAGAIN) {
// if no timeout we can assume that resource exhaustion has occurred.
if (recvTimeout_ == 0) {
throw TTransportException(TTransportException::TIMED_OUT,
"THRIFT_EAGAIN (unavailable resources)");
}
// check if this is the lack of resources or timeout case
struct timeval end;
THRIFT_GETTIMEOFDAY(&end, NULL);
uint32_t readElapsedMicros = static_cast<uint32_t>(
((end.tv_sec - begin.tv_sec) * 1000 * 1000)
+ (((uint64_t)(end.tv_usec - begin.tv_usec))));
if (!eagainThresholdMicros || (readElapsedMicros < eagainThresholdMicros)) {
if (retries++ < maxRecvRetries_) {
THRIFT_SLEEP_USEC(50);
goto try_again;
} else {
throw TTransportException(TTransportException::TIMED_OUT,
"THRIFT_EAGAIN (unavailable resources)");
}
} else {
// infer that timeout has been hit
throw TTransportException(TTransportException::TIMED_OUT,
"THRIFT_EAGAIN (timed out)");
}
}
// If interrupted, try again
if (errno_copy == THRIFT_EINTR && retries++ < maxRecvRetries_) {
goto try_again;
}
#if defined __FreeBSD__ || defined __MACH__
if (errno_copy == THRIFT_ECONNRESET) {
/* shigin: freebsd doesn't follow POSIX semantic of recv and fails with
* THRIFT_ECONNRESET if peer performed shutdown
* edhall: eliminated close() since we do that in the destructor.
*/
return 0;
}
#endif
#ifdef _WIN32
if(errno_copy == WSAECONNRESET) {
return 0; // EOF
}
#endif
// At this point it's not a try-again case, but a real problem
GlobalOutput.perror("TSocket::read() recv() " + getSocketInfo(), errno_copy);
// If we disconnect with no linger time
if (errno_copy == THRIFT_ECONNRESET) {
throw TTransportException(TTransportException::NOT_OPEN, "THRIFT_ECONNRESET");
}
// The socket isn't open
if (errno_copy == THRIFT_ENOTCONN) {
throw TTransportException(TTransportException::NOT_OPEN, "THRIFT_ENOTCONN");
}
// Timed out!
if (errno_copy == THRIFT_ETIMEDOUT) {
throw TTransportException(TTransportException::TIMED_OUT, "THRIFT_ETIMEDOUT");
}
// Some other error
throw TTransportException(TTransportException::UNKNOWN, "Unknown", errno_copy);
}
// The remote host has closed the socket
if (got == 0) {
// edhall: we used to call close() here, but our caller may want to deal
// with the socket fd and we'll close() in our destructor in any case.
return 0;
}
// Return the number of bytes actually read
return got;
}
void TSocket::write(const uint8_t* buf, uint32_t len) {
uint32_t sent = 0;
while (sent < len) {
uint32_t b = write_partial(buf + sent, len - sent);
if (b == 0) {
// This should only happen if the timeout set with SO_SNDTIMEO expired.
// Raise an exception.
throw TTransportException(TTransportException::TIMED_OUT,
"send timeout expired");
}
sent += b;
}
}
uint32_t TSocket::write_partial(const uint8_t* buf, uint32_t len) {
if (socket_ == THRIFT_INVALID_SOCKET) {
throw TTransportException(TTransportException::NOT_OPEN, "Called write on non-open socket");
}
uint32_t sent = 0;
int flags = 0;
#ifdef MSG_NOSIGNAL
// Note the use of MSG_NOSIGNAL to suppress SIGPIPE errors; instead we
// check for the THRIFT_EPIPE return condition and close the socket in that case
flags |= MSG_NOSIGNAL;
#endif // ifdef MSG_NOSIGNAL
int b = static_cast<int>(send(socket_, const_cast_sockopt(buf + sent), len - sent, flags));
++g_socket_syscalls;
if (b < 0) {
if (THRIFT_GET_SOCKET_ERROR == THRIFT_EWOULDBLOCK || THRIFT_GET_SOCKET_ERROR == THRIFT_EAGAIN) {
return 0;
}
// Fail on a send error
int errno_copy = THRIFT_GET_SOCKET_ERROR;
GlobalOutput.perror("TSocket::write_partial() send() " + getSocketInfo(), errno_copy);
if (errno_copy == THRIFT_EPIPE || errno_copy == THRIFT_ECONNRESET || errno_copy == THRIFT_ENOTCONN) {
close();
throw TTransportException(TTransportException::NOT_OPEN, "write() send()", errno_copy);
}
throw TTransportException(TTransportException::UNKNOWN, "write() send()", errno_copy);
}
// Fail on blocked send
if (b == 0) {
throw TTransportException(TTransportException::NOT_OPEN, "Socket send returned 0.");
}
return b;
}
std::string TSocket::getHost() {
return host_;
}
int TSocket::getPort() {
return port_;
}
void TSocket::setHost(string host) {
host_ = host;
}
void TSocket::setPort(int port) {
port_ = port;
}
void TSocket::setLinger(bool on, int linger) {
lingerOn_ = on;
lingerVal_ = linger;
if (socket_ == THRIFT_INVALID_SOCKET) {
return;
}
#ifndef _WIN32
struct linger l = {(lingerOn_ ? 1 : 0), lingerVal_};
#else
struct linger l = {(lingerOn_ ? 1 : 0), static_cast<u_short>(lingerVal_)};
#endif
int ret = setsockopt(socket_, SOL_SOCKET, SO_LINGER, cast_sockopt(&l), sizeof(l));
if (ret == -1) {
int errno_copy = THRIFT_GET_SOCKET_ERROR; // Copy THRIFT_GET_SOCKET_ERROR because we're allocating memory.
GlobalOutput.perror("TSocket::setLinger() setsockopt() " + getSocketInfo(), errno_copy);
}
}
void TSocket::setNoDelay(bool noDelay) {
noDelay_ = noDelay;
if (socket_ == THRIFT_INVALID_SOCKET || !path_.empty()) {
return;
}
// Set socket to NODELAY
int v = noDelay_ ? 1 : 0;
int ret = setsockopt(socket_, IPPROTO_TCP, TCP_NODELAY, cast_sockopt(&v), sizeof(v));
if (ret == -1) {
int errno_copy = THRIFT_GET_SOCKET_ERROR; // Copy THRIFT_GET_SOCKET_ERROR because we're allocating memory.
GlobalOutput.perror("TSocket::setNoDelay() setsockopt() " + getSocketInfo(), errno_copy);
}
}
void TSocket::setConnTimeout(int ms) {
connTimeout_ = ms;
}
void setGenericTimeout(THRIFT_SOCKET s, int timeout_ms, int optname)
{
if (timeout_ms < 0) {
char errBuf[512];
sprintf(errBuf, "TSocket::setGenericTimeout with negative input: %d\n", timeout_ms);
GlobalOutput(errBuf);
return;
}
if (s == THRIFT_INVALID_SOCKET) {
return;
}
#ifdef _WIN32
DWORD platform_time = static_cast<DWORD>(timeout_ms);
#else
struct timeval platform_time = {
(int)(timeout_ms/1000),
(int)((timeout_ms%1000)*1000)};
#endif
int ret = setsockopt(s, SOL_SOCKET, optname, cast_sockopt(&platform_time), sizeof(platform_time));
if (ret == -1) {
int errno_copy = THRIFT_GET_SOCKET_ERROR; // Copy THRIFT_GET_SOCKET_ERROR because we're allocating memory.
GlobalOutput.perror("TSocket::setGenericTimeout() setsockopt() ", errno_copy);
}
}
void TSocket::setRecvTimeout(int ms) {
setGenericTimeout(socket_, ms, SO_RCVTIMEO);
recvTimeout_ = ms;
}
void TSocket::setSendTimeout(int ms) {
setGenericTimeout(socket_, ms, SO_SNDTIMEO);
sendTimeout_ = ms;
}
void TSocket::setKeepAlive(bool keepAlive) {
keepAlive_ = keepAlive;
if (socket_ == THRIFT_INVALID_SOCKET) {
return;
}
int value = keepAlive_;
int ret = setsockopt(socket_, SOL_SOCKET, SO_KEEPALIVE, const_cast_sockopt(&value), sizeof(value));
if (ret == -1) {
int errno_copy = THRIFT_GET_SOCKET_ERROR; // Copy THRIFT_GET_SOCKET_ERROR because we're allocating memory.
GlobalOutput.perror("TSocket::setKeepAlive() setsockopt() " + getSocketInfo(), errno_copy);
}
}
void TSocket::setMaxRecvRetries(int maxRecvRetries) {
maxRecvRetries_ = maxRecvRetries;
}
string TSocket::getSocketInfo() {
std::ostringstream oss;
if (host_.empty() || port_ == 0) {
oss << "<Host: " << getPeerAddress();
oss << " Port: " << getPeerPort() << ">";
} else {
oss << "<Host: " << host_ << " Port: " << port_ << ">";
}
return oss.str();
}
std::string TSocket::getPeerHost() {
if (peerHost_.empty() && path_.empty()) {
struct sockaddr_storage addr;
struct sockaddr* addrPtr;
socklen_t addrLen;
if (socket_ == THRIFT_INVALID_SOCKET) {
return host_;
}
addrPtr = getCachedAddress(&addrLen);
if (addrPtr == NULL) {
addrLen = sizeof(addr);
if (getpeername(socket_, (sockaddr*) &addr, &addrLen) != 0) {
return peerHost_;
}
addrPtr = (sockaddr*)&addr;
setCachedAddress(addrPtr, addrLen);
}
char clienthost[NI_MAXHOST];
char clientservice[NI_MAXSERV];
getnameinfo((sockaddr*) addrPtr, addrLen,
clienthost, sizeof(clienthost),
clientservice, sizeof(clientservice), 0);
peerHost_ = clienthost;
}
return peerHost_;
}
std::string TSocket::getPeerAddress() {
if (peerAddress_.empty() && path_.empty()) {
struct sockaddr_storage addr;
struct sockaddr* addrPtr;
socklen_t addrLen;
if (socket_ == THRIFT_INVALID_SOCKET) {
return peerAddress_;
}
addrPtr = getCachedAddress(&addrLen);
if (addrPtr == NULL) {
addrLen = sizeof(addr);
if (getpeername(socket_, (sockaddr*) &addr, &addrLen) != 0) {
return peerAddress_;
}
addrPtr = (sockaddr*)&addr;
setCachedAddress(addrPtr, addrLen);
}
char clienthost[NI_MAXHOST];
char clientservice[NI_MAXSERV];
getnameinfo(addrPtr, addrLen,
clienthost, sizeof(clienthost),
clientservice, sizeof(clientservice),
NI_NUMERICHOST|NI_NUMERICSERV);
peerAddress_ = clienthost;
peerPort_ = std::atoi(clientservice);
}
return peerAddress_;
}
int TSocket::getPeerPort() {
getPeerAddress();
return peerPort_;
}
void TSocket::setCachedAddress(const sockaddr* addr, socklen_t len) {
if (!path_.empty()) {
return;
}
switch (addr->sa_family) {
case AF_INET:
if (len == sizeof(sockaddr_in)) {
memcpy((void*)&cachedPeerAddr_.ipv4, (void*)addr, len);
}
break;
case AF_INET6:
if (len == sizeof(sockaddr_in6)) {
memcpy((void*)&cachedPeerAddr_.ipv6, (void*)addr, len);
}
break;
}
}
sockaddr* TSocket::getCachedAddress(socklen_t* len) const {
switch (cachedPeerAddr_.ipv4.sin_family) {
case AF_INET:
*len = sizeof(sockaddr_in);
return (sockaddr*) &cachedPeerAddr_.ipv4;
case AF_INET6:
*len = sizeof(sockaddr_in6);
return (sockaddr*) &cachedPeerAddr_.ipv6;
default:
return NULL;
}
}
bool TSocket::useLowMinRto_ = false;
void TSocket::setUseLowMinRto(bool useLowMinRto) {
useLowMinRto_ = useLowMinRto;
}
bool TSocket::getUseLowMinRto() {
return useLowMinRto_;
}
const std::string TSocket::getOrigin() {
std::ostringstream oss;
oss << getPeerHost() << ":" << getPeerPort();
return oss.str();
}
}}} // apache::thrift::transport
| apache-2.0 |
hakanu/iftar | _posts_/vakit/ALMANYA/KAMEN/2017-02-01-.markdown | 308 | ---
layout: vakit_dashboard
title: KAMEN, ALMANYA için iftar, namaz vakitleri ve hava durumu - ilçe/eyalet seç
permalink: /ALMANYA/KAMEN/
---
<script type="text/javascript">
var GLOBAL_COUNTRY = 'ALMANYA';
var GLOBAL_CITY = 'KAMEN';
var GLOBAL_STATE = '';
var lat = 72;
var lon = 21;
</script>
| apache-2.0 |
dmmiller612/deeplearning4j | deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/conf/graph/MergeVertex.java | 6727 | /*-
*
* * Copyright 2016 Skymind,Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package org.deeplearning4j.nn.conf.graph;
import java.util.Arrays;
import org.deeplearning4j.nn.conf.inputs.InputType;
import org.deeplearning4j.nn.conf.inputs.InvalidInputTypeException;
import org.deeplearning4j.nn.graph.ComputationGraph;
import org.nd4j.linalg.api.ndarray.INDArray;
/** A MergeVertex is used to combine the activations of two or more layers/GraphVertex by means of concatenation/merging.<br>
* Exactly how this is done depends on the type of input.<br>
* For 2d (feed forward layer) inputs: MergeVertex([numExamples,layerSize1],[numExamples,layerSize2]) -> [numExamples,layerSize1 + layerSize2]<br>
* For 3d (time series) inputs: MergeVertex([numExamples,layerSize1,timeSeriesLength],[numExamples,layerSize2,timeSeriesLength])
* -> [numExamples,layerSize1 + layerSize2,timeSeriesLength]<br>
* For 4d (convolutional) inputs: MergeVertex([numExamples,depth1,width,height],[numExamples,depth2,width,height])
* -> [numExamples,depth1 + depth2,width,height]<br>
* @author Alex Black
*/
public class MergeVertex extends GraphVertex {
@Override
public MergeVertex clone() {
return new MergeVertex();
}
@Override
public boolean equals(Object o) {
return o instanceof MergeVertex;
}
@Override
public int hashCode() {
return 433682566;
}
@Override
public int numParams(boolean backprop) {
return 0;
}
@Override
public org.deeplearning4j.nn.graph.vertex.GraphVertex instantiate(ComputationGraph graph, String name, int idx,
INDArray paramsView, boolean initializeParams) {
return new org.deeplearning4j.nn.graph.vertex.impl.MergeVertex(graph, name, idx);
}
@Override
public InputType getOutputType(int layerIndex, InputType... vertexInputs) throws InvalidInputTypeException {
if (vertexInputs.length == 1)
return vertexInputs[0];
InputType first = vertexInputs[0];
if (first.getType() == InputType.Type.CNNFlat) {
//TODO
//Merging flattened CNN format data could be messy?
throw new InvalidInputTypeException(
"Invalid input: MergeVertex cannot currently merge CNN data in flattened format. Got: "
+ Arrays.toString(vertexInputs));
} else if (first.getType() != InputType.Type.CNN) {
//FF or RNN data inputs
int size = 0;
InputType.Type type = null;
for (int i = 0; i < vertexInputs.length; i++) {
if (vertexInputs[i].getType() != first.getType()) {
throw new InvalidInputTypeException(
"Invalid input: MergeVertex cannot merge activations of different types:"
+ " first type = " + first.getType() + ", input type " + (i + 1)
+ " = " + vertexInputs[i].getType());
}
int thisSize;
switch (vertexInputs[i].getType()) {
case FF:
thisSize = ((InputType.InputTypeFeedForward) vertexInputs[i]).getSize();
type = InputType.Type.FF;
break;
case RNN:
thisSize = ((InputType.InputTypeRecurrent) vertexInputs[i]).getSize();
type = InputType.Type.RNN;
break;
default:
throw new IllegalStateException("Unknown input type: " + vertexInputs[i]); //Should never happen
}
if (thisSize <= 0) {//Size is not defined
size = -1;
} else {
size += thisSize;
}
}
if (size > 0) {
//Size is specified
if (type == InputType.Type.FF)
return InputType.feedForward(size);
else
return InputType.recurrent(size);
} else {
//size is unknown
if (type == InputType.Type.FF)
return InputType.feedForward(-1);
else
return InputType.recurrent(-1);
}
} else {
//CNN inputs... also check that the depth, width and heights match:
InputType.InputTypeConvolutional firstConv = (InputType.InputTypeConvolutional) first;
int fd = firstConv.getDepth();
int fw = firstConv.getWidth();
int fh = firstConv.getHeight();
int depthSum = fd;
for (int i = 1; i < vertexInputs.length; i++) {
if (vertexInputs[i].getType() != InputType.Type.CNN) {
throw new InvalidInputTypeException(
"Invalid input: MergeVertex cannot process activations of different types:"
+ " first type = " + InputType.Type.CNN + ", input type " + (i + 1)
+ " = " + vertexInputs[i].getType());
}
InputType.InputTypeConvolutional otherConv = (InputType.InputTypeConvolutional) vertexInputs[i];
int od = otherConv.getDepth();
int ow = otherConv.getWidth();
int oh = otherConv.getHeight();
if (fw != ow || fh != oh) {
throw new InvalidInputTypeException(
"Invalid input: MergeVertex cannot merge CNN activations of different width/heights:"
+ "first [depth,width,height] = [" + fd + "," + fw + "," + fh
+ "], input " + i + " = [" + od + "," + ow + "," + oh + "]");
}
depthSum += od;
}
return InputType.convolutional(fh, fw, depthSum);
}
}
}
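/*
 * Illustrative usage sketch: a MergeVertex is normally wired into a computation graph via the
 * graph builder so that the activations of two (or more) layers are concatenated along the
 * feature dimension. The layer names and sizes below are hypothetical, and the snippet assumes
 * the standard ComputationGraphConfiguration builder API:
 *
 *   ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
 *       .graphBuilder()
 *       .addInputs("in")
 *       .addLayer("dense1", new DenseLayer.Builder().nIn(10).nOut(5).build(), "in")
 *       .addLayer("dense2", new DenseLayer.Builder().nIn(10).nOut(7).build(), "in")
 *       .addVertex("merge", new MergeVertex(), "dense1", "dense2") // merged size = 5 + 7 = 12
 *       .addLayer("out", new OutputLayer.Builder().nIn(12).nOut(3).build(), "merge")
 *       .setOutputs("out")
 *       .build();
 */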
| apache-2.0 |
chiaming0914/awe-cpp-sdk | aws-cpp-sdk-codedeploy/source/model/ListApplicationRevisionsRequest.cpp | 2461 | /*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/codedeploy/model/ListApplicationRevisionsRequest.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::CodeDeploy::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
ListApplicationRevisionsRequest::ListApplicationRevisionsRequest() :
m_applicationNameHasBeenSet(false),
m_sortBy(ApplicationRevisionSortBy::NOT_SET),
m_sortByHasBeenSet(false),
m_sortOrder(SortOrder::NOT_SET),
m_sortOrderHasBeenSet(false),
m_s3BucketHasBeenSet(false),
m_s3KeyPrefixHasBeenSet(false),
m_deployed(ListStateFilterAction::NOT_SET),
m_deployedHasBeenSet(false),
m_nextTokenHasBeenSet(false)
{
}
Aws::String ListApplicationRevisionsRequest::SerializePayload() const
{
JsonValue payload;
if(m_applicationNameHasBeenSet)
{
payload.WithString("applicationName", m_applicationName);
}
if(m_sortByHasBeenSet)
{
payload.WithString("sortBy", ApplicationRevisionSortByMapper::GetNameForApplicationRevisionSortBy(m_sortBy));
}
if(m_sortOrderHasBeenSet)
{
payload.WithString("sortOrder", SortOrderMapper::GetNameForSortOrder(m_sortOrder));
}
if(m_s3BucketHasBeenSet)
{
payload.WithString("s3Bucket", m_s3Bucket);
}
if(m_s3KeyPrefixHasBeenSet)
{
payload.WithString("s3KeyPrefix", m_s3KeyPrefix);
}
if(m_deployedHasBeenSet)
{
payload.WithString("deployed", ListStateFilterActionMapper::GetNameForListStateFilterAction(m_deployed));
}
if(m_nextTokenHasBeenSet)
{
payload.WithString("nextToken", m_nextToken);
}
return payload.WriteReadable();
}
Aws::Http::HeaderValueCollection ListApplicationRevisionsRequest::GetRequestSpecificHeaders() const
{
Aws::Http::HeaderValueCollection headers;
headers.insert(Aws::Http::HeaderValuePair("X-Amz-Target", "CodeDeploy_20141006.ListApplicationRevisions"));
return headers;
}
| apache-2.0 |
Fabryprog/camel | core/camel-endpointdsl/src/main/java/org/apache/camel/builder/endpoint/dsl/RabbitMQEndpointBuilderFactory.java | 115871 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.builder.endpoint.dsl;
import java.util.Map;
import javax.annotation.Generated;
import org.apache.camel.ExchangePattern;
import org.apache.camel.builder.EndpointConsumerBuilder;
import org.apache.camel.builder.EndpointProducerBuilder;
import org.apache.camel.builder.endpoint.AbstractEndpointBuilder;
import org.apache.camel.spi.ExceptionHandler;
/**
* The rabbitmq component allows you to produce and consume messages from RabbitMQ
* instances.
*
* Generated by camel-package-maven-plugin - do not edit this file!
*/
@Generated("org.apache.camel.maven.packaging.EndpointDslMojo")
public interface RabbitMQEndpointBuilderFactory {
/**
* Builder for endpoint consumers for the RabbitMQ component.
*/
public interface RabbitMQEndpointConsumerBuilder
extends
EndpointConsumerBuilder {
default AdvancedRabbitMQEndpointConsumerBuilder advanced() {
return (AdvancedRabbitMQEndpointConsumerBuilder) this;
}
/**
* If this option is set, camel-rabbitmq will try to create connection
* based on the setting of option addresses. The addresses value is a
* string which looks like server1:12345, server2:12345.
*
* The option is a: <code>com.rabbitmq.client.Address[]</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder addresses(Object[] addresses) {
setProperty("addresses", addresses);
return this;
}
/**
* If this option is set, camel-rabbitmq will try to create connection
* based on the setting of option addresses. The addresses value is a
* string which looks like server1:12345, server2:12345.
*
* The option will be converted to a
* <code>com.rabbitmq.client.Address[]</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder addresses(String addresses) {
setProperty("addresses", addresses);
return this;
}
/**
* If it is true, the exchange will be deleted when it is no longer in
* use.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder autoDelete(boolean autoDelete) {
setProperty("autoDelete", autoDelete);
return this;
}
/**
* If it is true, the exchange will be deleted when it is no longer in
* use.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder autoDelete(String autoDelete) {
setProperty("autoDelete", autoDelete);
return this;
}
/**
* To use a custom RabbitMQ connection factory. When this option is set,
* all connection options (connectionTimeout, requestedChannelMax...)
* set on URI are not used.
*
* The option is a: <code>com.rabbitmq.client.ConnectionFactory</code>
* type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder connectionFactory(
Object connectionFactory) {
setProperty("connectionFactory", connectionFactory);
return this;
}
/**
* To use a custom RabbitMQ connection factory. When this option is set,
* all connection options (connectionTimeout, requestedChannelMax...)
* set on URI are not used.
*
* The option will be converted to a
* <code>com.rabbitmq.client.ConnectionFactory</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder connectionFactory(
String connectionFactory) {
setProperty("connectionFactory", connectionFactory);
return this;
}
/**
* The name of the dead letter exchange.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder deadLetterExchange(
String deadLetterExchange) {
setProperty("deadLetterExchange", deadLetterExchange);
return this;
}
/**
* The type of the dead letter exchange.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder deadLetterExchangeType(
String deadLetterExchangeType) {
setProperty("deadLetterExchangeType", deadLetterExchangeType);
return this;
}
/**
* The name of the dead letter queue.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder deadLetterQueue(
String deadLetterQueue) {
setProperty("deadLetterQueue", deadLetterQueue);
return this;
}
/**
* The routing key for the dead letter exchange.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder deadLetterRoutingKey(
String deadLetterRoutingKey) {
setProperty("deadLetterRoutingKey", deadLetterRoutingKey);
return this;
}
/**
* If the option is true, camel declares the exchange and queue name and
* bind them together. If the option is false, camel won't declare the
* exchange and queue name on the server.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder declare(boolean declare) {
setProperty("declare", declare);
return this;
}
/**
* If the option is true, camel declares the exchange and queue name and
* bind them together. If the option is false, camel won't declare the
* exchange and queue name on the server.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder declare(String declare) {
setProperty("declare", declare);
return this;
}
/**
* If we are declaring a durable exchange (the exchange will survive a
* server restart).
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder durable(boolean durable) {
setProperty("durable", durable);
return this;
}
/**
* If we are declaring a durable exchange (the exchange will survive a
* server restart).
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder durable(String durable) {
setProperty("durable", durable);
return this;
}
/**
* The exchange type such as direct or topic.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder exchangeType(String exchangeType) {
setProperty("exchangeType", exchangeType);
return this;
}
/**
* Exclusive queues may only be accessed by the current connection, and
* are deleted when that connection closes.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder exclusive(boolean exclusive) {
setProperty("exclusive", exclusive);
return this;
}
/**
* Exclusive queues may only be accessed by the current connection, and
* are deleted when that connection closes.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder exclusive(String exclusive) {
setProperty("exclusive", exclusive);
return this;
}
/**
* The hostname of the running rabbitmq instance or cluster.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder hostname(String hostname) {
setProperty("hostname", hostname);
return this;
}
/**
* Passive queues depend on the queue already to be available at
* RabbitMQ.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder passive(boolean passive) {
setProperty("passive", passive);
return this;
}
/**
* Passive queues depend on the queue already to be available at
* RabbitMQ.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder passive(String passive) {
setProperty("passive", passive);
return this;
}
/**
* Port number for the host with the running rabbitmq instance or
* cluster. Default value is 5672.
*
* The option is a: <code>int</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder portNumber(int portNumber) {
setProperty("portNumber", portNumber);
return this;
}
/**
* Port number for the host with the running rabbitmq instance or
* cluster. Default value is 5672.
*
* The option will be converted to a <code>int</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder portNumber(String portNumber) {
setProperty("portNumber", portNumber);
return this;
}
/**
* The queue to receive messages from.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder queue(String queue) {
setProperty("queue", queue);
return this;
}
/**
* The routing key to use when binding a consumer queue to the exchange.
* For producer routing keys, you set the header rabbitmq.ROUTING_KEY.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder routingKey(String routingKey) {
setProperty("routingKey", routingKey);
return this;
}
/**
* This can be used if we need to declare the queue but not the
* exchange.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder skipExchangeDeclare(
boolean skipExchangeDeclare) {
setProperty("skipExchangeDeclare", skipExchangeDeclare);
return this;
}
/**
* This can be used if we need to declare the queue but not the
* exchange.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder skipExchangeDeclare(
String skipExchangeDeclare) {
setProperty("skipExchangeDeclare", skipExchangeDeclare);
return this;
}
/**
* If true the queue will not be bound to the exchange after declaring
* it.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder skipQueueBind(
boolean skipQueueBind) {
setProperty("skipQueueBind", skipQueueBind);
return this;
}
/**
* If true the queue will not be bound to the exchange after declaring
* it.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder skipQueueBind(
String skipQueueBind) {
setProperty("skipQueueBind", skipQueueBind);
return this;
}
/**
* If true the producer will not declare and bind a queue. This can be
* used for directing messages via an existing routing key.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder skipQueueDeclare(
boolean skipQueueDeclare) {
setProperty("skipQueueDeclare", skipQueueDeclare);
return this;
}
/**
* If true the producer will not declare and bind a queue. This can be
* used for directing messages via an existing routing key.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder skipQueueDeclare(
String skipQueueDeclare) {
setProperty("skipQueueDeclare", skipQueueDeclare);
return this;
}
/**
* The vhost for the channel.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointConsumerBuilder vhost(String vhost) {
setProperty("vhost", vhost);
return this;
}
/**
* If messages should be auto acknowledged.
*
* The option is a: <code>boolean</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder autoAck(boolean autoAck) {
setProperty("autoAck", autoAck);
return this;
}
/**
* If messages should be auto acknowledged.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder autoAck(String autoAck) {
setProperty("autoAck", autoAck);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions occurred while the consumer is trying to
* pickup incoming messages, or the likes, will now be processed as a
* message and handled by the routing Error Handler. By default the
* consumer will use the org.apache.camel.spi.ExceptionHandler to deal
* with exceptions, that will be logged at WARN or ERROR level and
* ignored.
*
* The option is a: <code>boolean</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder bridgeErrorHandler(
boolean bridgeErrorHandler) {
setProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions occurred while the consumer is trying to
* pickup incoming messages, or the likes, will now be processed as a
* message and handled by the routing Error Handler. By default the
* consumer will use the org.apache.camel.spi.ExceptionHandler to deal
* with exceptions, that will be logged at WARN or ERROR level and
* ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder bridgeErrorHandler(
String bridgeErrorHandler) {
setProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Number of concurrent consumers when consuming from broker (e.g.
* similar to the same option for the JMS component).
*
* The option is a: <code>int</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder concurrentConsumers(
int concurrentConsumers) {
setProperty("concurrentConsumers", concurrentConsumers);
return this;
}
/**
* Number of concurrent consumers when consuming from broker (e.g.
* similar to the same option for the JMS component).
*
* The option will be converted to a <code>int</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder concurrentConsumers(
String concurrentConsumers) {
setProperty("concurrentConsumers", concurrentConsumers);
return this;
}
/**
* Request exclusive access to the queue (meaning only this consumer can
* access the queue). This is useful when you want a long-lived shared
* queue to be temporarily accessible by just one consumer.
*
* The option is a: <code>boolean</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder exclusiveConsumer(
boolean exclusiveConsumer) {
setProperty("exclusiveConsumer", exclusiveConsumer);
return this;
}
/**
* Request exclusive access to the queue (meaning only this consumer can
* access the queue). This is useful when you want a long-lived shared
* queue to be temporarily accessible by just one consumer.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder exclusiveConsumer(
String exclusiveConsumer) {
setProperty("exclusiveConsumer", exclusiveConsumer);
return this;
}
/**
* The maximum number of messages that the server will deliver, 0 if
* unlimited. You need to specify the option of prefetchSize,
* prefetchCount, prefetchGlobal at the same time.
*
* The option is a: <code>int</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder prefetchCount(int prefetchCount) {
setProperty("prefetchCount", prefetchCount);
return this;
}
/**
* The maximum number of messages that the server will deliver, 0 if
* unlimited. You need to specify the option of prefetchSize,
* prefetchCount, prefetchGlobal at the same time.
*
* The option will be converted to a <code>int</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder prefetchCount(
String prefetchCount) {
setProperty("prefetchCount", prefetchCount);
return this;
}
/**
* Enables the quality of service on the RabbitMQConsumer side. You need
* to specify the option of prefetchSize, prefetchCount, prefetchGlobal
* at the same time.
*
* The option is a: <code>boolean</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder prefetchEnabled(
boolean prefetchEnabled) {
setProperty("prefetchEnabled", prefetchEnabled);
return this;
}
/**
* Enables the quality of service on the RabbitMQConsumer side. You need
* to specify the option of prefetchSize, prefetchCount, prefetchGlobal
* at the same time.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder prefetchEnabled(
String prefetchEnabled) {
setProperty("prefetchEnabled", prefetchEnabled);
return this;
}
/**
* If the settings should be applied to the entire channel rather than
* each consumer. You need to specify the option of prefetchSize,
* prefetchCount, prefetchGlobal at the same time.
*
* The option is a: <code>boolean</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder prefetchGlobal(
boolean prefetchGlobal) {
setProperty("prefetchGlobal", prefetchGlobal);
return this;
}
/**
* If the settings should be applied to the entire channel rather than
* each consumer. You need to specify the option of prefetchSize,
* prefetchCount, prefetchGlobal at the same time.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder prefetchGlobal(
String prefetchGlobal) {
setProperty("prefetchGlobal", prefetchGlobal);
return this;
}
/**
* The maximum amount of content (measured in octets) that the server
* will deliver, 0 if unlimited. You need to specify the option of
* prefetchSize, prefetchCount, prefetchGlobal at the same time.
*
* The option is a: <code>int</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder prefetchSize(int prefetchSize) {
setProperty("prefetchSize", prefetchSize);
return this;
}
/**
* The maximum amount of content (measured in octets) that the server
* will deliver, 0 if unlimited. You need to specify the option of
* prefetchSize, prefetchCount, prefetchGlobal at the same time.
*
* The option will be converted to a <code>int</code> type.
*
* Group: consumer
*/
default RabbitMQEndpointConsumerBuilder prefetchSize(String prefetchSize) {
setProperty("prefetchSize", prefetchSize);
return this;
}
/**
* Password for authenticated access.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*/
default RabbitMQEndpointConsumerBuilder password(String password) {
setProperty("password", password);
return this;
}
/**
* Enables SSL on connection; accepted values are true, TLS and SSLv3.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*/
default RabbitMQEndpointConsumerBuilder sslProtocol(String sslProtocol) {
setProperty("sslProtocol", sslProtocol);
return this;
}
/**
* Configure SSL trust manager, SSL should be enabled for this option to
* be effective.
*
* The option is a: <code>javax.net.ssl.TrustManager</code> type.
*
* Group: security
*/
default RabbitMQEndpointConsumerBuilder trustManager(Object trustManager) {
setProperty("trustManager", trustManager);
return this;
}
/**
* Configure SSL trust manager, SSL should be enabled for this option to
* be effective.
*
* The option will be converted to a
* <code>javax.net.ssl.TrustManager</code> type.
*
* Group: security
*/
default RabbitMQEndpointConsumerBuilder trustManager(String trustManager) {
setProperty("trustManager", trustManager);
return this;
}
/**
* Username in case of authenticated access.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*/
default RabbitMQEndpointConsumerBuilder username(String username) {
setProperty("username", username);
return this;
}
}
/**
* Advanced builder for endpoint consumers for the RabbitMQ component.
*/
public interface AdvancedRabbitMQEndpointConsumerBuilder
extends
EndpointConsumerBuilder {
default RabbitMQEndpointConsumerBuilder basic() {
return (RabbitMQEndpointConsumerBuilder) this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*/
default AdvancedRabbitMQEndpointConsumerBuilder exceptionHandler(
ExceptionHandler exceptionHandler) {
setProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*/
default AdvancedRabbitMQEndpointConsumerBuilder exceptionHandler(
String exceptionHandler) {
setProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*/
default AdvancedRabbitMQEndpointConsumerBuilder exchangePattern(
ExchangePattern exchangePattern) {
setProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*/
default AdvancedRabbitMQEndpointConsumerBuilder exchangePattern(
String exchangePattern) {
setProperty("exchangePattern", exchangePattern);
return this;
}
/**
* The consumer uses a Thread Pool Executor with a fixed number of
* threads. This setting allows you to set that number of threads.
*
* The option is a: <code>int</code> type.
*
* Group: consumer (advanced)
*/
default AdvancedRabbitMQEndpointConsumerBuilder threadPoolSize(
int threadPoolSize) {
setProperty("threadPoolSize", threadPoolSize);
return this;
}
/**
* The consumer uses a Thread Pool Executor with a fixed number of
* threads. This setting allows you to set that number of threads.
*
* The option will be converted to a <code>int</code> type.
*
* Group: consumer (advanced)
*/
default AdvancedRabbitMQEndpointConsumerBuilder threadPoolSize(
String threadPoolSize) {
setProperty("threadPoolSize", threadPoolSize);
return this;
}
/**
* Specify arguments for configuring the different RabbitMQ concepts, a
* different prefix is required for each: Exchange: arg.exchange. Queue:
* arg.queue. Binding: arg.binding. For example to declare a queue with
* message ttl argument:
* http://localhost:5672/exchange/queue?args=arg.queue.x-message-ttl=60000.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder args(
Map<String, Object> args) {
setProperty("args", args);
return this;
}
/**
* Specify arguments for configuring the different RabbitMQ concepts, a
* different prefix is required for each: Exchange: arg.exchange. Queue:
* arg.queue. Binding: arg.binding. For example to declare a queue with
* message ttl argument:
* http://localhost:5672/exchange/queue?args=arg.queue.x-message-ttl=60000.
*
* The option will be converted to a
* <code>java.util.Map<java.lang.String, java.lang.Object></code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder args(String args) {
setProperty("args", args);
return this;
}
/**
* Enables connection automatic recovery (uses connection implementation
* that performs automatic recovery when connection shutdown is not
* initiated by the application).
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder automaticRecoveryEnabled(
Boolean automaticRecoveryEnabled) {
setProperty("automaticRecoveryEnabled", automaticRecoveryEnabled);
return this;
}
/**
* Enables connection automatic recovery (uses connection implementation
* that performs automatic recovery when connection shutdown is not
* initiated by the application).
*
* The option will be converted to a <code>java.lang.Boolean</code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder automaticRecoveryEnabled(
String automaticRecoveryEnabled) {
setProperty("automaticRecoveryEnabled", automaticRecoveryEnabled);
return this;
}
/**
* Whether the endpoint should use basic property binding (Camel 2.x) or
* the newer property binding with additional capabilities.
*
* The option is a: <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder basicPropertyBinding(
boolean basicPropertyBinding) {
setProperty("basicPropertyBinding", basicPropertyBinding);
return this;
}
/**
* Whether the endpoint should use basic property binding (Camel 2.x) or
* the newer property binding with additional capabilities.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder basicPropertyBinding(
String basicPropertyBinding) {
setProperty("basicPropertyBinding", basicPropertyBinding);
return this;
}
/**
* Connection client properties (client info used in negotiating with
* the server).
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder clientProperties(
Map<String, Object> clientProperties) {
setProperty("clientProperties", clientProperties);
return this;
}
/**
* Connection client properties (client info used in negotiating with
* the server).
*
* The option will be converted to a
* <code>java.util.Map<java.lang.String, java.lang.Object></code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder clientProperties(
String clientProperties) {
setProperty("clientProperties", clientProperties);
return this;
}
/**
* Connection timeout.
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder connectionTimeout(
int connectionTimeout) {
setProperty("connectionTimeout", connectionTimeout);
return this;
}
/**
* Connection timeout.
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder connectionTimeout(
String connectionTimeout) {
setProperty("connectionTimeout", connectionTimeout);
return this;
}
/**
* Network recovery interval in milliseconds (interval used when
* recovering from network failure).
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder networkRecoveryInterval(
Integer networkRecoveryInterval) {
setProperty("networkRecoveryInterval", networkRecoveryInterval);
return this;
}
/**
* Network recovery interval in milliseconds (interval used when
* recovering from network failure).
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder networkRecoveryInterval(
String networkRecoveryInterval) {
setProperty("networkRecoveryInterval", networkRecoveryInterval);
return this;
}
/**
* Connection requested channel max (max number of channels offered).
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder requestedChannelMax(
int requestedChannelMax) {
setProperty("requestedChannelMax", requestedChannelMax);
return this;
}
/**
* Connection requested channel max (max number of channels offered).
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder requestedChannelMax(
String requestedChannelMax) {
setProperty("requestedChannelMax", requestedChannelMax);
return this;
}
/**
* Connection requested frame max (max size of frame offered).
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder requestedFrameMax(
int requestedFrameMax) {
setProperty("requestedFrameMax", requestedFrameMax);
return this;
}
/**
* Connection requested frame max (max size of frame offered).
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder requestedFrameMax(
String requestedFrameMax) {
setProperty("requestedFrameMax", requestedFrameMax);
return this;
}
/**
* Connection requested heartbeat (heart-beat in seconds offered).
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder requestedHeartbeat(
int requestedHeartbeat) {
setProperty("requestedHeartbeat", requestedHeartbeat);
return this;
}
/**
* Connection requested heartbeat (heart-beat in seconds offered).
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder requestedHeartbeat(
String requestedHeartbeat) {
setProperty("requestedHeartbeat", requestedHeartbeat);
return this;
}
/**
* Set timeout for waiting for a reply when using the InOut Exchange
* Pattern (in milliseconds).
*
* The option is a: <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder requestTimeout(
long requestTimeout) {
setProperty("requestTimeout", requestTimeout);
return this;
}
/**
* Set timeout for waiting for a reply when using the InOut Exchange
* Pattern (in milliseconds).
*
* The option will be converted to a <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder requestTimeout(
String requestTimeout) {
setProperty("requestTimeout", requestTimeout);
return this;
}
/**
* Set requestTimeoutCheckerInterval for inOut exchange.
*
* The option is a: <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder requestTimeoutCheckerInterval(
long requestTimeoutCheckerInterval) {
setProperty("requestTimeoutCheckerInterval", requestTimeoutCheckerInterval);
return this;
}
/**
* Set requestTimeoutCheckerInterval for inOut exchange.
*
* The option will be converted to a <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder requestTimeoutCheckerInterval(
String requestTimeoutCheckerInterval) {
setProperty("requestTimeoutCheckerInterval", requestTimeoutCheckerInterval);
return this;
}
/**
* Sets whether synchronous processing should be strictly used, or Camel
* is allowed to use asynchronous processing (if supported).
*
* The option is a: <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder synchronous(
boolean synchronous) {
setProperty("synchronous", synchronous);
return this;
}
/**
* Sets whether synchronous processing should be strictly used, or Camel
* is allowed to use asynchronous processing (if supported).
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder synchronous(
String synchronous) {
setProperty("synchronous", synchronous);
return this;
}
/**
* Enables connection topology recovery (should topology recovery be
* performed).
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder topologyRecoveryEnabled(
Boolean topologyRecoveryEnabled) {
setProperty("topologyRecoveryEnabled", topologyRecoveryEnabled);
return this;
}
/**
* Enables connection topology recovery (should topology recovery be
* performed).
*
* The option will be converted to a <code>java.lang.Boolean</code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder topologyRecoveryEnabled(
String topologyRecoveryEnabled) {
setProperty("topologyRecoveryEnabled", topologyRecoveryEnabled);
return this;
}
/**
         * When true and an inOut Exchange failed on the consumer side, send the
         * caused Exception back in the response.
*
* The option is a: <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder transferException(
boolean transferException) {
setProperty("transferException", transferException);
return this;
}
/**
         * When true and an inOut Exchange failed on the consumer side, send the
         * caused Exception back in the response.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointConsumerBuilder transferException(
String transferException) {
setProperty("transferException", transferException);
return this;
}
}
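    /*
     * Illustrative sketch (not part of the generated builder API): one way the
     * Map-typed advanced options above, such as args and clientProperties,
     * could be populated before being passed to the builder. The prefixes
     * follow the args Javadoc; the argument values and the connection_name
     * property are made-up examples.
     *
     *   Map<String, Object> args = new java.util.HashMap<>();
     *   args.put("arg.queue.x-message-ttl", 60000);             // queue argument (arg.queue. prefix)
     *   args.put("arg.exchange.alternate-exchange", "backup");  // exchange argument (arg.exchange. prefix)
     *
     *   Map<String, Object> clientProps = new java.util.HashMap<>();
     *   clientProps.put("connection_name", "my-consumer");      // shown in the RabbitMQ management UI
     *
     *   // advancedConsumerBuilder.args(args).clientProperties(clientProps);
     */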
/**
* Builder for endpoint producers for the RabbitMQ component.
*/
public interface RabbitMQEndpointProducerBuilder
extends
EndpointProducerBuilder {
default AdvancedRabbitMQEndpointProducerBuilder advanced() {
return (AdvancedRabbitMQEndpointProducerBuilder) this;
}
/**
         * If this option is set, camel-rabbitmq will try to create the
         * connection based on the addresses option. The addresses value is a
         * string which looks like server1:12345, server2:12345.
*
* The option is a: <code>com.rabbitmq.client.Address[]</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder addresses(Object[] addresses) {
setProperty("addresses", addresses);
return this;
}
/**
         * If this option is set, camel-rabbitmq will try to create the
         * connection based on the addresses option. The addresses value is a
         * string which looks like server1:12345, server2:12345.
*
* The option will be converted to a
* <code>com.rabbitmq.client.Address[]</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder addresses(String addresses) {
setProperty("addresses", addresses);
return this;
}
/**
* If it is true, the exchange will be deleted when it is no longer in
* use.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder autoDelete(boolean autoDelete) {
setProperty("autoDelete", autoDelete);
return this;
}
/**
* If it is true, the exchange will be deleted when it is no longer in
* use.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder autoDelete(String autoDelete) {
setProperty("autoDelete", autoDelete);
return this;
}
/**
* To use a custom RabbitMQ connection factory. When this option is set,
* all connection options (connectionTimeout, requestedChannelMax...)
* set on URI are not used.
*
* The option is a: <code>com.rabbitmq.client.ConnectionFactory</code>
* type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder connectionFactory(
Object connectionFactory) {
setProperty("connectionFactory", connectionFactory);
return this;
}
/**
* To use a custom RabbitMQ connection factory. When this option is set,
* all connection options (connectionTimeout, requestedChannelMax...)
* set on URI are not used.
*
* The option will be converted to a
* <code>com.rabbitmq.client.ConnectionFactory</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder connectionFactory(
String connectionFactory) {
setProperty("connectionFactory", connectionFactory);
return this;
}
/**
* The name of the dead letter exchange.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder deadLetterExchange(
String deadLetterExchange) {
setProperty("deadLetterExchange", deadLetterExchange);
return this;
}
/**
* The type of the dead letter exchange.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder deadLetterExchangeType(
String deadLetterExchangeType) {
setProperty("deadLetterExchangeType", deadLetterExchangeType);
return this;
}
/**
* The name of the dead letter queue.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder deadLetterQueue(
String deadLetterQueue) {
setProperty("deadLetterQueue", deadLetterQueue);
return this;
}
/**
* The routing key for the dead letter exchange.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder deadLetterRoutingKey(
String deadLetterRoutingKey) {
setProperty("deadLetterRoutingKey", deadLetterRoutingKey);
return this;
}
/**
         * If the option is true, Camel declares the exchange and the queue name
         * and binds them together. If the option is false, Camel won't declare
         * the exchange and queue name on the server.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder declare(boolean declare) {
setProperty("declare", declare);
return this;
}
/**
         * If the option is true, Camel declares the exchange and the queue name
         * and binds them together. If the option is false, Camel won't declare
         * the exchange and queue name on the server.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder declare(String declare) {
setProperty("declare", declare);
return this;
}
/**
* If we are declaring a durable exchange (the exchange will survive a
* server restart).
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder durable(boolean durable) {
setProperty("durable", durable);
return this;
}
/**
* If we are declaring a durable exchange (the exchange will survive a
* server restart).
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder durable(String durable) {
setProperty("durable", durable);
return this;
}
/**
* The exchange type such as direct or topic.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder exchangeType(String exchangeType) {
setProperty("exchangeType", exchangeType);
return this;
}
/**
* Exclusive queues may only be accessed by the current connection, and
* are deleted when that connection closes.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder exclusive(boolean exclusive) {
setProperty("exclusive", exclusive);
return this;
}
/**
* Exclusive queues may only be accessed by the current connection, and
* are deleted when that connection closes.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder exclusive(String exclusive) {
setProperty("exclusive", exclusive);
return this;
}
/**
* The hostname of the running rabbitmq instance or cluster.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder hostname(String hostname) {
setProperty("hostname", hostname);
return this;
}
/**
         * Passive queues depend on the queue already being available at
         * RabbitMQ.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder passive(boolean passive) {
setProperty("passive", passive);
return this;
}
/**
         * Passive queues depend on the queue already being available at
         * RabbitMQ.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder passive(String passive) {
setProperty("passive", passive);
return this;
}
/**
* Port number for the host with the running rabbitmq instance or
* cluster. Default value is 5672.
*
* The option is a: <code>int</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder portNumber(int portNumber) {
setProperty("portNumber", portNumber);
return this;
}
/**
* Port number for the host with the running rabbitmq instance or
* cluster. Default value is 5672.
*
* The option will be converted to a <code>int</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder portNumber(String portNumber) {
setProperty("portNumber", portNumber);
return this;
}
/**
* The queue to receive messages from.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder queue(String queue) {
setProperty("queue", queue);
return this;
}
/**
* The routing key to use when binding a consumer queue to the exchange.
* For producer routing keys, you set the header rabbitmq.ROUTING_KEY.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder routingKey(String routingKey) {
setProperty("routingKey", routingKey);
return this;
}
/**
* This can be used if we need to declare the queue but not the
* exchange.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder skipExchangeDeclare(
boolean skipExchangeDeclare) {
setProperty("skipExchangeDeclare", skipExchangeDeclare);
return this;
}
/**
* This can be used if we need to declare the queue but not the
* exchange.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder skipExchangeDeclare(
String skipExchangeDeclare) {
setProperty("skipExchangeDeclare", skipExchangeDeclare);
return this;
}
/**
* If true the queue will not be bound to the exchange after declaring
* it.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder skipQueueBind(
boolean skipQueueBind) {
setProperty("skipQueueBind", skipQueueBind);
return this;
}
/**
* If true the queue will not be bound to the exchange after declaring
* it.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder skipQueueBind(
String skipQueueBind) {
setProperty("skipQueueBind", skipQueueBind);
return this;
}
/**
* If true the producer will not declare and bind a queue. This can be
* used for directing messages via an existing routing key.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder skipQueueDeclare(
boolean skipQueueDeclare) {
setProperty("skipQueueDeclare", skipQueueDeclare);
return this;
}
/**
* If true the producer will not declare and bind a queue. This can be
* used for directing messages via an existing routing key.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder skipQueueDeclare(
String skipQueueDeclare) {
setProperty("skipQueueDeclare", skipQueueDeclare);
return this;
}
/**
* The vhost for the channel.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointProducerBuilder vhost(String vhost) {
setProperty("vhost", vhost);
return this;
}
/**
         * Allow passing null values to headers.
*
* The option is a: <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder allowNullHeaders(
boolean allowNullHeaders) {
setProperty("allowNullHeaders", allowNullHeaders);
return this;
}
/**
         * Allow passing null values to headers.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder allowNullHeaders(
String allowNullHeaders) {
setProperty("allowNullHeaders", allowNullHeaders);
return this;
}
/**
         * If the bridgeEndpoint is true, the producer will ignore the
         * rabbitmq.EXCHANGE_NAME and rabbitmq.ROUTING_KEY message headers.
*
* The option is a: <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder bridgeEndpoint(
boolean bridgeEndpoint) {
setProperty("bridgeEndpoint", bridgeEndpoint);
return this;
}
/**
         * If the bridgeEndpoint is true, the producer will ignore the
         * rabbitmq.EXCHANGE_NAME and rabbitmq.ROUTING_KEY message headers.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder bridgeEndpoint(
String bridgeEndpoint) {
setProperty("bridgeEndpoint", bridgeEndpoint);
return this;
}
/**
         * Maximum number of open channels in the pool.
*
* The option is a: <code>int</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder channelPoolMaxSize(
int channelPoolMaxSize) {
setProperty("channelPoolMaxSize", channelPoolMaxSize);
return this;
}
/**
         * Maximum number of open channels in the pool.
*
* The option will be converted to a <code>int</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder channelPoolMaxSize(
String channelPoolMaxSize) {
setProperty("channelPoolMaxSize", channelPoolMaxSize);
return this;
}
/**
* Set the maximum number of milliseconds to wait for a channel from the
* pool.
*
* The option is a: <code>long</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder channelPoolMaxWait(
long channelPoolMaxWait) {
setProperty("channelPoolMaxWait", channelPoolMaxWait);
return this;
}
/**
* Set the maximum number of milliseconds to wait for a channel from the
* pool.
*
* The option will be converted to a <code>long</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder channelPoolMaxWait(
String channelPoolMaxWait) {
setProperty("channelPoolMaxWait", channelPoolMaxWait);
return this;
}
/**
* When true, an exception will be thrown when the message cannot be
* delivered (basic.return) and the message is marked as mandatory.
* PublisherAcknowledgement will also be activated in this case. See
* also publisher acknowledgements - When will messages be confirmed.
*
* The option is a: <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder guaranteedDeliveries(
boolean guaranteedDeliveries) {
setProperty("guaranteedDeliveries", guaranteedDeliveries);
return this;
}
/**
* When true, an exception will be thrown when the message cannot be
* delivered (basic.return) and the message is marked as mandatory.
* PublisherAcknowledgement will also be activated in this case. See
* also publisher acknowledgements - When will messages be confirmed.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder guaranteedDeliveries(
String guaranteedDeliveries) {
setProperty("guaranteedDeliveries", guaranteedDeliveries);
return this;
}
/**
* This flag tells the server how to react if the message cannot be
* routed to a queue consumer immediately. If this flag is set, the
* server will return an undeliverable message with a Return method. If
* this flag is zero, the server will queue the message, but with no
         * guarantee that it will ever be consumed. If the rabbitmq.IMMEDIATE
         * header is present, it will override this option.
*
* The option is a: <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder immediate(boolean immediate) {
setProperty("immediate", immediate);
return this;
}
/**
* This flag tells the server how to react if the message cannot be
* routed to a queue consumer immediately. If this flag is set, the
* server will return an undeliverable message with a Return method. If
* this flag is zero, the server will queue the message, but with no
         * guarantee that it will ever be consumed. If the rabbitmq.IMMEDIATE
         * header is present, it will override this option.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder immediate(String immediate) {
setProperty("immediate", immediate);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
         * the first message is processed, creating and starting the producer
         * may take a little time and prolong the total processing time of that
         * message.
*
* The option is a: <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder lazyStartProducer(
boolean lazyStartProducer) {
setProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
         * the first message is processed, creating and starting the producer
         * may take a little time and prolong the total processing time of that
         * message.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder lazyStartProducer(
String lazyStartProducer) {
setProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* This flag tells the server how to react if the message cannot be
* routed to a queue. If this flag is set, the server will return an
* unroutable message with a Return method. If this flag is zero, the
         * server silently drops the message. If the rabbitmq.MANDATORY header
         * is present, it will override this option.
*
* The option is a: <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder mandatory(boolean mandatory) {
setProperty("mandatory", mandatory);
return this;
}
/**
* This flag tells the server how to react if the message cannot be
* routed to a queue. If this flag is set, the server will return an
* unroutable message with a Return method. If this flag is zero, the
         * server silently drops the message. If the rabbitmq.MANDATORY header
         * is present, it will override this option.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder mandatory(String mandatory) {
setProperty("mandatory", mandatory);
return this;
}
/**
* When true, the message will be published with publisher
* acknowledgements turned on.
*
* The option is a: <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder publisherAcknowledgements(
boolean publisherAcknowledgements) {
setProperty("publisherAcknowledgements", publisherAcknowledgements);
return this;
}
/**
* When true, the message will be published with publisher
* acknowledgements turned on.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder publisherAcknowledgements(
String publisherAcknowledgements) {
setProperty("publisherAcknowledgements", publisherAcknowledgements);
return this;
}
/**
* The amount of time in milliseconds to wait for a basic.ack response
* from RabbitMQ server.
*
* The option is a: <code>long</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder publisherAcknowledgementsTimeout(
long publisherAcknowledgementsTimeout) {
setProperty("publisherAcknowledgementsTimeout", publisherAcknowledgementsTimeout);
return this;
}
/**
* The amount of time in milliseconds to wait for a basic.ack response
* from RabbitMQ server.
*
* The option will be converted to a <code>long</code> type.
*
* Group: producer
*/
default RabbitMQEndpointProducerBuilder publisherAcknowledgementsTimeout(
String publisherAcknowledgementsTimeout) {
setProperty("publisherAcknowledgementsTimeout", publisherAcknowledgementsTimeout);
return this;
}
/**
* Password for authenticated access.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*/
default RabbitMQEndpointProducerBuilder password(String password) {
setProperty("password", password);
return this;
}
/**
         * Enables SSL on the connection; accepted values are true, TLS and SSLv3.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*/
default RabbitMQEndpointProducerBuilder sslProtocol(String sslProtocol) {
setProperty("sslProtocol", sslProtocol);
return this;
}
/**
* Configure SSL trust manager, SSL should be enabled for this option to
* be effective.
*
* The option is a: <code>javax.net.ssl.TrustManager</code> type.
*
* Group: security
*/
default RabbitMQEndpointProducerBuilder trustManager(Object trustManager) {
setProperty("trustManager", trustManager);
return this;
}
/**
* Configure SSL trust manager, SSL should be enabled for this option to
* be effective.
*
* The option will be converted to a
* <code>javax.net.ssl.TrustManager</code> type.
*
* Group: security
*/
default RabbitMQEndpointProducerBuilder trustManager(String trustManager) {
setProperty("trustManager", trustManager);
return this;
}
/**
* Username in case of authenticated access.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*/
default RabbitMQEndpointProducerBuilder username(String username) {
setProperty("username", username);
return this;
}
}
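    /*
     * Illustrative sketch (not part of the generated builder API): how the
     * producer builder above might be used from a Camel 3.x
     * EndpointRouteBuilder. The "placeOrder" direct endpoint, the "orders"
     * exchange, the "orders.q" queue and the routing key are hypothetical
     * names, and the broker location is the default localhost:5672.
     *
     *   public class OrdersRoute extends org.apache.camel.builder.endpoint.EndpointRouteBuilder {
     *       @Override
     *       public void configure() {
     *           from(direct("placeOrder"))
     *               .to(rabbitMQ("orders")
     *                       .hostname("localhost").portNumber(5672)
     *                       .queue("orders.q").routingKey("order.created")
     *                       .mandatory(true)
     *                       .publisherAcknowledgements(true)
     *                       .guaranteedDeliveries(true));
     *       }
     *   }
     */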
/**
* Advanced builder for endpoint producers for the RabbitMQ component.
*/
public interface AdvancedRabbitMQEndpointProducerBuilder
extends
EndpointProducerBuilder {
default RabbitMQEndpointProducerBuilder basic() {
return (RabbitMQEndpointProducerBuilder) this;
}
/**
         * Specify arguments for configuring the different RabbitMQ concepts; a
         * different prefix is required for each: Exchange: arg.exchange. Queue:
         * arg.queue. Binding: arg.binding. For example, to declare a queue with
         * a message TTL argument:
         * http://localhost:5672/exchange/queue?args=arg.queue.x-message-ttl=60000.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder args(
Map<String, Object> args) {
setProperty("args", args);
return this;
}
/**
         * Specify arguments for configuring the different RabbitMQ concepts; a
         * different prefix is required for each: Exchange: arg.exchange. Queue:
         * arg.queue. Binding: arg.binding. For example, to declare a queue with
         * a message TTL argument:
         * http://localhost:5672/exchange/queue?args=arg.queue.x-message-ttl=60000.
*
* The option will be converted to a
* <code>java.util.Map<java.lang.String, java.lang.Object></code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder args(String args) {
setProperty("args", args);
return this;
}
/**
* Enables connection automatic recovery (uses connection implementation
* that performs automatic recovery when connection shutdown is not
* initiated by the application).
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder automaticRecoveryEnabled(
Boolean automaticRecoveryEnabled) {
setProperty("automaticRecoveryEnabled", automaticRecoveryEnabled);
return this;
}
/**
* Enables connection automatic recovery (uses connection implementation
* that performs automatic recovery when connection shutdown is not
* initiated by the application).
*
* The option will be converted to a <code>java.lang.Boolean</code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder automaticRecoveryEnabled(
String automaticRecoveryEnabled) {
setProperty("automaticRecoveryEnabled", automaticRecoveryEnabled);
return this;
}
/**
* Whether the endpoint should use basic property binding (Camel 2.x) or
* the newer property binding with additional capabilities.
*
* The option is a: <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder basicPropertyBinding(
boolean basicPropertyBinding) {
setProperty("basicPropertyBinding", basicPropertyBinding);
return this;
}
/**
* Whether the endpoint should use basic property binding (Camel 2.x) or
* the newer property binding with additional capabilities.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder basicPropertyBinding(
String basicPropertyBinding) {
setProperty("basicPropertyBinding", basicPropertyBinding);
return this;
}
/**
* Connection client properties (client info used in negotiating with
* the server).
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder clientProperties(
Map<String, Object> clientProperties) {
setProperty("clientProperties", clientProperties);
return this;
}
/**
* Connection client properties (client info used in negotiating with
* the server).
*
* The option will be converted to a
* <code>java.util.Map<java.lang.String, java.lang.Object></code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder clientProperties(
String clientProperties) {
setProperty("clientProperties", clientProperties);
return this;
}
/**
* Connection timeout.
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder connectionTimeout(
int connectionTimeout) {
setProperty("connectionTimeout", connectionTimeout);
return this;
}
/**
* Connection timeout.
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder connectionTimeout(
String connectionTimeout) {
setProperty("connectionTimeout", connectionTimeout);
return this;
}
/**
* Network recovery interval in milliseconds (interval used when
* recovering from network failure).
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder networkRecoveryInterval(
Integer networkRecoveryInterval) {
setProperty("networkRecoveryInterval", networkRecoveryInterval);
return this;
}
/**
* Network recovery interval in milliseconds (interval used when
* recovering from network failure).
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder networkRecoveryInterval(
String networkRecoveryInterval) {
setProperty("networkRecoveryInterval", networkRecoveryInterval);
return this;
}
/**
* Connection requested channel max (max number of channels offered).
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder requestedChannelMax(
int requestedChannelMax) {
setProperty("requestedChannelMax", requestedChannelMax);
return this;
}
/**
* Connection requested channel max (max number of channels offered).
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder requestedChannelMax(
String requestedChannelMax) {
setProperty("requestedChannelMax", requestedChannelMax);
return this;
}
/**
* Connection requested frame max (max size of frame offered).
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder requestedFrameMax(
int requestedFrameMax) {
setProperty("requestedFrameMax", requestedFrameMax);
return this;
}
/**
* Connection requested frame max (max size of frame offered).
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder requestedFrameMax(
String requestedFrameMax) {
setProperty("requestedFrameMax", requestedFrameMax);
return this;
}
/**
* Connection requested heartbeat (heart-beat in seconds offered).
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder requestedHeartbeat(
int requestedHeartbeat) {
setProperty("requestedHeartbeat", requestedHeartbeat);
return this;
}
/**
* Connection requested heartbeat (heart-beat in seconds offered).
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder requestedHeartbeat(
String requestedHeartbeat) {
setProperty("requestedHeartbeat", requestedHeartbeat);
return this;
}
/**
* Set timeout for waiting for a reply when using the InOut Exchange
* Pattern (in milliseconds).
*
* The option is a: <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder requestTimeout(
long requestTimeout) {
setProperty("requestTimeout", requestTimeout);
return this;
}
/**
* Set timeout for waiting for a reply when using the InOut Exchange
* Pattern (in milliseconds).
*
* The option will be converted to a <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder requestTimeout(
String requestTimeout) {
setProperty("requestTimeout", requestTimeout);
return this;
}
/**
* Set requestTimeoutCheckerInterval for inOut exchange.
*
* The option is a: <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder requestTimeoutCheckerInterval(
long requestTimeoutCheckerInterval) {
setProperty("requestTimeoutCheckerInterval", requestTimeoutCheckerInterval);
return this;
}
/**
* Set requestTimeoutCheckerInterval for inOut exchange.
*
* The option will be converted to a <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder requestTimeoutCheckerInterval(
String requestTimeoutCheckerInterval) {
setProperty("requestTimeoutCheckerInterval", requestTimeoutCheckerInterval);
return this;
}
/**
* Sets whether synchronous processing should be strictly used, or Camel
* is allowed to use asynchronous processing (if supported).
*
* The option is a: <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder synchronous(
boolean synchronous) {
setProperty("synchronous", synchronous);
return this;
}
/**
* Sets whether synchronous processing should be strictly used, or Camel
* is allowed to use asynchronous processing (if supported).
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder synchronous(
String synchronous) {
setProperty("synchronous", synchronous);
return this;
}
/**
* Enables connection topology recovery (should topology recovery be
* performed).
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder topologyRecoveryEnabled(
Boolean topologyRecoveryEnabled) {
setProperty("topologyRecoveryEnabled", topologyRecoveryEnabled);
return this;
}
/**
* Enables connection topology recovery (should topology recovery be
* performed).
*
* The option will be converted to a <code>java.lang.Boolean</code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder topologyRecoveryEnabled(
String topologyRecoveryEnabled) {
setProperty("topologyRecoveryEnabled", topologyRecoveryEnabled);
return this;
}
/**
         * When true and an inOut Exchange failed on the consumer side, send the
         * caused Exception back in the response.
*
* The option is a: <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder transferException(
boolean transferException) {
setProperty("transferException", transferException);
return this;
}
/**
         * When true and an inOut Exchange failed on the consumer side, send the
         * caused Exception back in the response.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointProducerBuilder transferException(
String transferException) {
setProperty("transferException", transferException);
return this;
}
}
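    /*
     * Illustrative sketch (not part of the generated builder API): switching
     * between the basic and advanced producer views with advanced()/basic()
     * while tuning connection recovery and request/reply timeouts. The values
     * are arbitrary examples, not recommended defaults.
     *
     *   // producerBuilder is a RabbitMQEndpointProducerBuilder
     *   producerBuilder
     *       .exchangeType("topic")
     *       .advanced()
     *           .automaticRecoveryEnabled(Boolean.TRUE)
     *           .networkRecoveryInterval(5000)
     *           .requestTimeout(20000)                 // InOut reply timeout in milliseconds
     *           .requestTimeoutCheckerInterval(1000)
     *       .basic()
     *           .routingKey("logs.info");
     */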
/**
* Builder for endpoint for the RabbitMQ component.
*/
public interface RabbitMQEndpointBuilder
extends
RabbitMQEndpointConsumerBuilder, RabbitMQEndpointProducerBuilder {
default AdvancedRabbitMQEndpointBuilder advanced() {
return (AdvancedRabbitMQEndpointBuilder) this;
}
/**
         * If this option is set, camel-rabbitmq will try to create the
         * connection based on the addresses option. The addresses value is a
         * string which looks like server1:12345, server2:12345.
*
* The option is a: <code>com.rabbitmq.client.Address[]</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder addresses(Object[] addresses) {
setProperty("addresses", addresses);
return this;
}
/**
         * If this option is set, camel-rabbitmq will try to create the
         * connection based on the addresses option. The addresses value is a
         * string which looks like server1:12345, server2:12345.
*
* The option will be converted to a
* <code>com.rabbitmq.client.Address[]</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder addresses(String addresses) {
setProperty("addresses", addresses);
return this;
}
/**
* If it is true, the exchange will be deleted when it is no longer in
* use.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder autoDelete(boolean autoDelete) {
setProperty("autoDelete", autoDelete);
return this;
}
/**
* If it is true, the exchange will be deleted when it is no longer in
* use.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder autoDelete(String autoDelete) {
setProperty("autoDelete", autoDelete);
return this;
}
/**
* To use a custom RabbitMQ connection factory. When this option is set,
* all connection options (connectionTimeout, requestedChannelMax...)
* set on URI are not used.
*
* The option is a: <code>com.rabbitmq.client.ConnectionFactory</code>
* type.
*
* Group: common
*/
default RabbitMQEndpointBuilder connectionFactory(
Object connectionFactory) {
setProperty("connectionFactory", connectionFactory);
return this;
}
/**
* To use a custom RabbitMQ connection factory. When this option is set,
* all connection options (connectionTimeout, requestedChannelMax...)
* set on URI are not used.
*
* The option will be converted to a
* <code>com.rabbitmq.client.ConnectionFactory</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder connectionFactory(
String connectionFactory) {
setProperty("connectionFactory", connectionFactory);
return this;
}
/**
* The name of the dead letter exchange.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder deadLetterExchange(
String deadLetterExchange) {
setProperty("deadLetterExchange", deadLetterExchange);
return this;
}
/**
* The type of the dead letter exchange.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder deadLetterExchangeType(
String deadLetterExchangeType) {
setProperty("deadLetterExchangeType", deadLetterExchangeType);
return this;
}
/**
* The name of the dead letter queue.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder deadLetterQueue(String deadLetterQueue) {
setProperty("deadLetterQueue", deadLetterQueue);
return this;
}
/**
* The routing key for the dead letter exchange.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder deadLetterRoutingKey(
String deadLetterRoutingKey) {
setProperty("deadLetterRoutingKey", deadLetterRoutingKey);
return this;
}
/**
         * If the option is true, Camel declares the exchange and the queue name
         * and binds them together. If the option is false, Camel won't declare
         * the exchange and queue name on the server.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder declare(boolean declare) {
setProperty("declare", declare);
return this;
}
/**
         * If the option is true, Camel declares the exchange and the queue name
         * and binds them together. If the option is false, Camel won't declare
         * the exchange and queue name on the server.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder declare(String declare) {
setProperty("declare", declare);
return this;
}
/**
* If we are declaring a durable exchange (the exchange will survive a
* server restart).
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder durable(boolean durable) {
setProperty("durable", durable);
return this;
}
/**
* If we are declaring a durable exchange (the exchange will survive a
* server restart).
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder durable(String durable) {
setProperty("durable", durable);
return this;
}
/**
* The exchange type such as direct or topic.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder exchangeType(String exchangeType) {
setProperty("exchangeType", exchangeType);
return this;
}
/**
* Exclusive queues may only be accessed by the current connection, and
* are deleted when that connection closes.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder exclusive(boolean exclusive) {
setProperty("exclusive", exclusive);
return this;
}
/**
* Exclusive queues may only be accessed by the current connection, and
* are deleted when that connection closes.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder exclusive(String exclusive) {
setProperty("exclusive", exclusive);
return this;
}
/**
* The hostname of the running rabbitmq instance or cluster.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder hostname(String hostname) {
setProperty("hostname", hostname);
return this;
}
/**
         * Passive queues depend on the queue already being available at
         * RabbitMQ.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder passive(boolean passive) {
setProperty("passive", passive);
return this;
}
/**
         * Passive queues depend on the queue already being available at
         * RabbitMQ.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder passive(String passive) {
setProperty("passive", passive);
return this;
}
/**
* Port number for the host with the running rabbitmq instance or
* cluster. Default value is 5672.
*
* The option is a: <code>int</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder portNumber(int portNumber) {
setProperty("portNumber", portNumber);
return this;
}
/**
* Port number for the host with the running rabbitmq instance or
* cluster. Default value is 5672.
*
* The option will be converted to a <code>int</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder portNumber(String portNumber) {
setProperty("portNumber", portNumber);
return this;
}
/**
* The queue to receive messages from.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder queue(String queue) {
setProperty("queue", queue);
return this;
}
/**
* The routing key to use when binding a consumer queue to the exchange.
* For producer routing keys, you set the header rabbitmq.ROUTING_KEY.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder routingKey(String routingKey) {
setProperty("routingKey", routingKey);
return this;
}
/**
* This can be used if we need to declare the queue but not the
* exchange.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder skipExchangeDeclare(
boolean skipExchangeDeclare) {
setProperty("skipExchangeDeclare", skipExchangeDeclare);
return this;
}
/**
* This can be used if we need to declare the queue but not the
* exchange.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder skipExchangeDeclare(
String skipExchangeDeclare) {
setProperty("skipExchangeDeclare", skipExchangeDeclare);
return this;
}
/**
* If true the queue will not be bound to the exchange after declaring
* it.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder skipQueueBind(boolean skipQueueBind) {
setProperty("skipQueueBind", skipQueueBind);
return this;
}
/**
* If true the queue will not be bound to the exchange after declaring
* it.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder skipQueueBind(String skipQueueBind) {
setProperty("skipQueueBind", skipQueueBind);
return this;
}
/**
* If true the producer will not declare and bind a queue. This can be
* used for directing messages via an existing routing key.
*
* The option is a: <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder skipQueueDeclare(
boolean skipQueueDeclare) {
setProperty("skipQueueDeclare", skipQueueDeclare);
return this;
}
/**
* If true the producer will not declare and bind a queue. This can be
* used for directing messages via an existing routing key.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder skipQueueDeclare(String skipQueueDeclare) {
setProperty("skipQueueDeclare", skipQueueDeclare);
return this;
}
/**
* The vhost for the channel.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*/
default RabbitMQEndpointBuilder vhost(String vhost) {
setProperty("vhost", vhost);
return this;
}
/**
* Password for authenticated access.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*/
default RabbitMQEndpointBuilder password(String password) {
setProperty("password", password);
return this;
}
/**
         * Enables SSL on the connection; accepted values are true, TLS and SSLv3.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*/
default RabbitMQEndpointBuilder sslProtocol(String sslProtocol) {
setProperty("sslProtocol", sslProtocol);
return this;
}
/**
* Configure SSL trust manager, SSL should be enabled for this option to
* be effective.
*
* The option is a: <code>javax.net.ssl.TrustManager</code> type.
*
* Group: security
*/
default RabbitMQEndpointBuilder trustManager(Object trustManager) {
setProperty("trustManager", trustManager);
return this;
}
/**
* Configure SSL trust manager, SSL should be enabled for this option to
* be effective.
*
* The option will be converted to a
* <code>javax.net.ssl.TrustManager</code> type.
*
* Group: security
*/
default RabbitMQEndpointBuilder trustManager(String trustManager) {
setProperty("trustManager", trustManager);
return this;
}
/**
* Username in case of authenticated access.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*/
default RabbitMQEndpointBuilder username(String username) {
setProperty("username", username);
return this;
}
}
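    /*
     * Illustrative sketch (not part of the generated builder API): every
     * option on the builder above also has a String overload, whose value is
     * converted to the documented type at runtime; this can be useful when
     * values come from configuration, e.g. Camel property placeholders. The
     * placeholder keys below are hypothetical.
     *
     *   // endpointBuilder is a RabbitMQEndpointBuilder
     *   endpointBuilder
     *       .hostname("{{rabbitmq.host}}")
     *       .portNumber("{{rabbitmq.port}}")     // converted to int
     *       .durable("{{rabbitmq.durable}}")     // converted to boolean
     *       .username("{{rabbitmq.username}}")
     *       .password("{{rabbitmq.password}}");
     */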
/**
* Advanced builder for endpoint for the RabbitMQ component.
*/
public interface AdvancedRabbitMQEndpointBuilder
extends
AdvancedRabbitMQEndpointConsumerBuilder, AdvancedRabbitMQEndpointProducerBuilder {
default RabbitMQEndpointBuilder basic() {
return (RabbitMQEndpointBuilder) this;
}
/**
         * Specify arguments for configuring the different RabbitMQ concepts; a
         * different prefix is required for each: Exchange: arg.exchange. Queue:
         * arg.queue. Binding: arg.binding. For example, to declare a queue with
         * a message TTL argument:
         * http://localhost:5672/exchange/queue?args=arg.queue.x-message-ttl=60000.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder args(Map<String, Object> args) {
setProperty("args", args);
return this;
}
/**
         * Specify arguments for configuring the different RabbitMQ concepts; a
         * different prefix is required for each: Exchange: arg.exchange. Queue:
         * arg.queue. Binding: arg.binding. For example, to declare a queue with
         * a message TTL argument:
         * http://localhost:5672/exchange/queue?args=arg.queue.x-message-ttl=60000.
*
* The option will be converted to a
* <code>java.util.Map<java.lang.String, java.lang.Object></code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder args(String args) {
setProperty("args", args);
return this;
}
/**
* Enables connection automatic recovery (uses connection implementation
* that performs automatic recovery when connection shutdown is not
* initiated by the application).
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder automaticRecoveryEnabled(
Boolean automaticRecoveryEnabled) {
setProperty("automaticRecoveryEnabled", automaticRecoveryEnabled);
return this;
}
/**
* Enables connection automatic recovery (uses connection implementation
* that performs automatic recovery when connection shutdown is not
* initiated by the application).
*
* The option will be converted to a <code>java.lang.Boolean</code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder automaticRecoveryEnabled(
String automaticRecoveryEnabled) {
setProperty("automaticRecoveryEnabled", automaticRecoveryEnabled);
return this;
}
/**
* Whether the endpoint should use basic property binding (Camel 2.x) or
* the newer property binding with additional capabilities.
*
* The option is a: <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder basicPropertyBinding(
boolean basicPropertyBinding) {
setProperty("basicPropertyBinding", basicPropertyBinding);
return this;
}
/**
* Whether the endpoint should use basic property binding (Camel 2.x) or
* the newer property binding with additional capabilities.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder basicPropertyBinding(
String basicPropertyBinding) {
setProperty("basicPropertyBinding", basicPropertyBinding);
return this;
}
/**
* Connection client properties (client info used in negotiating with
* the server).
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder clientProperties(
Map<String, Object> clientProperties) {
setProperty("clientProperties", clientProperties);
return this;
}
/**
* Connection client properties (client info used in negotiating with
* the server).
*
* The option will be converted to a
* <code>java.util.Map<java.lang.String, java.lang.Object></code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder clientProperties(
String clientProperties) {
setProperty("clientProperties", clientProperties);
return this;
}
/**
* Connection timeout.
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder connectionTimeout(
int connectionTimeout) {
setProperty("connectionTimeout", connectionTimeout);
return this;
}
/**
* Connection timeout.
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder connectionTimeout(
String connectionTimeout) {
setProperty("connectionTimeout", connectionTimeout);
return this;
}
/**
* Network recovery interval in milliseconds (interval used when
* recovering from network failure).
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder networkRecoveryInterval(
Integer networkRecoveryInterval) {
setProperty("networkRecoveryInterval", networkRecoveryInterval);
return this;
}
/**
* Network recovery interval in milliseconds (interval used when
* recovering from network failure).
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder networkRecoveryInterval(
String networkRecoveryInterval) {
setProperty("networkRecoveryInterval", networkRecoveryInterval);
return this;
}
/**
* Connection requested channel max (max number of channels offered).
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder requestedChannelMax(
int requestedChannelMax) {
setProperty("requestedChannelMax", requestedChannelMax);
return this;
}
/**
* Connection requested channel max (max number of channels offered).
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder requestedChannelMax(
String requestedChannelMax) {
setProperty("requestedChannelMax", requestedChannelMax);
return this;
}
/**
* Connection requested frame max (max size of frame offered).
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder requestedFrameMax(
int requestedFrameMax) {
setProperty("requestedFrameMax", requestedFrameMax);
return this;
}
/**
* Connection requested frame max (max size of frame offered).
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder requestedFrameMax(
String requestedFrameMax) {
setProperty("requestedFrameMax", requestedFrameMax);
return this;
}
/**
* Connection requested heartbeat (heart-beat in seconds offered).
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder requestedHeartbeat(
int requestedHeartbeat) {
setProperty("requestedHeartbeat", requestedHeartbeat);
return this;
}
/**
* Connection requested heartbeat (heart-beat in seconds offered).
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder requestedHeartbeat(
String requestedHeartbeat) {
setProperty("requestedHeartbeat", requestedHeartbeat);
return this;
}
/**
* Set timeout for waiting for a reply when using the InOut Exchange
* Pattern (in milliseconds).
*
* The option is a: <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder requestTimeout(
long requestTimeout) {
setProperty("requestTimeout", requestTimeout);
return this;
}
/**
* Set timeout for waiting for a reply when using the InOut Exchange
* Pattern (in milliseconds).
*
* The option will be converted to a <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder requestTimeout(
String requestTimeout) {
setProperty("requestTimeout", requestTimeout);
return this;
}
/**
* Set requestTimeoutCheckerInterval for inOut exchange.
*
* The option is a: <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder requestTimeoutCheckerInterval(
long requestTimeoutCheckerInterval) {
setProperty("requestTimeoutCheckerInterval", requestTimeoutCheckerInterval);
return this;
}
/**
* Set requestTimeoutCheckerInterval for inOut exchange.
*
* The option will be converted to a <code>long</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder requestTimeoutCheckerInterval(
String requestTimeoutCheckerInterval) {
setProperty("requestTimeoutCheckerInterval", requestTimeoutCheckerInterval);
return this;
}
/**
* Sets whether synchronous processing should be strictly used, or Camel
* is allowed to use asynchronous processing (if supported).
*
* The option is a: <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder synchronous(boolean synchronous) {
setProperty("synchronous", synchronous);
return this;
}
/**
* Sets whether synchronous processing should be strictly used, or Camel
* is allowed to use asynchronous processing (if supported).
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder synchronous(String synchronous) {
setProperty("synchronous", synchronous);
return this;
}
/**
* Enables connection topology recovery (should topology recovery be
* performed).
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder topologyRecoveryEnabled(
Boolean topologyRecoveryEnabled) {
setProperty("topologyRecoveryEnabled", topologyRecoveryEnabled);
return this;
}
/**
* Enables connection topology recovery (should topology recovery be
* performed).
*
* The option will be converted to a <code>java.lang.Boolean</code>
* type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder topologyRecoveryEnabled(
String topologyRecoveryEnabled) {
setProperty("topologyRecoveryEnabled", topologyRecoveryEnabled);
return this;
}
/**
         * When true and an inOut Exchange failed on the consumer side, send the
         * caused Exception back in the response.
*
* The option is a: <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder transferException(
boolean transferException) {
setProperty("transferException", transferException);
return this;
}
/**
         * When true and an inOut Exchange failed on the consumer side, send the
         * caused Exception back in the response.
*
* The option will be converted to a <code>boolean</code> type.
*
* Group: advanced
*/
default AdvancedRabbitMQEndpointBuilder transferException(
String transferException) {
setProperty("transferException", transferException);
return this;
}
}
/**
* RabbitMQ (camel-rabbitmq)
     * The rabbitmq component allows you to produce and consume messages from
     * RabbitMQ instances.
*
* Category: messaging
* Available as of version: 2.12
* Maven coordinates: org.apache.camel:camel-rabbitmq
*
* Syntax: <code>rabbitmq:exchangeName</code>
*
* Path parameter: exchangeName (required)
     * The exchange name determines which exchange produced messages will be sent
     * to. In the case of consumers, the exchange name determines which exchange
* the queue will bind to.
*/
default RabbitMQEndpointBuilder rabbitMQ(String path) {
class RabbitMQEndpointBuilderImpl extends AbstractEndpointBuilder implements RabbitMQEndpointBuilder, AdvancedRabbitMQEndpointBuilder {
public RabbitMQEndpointBuilderImpl(String path) {
super("rabbitmq", path);
}
}
return new RabbitMQEndpointBuilderImpl(path);
}
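    // Hedged usage sketch (not part of the generated DSL; "orders" is a hypothetical exchange
    // name): the advanced builder for a rabbitmq endpoint can chain the options declared above,
    // for example:
    //
    //   AdvancedRabbitMQEndpointBuilder endpoint = ...; // e.g. obtained for rabbitMQ("orders")
    //   endpoint.requestTimeout(30000)
    //           .requestedHeartbeat(60)
    //           .synchronous(false);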
} | apache-2.0 |
quantopian/pyfolio | pyfolio/tests/test_tears.py | 4911 | from matplotlib.testing.decorators import cleanup
from unittest import TestCase
from nose_parameterized import parameterized
import os
import gzip
from pandas import read_csv
from pyfolio.utils import (to_utc, to_series)
from pyfolio.tears import (create_full_tear_sheet,
create_simple_tear_sheet,
create_returns_tear_sheet,
create_position_tear_sheet,
create_txn_tear_sheet,
create_round_trip_tear_sheet,
create_interesting_times_tear_sheet,)
class PositionsTestCase(TestCase):
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
test_returns = read_csv(
gzip.open(
__location__ + '/test_data/test_returns.csv.gz'),
index_col=0, parse_dates=True)
test_returns = to_series(to_utc(test_returns))
test_txn = to_utc(read_csv(
gzip.open(
__location__ + '/test_data/test_txn.csv.gz'),
index_col=0, parse_dates=True))
test_pos = to_utc(read_csv(
gzip.open(__location__ + '/test_data/test_pos.csv.gz'),
index_col=0, parse_dates=True))
@parameterized.expand([({},),
({'slippage': 1},),
({'live_start_date': test_returns.index[-20]},),
({'round_trips': True},),
({'hide_positions': True},),
({'cone_std': 1},),
({'bootstrap': True},),
])
@cleanup
def test_create_full_tear_sheet_breakdown(self, kwargs):
create_full_tear_sheet(self.test_returns,
positions=self.test_pos,
transactions=self.test_txn,
benchmark_rets=self.test_returns,
**kwargs
)
@parameterized.expand([({},),
({'slippage': 1},),
({'live_start_date': test_returns.index[-20]},),
])
@cleanup
def test_create_simple_tear_sheet_breakdown(self, kwargs):
create_simple_tear_sheet(self.test_returns,
positions=self.test_pos,
transactions=self.test_txn,
**kwargs
)
@parameterized.expand([({},),
({'live_start_date':
test_returns.index[-20]},),
({'cone_std': 1},),
({'bootstrap': True},),
])
@cleanup
def test_create_returns_tear_sheet_breakdown(self, kwargs):
create_returns_tear_sheet(self.test_returns,
benchmark_rets=self.test_returns,
**kwargs
)
@parameterized.expand([({},),
({'hide_positions': True},),
({'show_and_plot_top_pos': 0},),
({'show_and_plot_top_pos': 1},),
])
@cleanup
def test_create_position_tear_sheet_breakdown(self, kwargs):
create_position_tear_sheet(self.test_returns,
self.test_pos,
**kwargs
)
@parameterized.expand([({},),
({'unadjusted_returns': test_returns},),
])
@cleanup
def test_create_txn_tear_sheet_breakdown(self, kwargs):
create_txn_tear_sheet(self.test_returns,
self.test_pos,
self.test_txn,
**kwargs
)
@parameterized.expand([({},),
({'sector_mappings': {}},),
])
@cleanup
def test_create_round_trip_tear_sheet_breakdown(self, kwargs):
create_round_trip_tear_sheet(self.test_returns,
self.test_pos,
self.test_txn,
**kwargs
)
@parameterized.expand([({},),
({'legend_loc': 1},),
])
@cleanup
def test_create_interesting_times_tear_sheet_breakdown(self,
kwargs):
create_interesting_times_tear_sheet(self.test_returns,
self.test_returns,
**kwargs
)
| apache-2.0 |
cha87de/colosseum | app/components/execution/Init.java | 1451 | /*
* Copyright (c) 2014-2015 University of Ulm
*
* See the NOTICE file distributed with this work for additional information
* regarding copyright ownership. Licensed under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package components.execution;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import java.util.Set;
import static com.google.common.base.Preconditions.checkNotNull;
/**
* Created by daniel on 24.07.15.
*/
@Singleton public class Init {
@Inject public Init(ExecutionService executionService, Set<Runnable> runnables,
Set<Schedulable> schedulables) {
checkNotNull(executionService);
checkNotNull(runnables);
checkNotNull(schedulables);
for (Runnable runnable : runnables) {
executionService.execute(runnable);
}
for (Schedulable schedulable : schedulables) {
executionService.schedule(schedulable);
}
}
}
| apache-2.0 |
bptlab/processeditor | src/net/frapu/code/visualization/twf/ToolErrorConnector.java | 3182 | /**
* copyright
* Inubit AG
* Schoeneberger Ufer 89
* 10785 Berlin
* Germany
*/
package net.frapu.code.visualization.twf;
import java.awt.Color;
import java.awt.Graphics;
import java.awt.Graphics2D;
import java.awt.Point;
import java.awt.Shape;
import java.awt.geom.Rectangle2D;
import java.util.HashSet;
import java.util.Set;
import net.frapu.code.visualization.ProcessModel;
import net.frapu.code.visualization.ProcessNode;
import net.frapu.code.visualization.ProcessUtils;
/**
* @author ff
*
*/
public class ToolErrorConnector extends ProcessNode {
private static final int DIST_X = 4;
private static final int DIST_Y = 3;
private static final int BUTTON_WIDTH = 25;
public static final int AREA_HEIGHT = 20;
public static String PROP_PARENT_ID = "#ParentToolID";
private Tool f_parent;
private String PROP_NUMBER = "#ConnectorNumber";
/**
* for serialization
*/
public ToolErrorConnector() {
f_parent = null;
setNumber(0);
}
public Tool getParent() {
return f_parent;
}
/**
     * @param tool the parent tool this connector is attached to
     * @param number index of this connector on the parent tool
*/
public ToolErrorConnector(Tool tool, int number) {
f_parent = tool;
setProperty(PROP_PARENT_ID, f_parent.getId());
setNumber(number);
}
@Override
public void addContext(ProcessModel context) {
super.addContext(context);
if(f_parent == null) {
f_parent = (Tool) context.getNodeById(getProperty(PROP_PARENT_ID));
if(f_parent != null)//can happen with legacy models
f_parent.setErrorConnector(this,getNumber());
}
}
@Override
protected Shape getOutlineShape() {
Rectangle2D outline = new Rectangle2D.Float(getPos().x - (getSize().width / 2),
getPos().y - (getSize().height / 2), getSize().width, getSize().height);
return outline;
}
@Override
protected void paintInternal(Graphics g) {
updatePosAndSize();
Graphics2D g2 = (Graphics2D) g;
g2.setStroke(ProcessUtils.defaultStroke);
g2.setColor(Color.WHITE);
g2.fillRect(getPos().x-getSize().width/2, getPos().y-getSize().height/2, getSize().width, getSize().height);
g2.setColor(Color.BLACK);
g2.drawRect(getPos().x-getSize().width/2, getPos().y-getSize().height/2, getSize().width, getSize().height);
}
/**
     * @param number the connector index to store
*/
public void setNumber(int number) {
setProperty(PROP_NUMBER , ""+number);
}
public int getNumber() {
try {
return Integer.parseInt(getProperty(PROP_NUMBER));
} catch (NumberFormatException e) {
e.printStackTrace();
return 0;
}
}
    /**
     * Recomputes this connector's position and size relative to its parent tool.
     */
private void updatePosAndSize() {
if(f_parent != null) {
Point _tlPos = new Point(f_parent.getPos().x-f_parent.getSize().width/2,
f_parent.getPos().y+f_parent.getSize().height/2-AREA_HEIGHT);
_tlPos.x += ((getNumber()+0.5)*BUTTON_WIDTH) + (getNumber()+1)*DIST_X;
_tlPos.y += AREA_HEIGHT/2;
setPos(_tlPos);
setSize(BUTTON_WIDTH, AREA_HEIGHT-2*DIST_Y);
}
}
@Override
public Set<Point> getDefaultConnectionPoints() {
HashSet<Point> cp = new HashSet<Point>();
cp.add(new Point(0, (getSize().height/2)));
return cp;
}
}
| apache-2.0 |
nojvek/TypeScript | tests/baselines/reference/tscWatch/emitAndErrorUpdates/default/file-not-exporting-a-deep-multilevel-import-that-changes.js | 7454 | /a/lib/tsc.js --w
//// [/user/username/projects/myproject/a.ts]
export interface Point {
name: string;
c: Coords;
}
export interface Coords {
x2: number;
y: number;
}
//// [/user/username/projects/myproject/b.ts]
import { Point } from "./a";
export interface PointWrapper extends Point {
}
//// [/user/username/projects/myproject/c.ts]
import { PointWrapper } from "./b";
export function getPoint(): PointWrapper {
return {
name: "test",
c: {
x: 1,
y: 2
}
}
};
//// [/user/username/projects/myproject/d.ts]
import { getPoint } from "./c";
getPoint().c.x;
//// [/user/username/projects/myproject/e.ts]
import "./d";
//// [/user/username/projects/myproject/tsconfig.json]
{}
//// [/a/lib/lib.d.ts]
/// <reference no-default-lib="true"/>
interface Boolean {}
interface Function {}
interface CallableFunction {}
interface NewableFunction {}
interface IArguments {}
interface Number { toExponential: any; }
interface Object {}
interface RegExp {}
interface String { charAt: any; }
interface Array<T> { length: number; [n: number]: T; }
//// [/user/username/projects/myproject/a.js]
"use strict";
exports.__esModule = true;
//// [/user/username/projects/myproject/b.js]
"use strict";
exports.__esModule = true;
//// [/user/username/projects/myproject/c.js]
"use strict";
exports.__esModule = true;
exports.getPoint = void 0;
function getPoint() {
return {
name: "test",
c: {
x: 1,
y: 2
}
};
}
exports.getPoint = getPoint;
;
//// [/user/username/projects/myproject/d.js]
"use strict";
exports.__esModule = true;
var c_1 = require("./c");
c_1.getPoint().c.x;
//// [/user/username/projects/myproject/e.js]
"use strict";
exports.__esModule = true;
require("./d");
Output::
>> Screen clear
[[90m12:00:29 AM[0m] Starting compilation in watch mode...
[96mc.ts[0m:[93m6[0m:[93m13[0m - [91merror[0m[90m TS2322: [0mType '{ x: number; y: number; }' is not assignable to type 'Coords'.
Object literal may only specify known properties, and 'x' does not exist in type 'Coords'.
[7m6[0m x: 1,
[7m [0m [91m ~~~~[0m
[96ma.ts[0m:[93m3[0m:[93m5[0m
[7m3[0m c: Coords;
[7m [0m [96m ~[0m
The expected type comes from property 'c' which is declared here on type 'PointWrapper'
[96md.ts[0m:[93m2[0m:[93m14[0m - [91merror[0m[90m TS2339: [0mProperty 'x' does not exist on type 'Coords'.
[7m2[0m getPoint().c.x;
[7m [0m [91m ~[0m
[[90m12:00:40 AM[0m] Found 2 errors. Watching for file changes.
Program root files: ["/user/username/projects/myproject/a.ts","/user/username/projects/myproject/b.ts","/user/username/projects/myproject/c.ts","/user/username/projects/myproject/d.ts","/user/username/projects/myproject/e.ts"]
Program options: {"watch":true,"configFilePath":"/user/username/projects/myproject/tsconfig.json"}
Program files::
/a/lib/lib.d.ts
/user/username/projects/myproject/a.ts
/user/username/projects/myproject/b.ts
/user/username/projects/myproject/c.ts
/user/username/projects/myproject/d.ts
/user/username/projects/myproject/e.ts
Semantic diagnostics in builder refreshed for::
/a/lib/lib.d.ts
/user/username/projects/myproject/a.ts
/user/username/projects/myproject/b.ts
/user/username/projects/myproject/c.ts
/user/username/projects/myproject/d.ts
/user/username/projects/myproject/e.ts
WatchedFiles::
/user/username/projects/myproject/tsconfig.json:
{"fileName":"/user/username/projects/myproject/tsconfig.json","pollingInterval":250}
/user/username/projects/myproject/a.ts:
{"fileName":"/user/username/projects/myproject/a.ts","pollingInterval":250}
/user/username/projects/myproject/b.ts:
{"fileName":"/user/username/projects/myproject/b.ts","pollingInterval":250}
/user/username/projects/myproject/c.ts:
{"fileName":"/user/username/projects/myproject/c.ts","pollingInterval":250}
/user/username/projects/myproject/d.ts:
{"fileName":"/user/username/projects/myproject/d.ts","pollingInterval":250}
/user/username/projects/myproject/e.ts:
{"fileName":"/user/username/projects/myproject/e.ts","pollingInterval":250}
/a/lib/lib.d.ts:
{"fileName":"/a/lib/lib.d.ts","pollingInterval":250}
FsWatches::
FsWatchesRecursive::
/user/username/projects/myproject/node_modules/@types:
{"directoryName":"/user/username/projects/myproject/node_modules/@types","fallbackPollingInterval":500,"fallbackOptions":{"watchFile":"PriorityPollingInterval"}}
/user/username/projects/myproject:
{"directoryName":"/user/username/projects/myproject","fallbackPollingInterval":500,"fallbackOptions":{"watchFile":"PriorityPollingInterval"}}
exitCode:: ExitStatus.undefined
Change:: Rename property x2 to x of interface Coords
//// [/user/username/projects/myproject/a.ts]
export interface Point {
name: string;
c: Coords;
}
export interface Coords {
x: number;
y: number;
}
//// [/user/username/projects/myproject/a.js] file written with same contents
//// [/user/username/projects/myproject/b.js] file written with same contents
Output::
>> Screen clear
[[90m12:00:44 AM[0m] File change detected. Starting incremental compilation...
[[90m12:00:51 AM[0m] Found 0 errors. Watching for file changes.
Program root files: ["/user/username/projects/myproject/a.ts","/user/username/projects/myproject/b.ts","/user/username/projects/myproject/c.ts","/user/username/projects/myproject/d.ts","/user/username/projects/myproject/e.ts"]
Program options: {"watch":true,"configFilePath":"/user/username/projects/myproject/tsconfig.json"}
Program files::
/a/lib/lib.d.ts
/user/username/projects/myproject/a.ts
/user/username/projects/myproject/b.ts
/user/username/projects/myproject/c.ts
/user/username/projects/myproject/d.ts
/user/username/projects/myproject/e.ts
Semantic diagnostics in builder refreshed for::
/user/username/projects/myproject/a.ts
/user/username/projects/myproject/b.ts
/user/username/projects/myproject/c.ts
/user/username/projects/myproject/d.ts
WatchedFiles::
/user/username/projects/myproject/tsconfig.json:
{"fileName":"/user/username/projects/myproject/tsconfig.json","pollingInterval":250}
/user/username/projects/myproject/a.ts:
{"fileName":"/user/username/projects/myproject/a.ts","pollingInterval":250}
/user/username/projects/myproject/b.ts:
{"fileName":"/user/username/projects/myproject/b.ts","pollingInterval":250}
/user/username/projects/myproject/c.ts:
{"fileName":"/user/username/projects/myproject/c.ts","pollingInterval":250}
/user/username/projects/myproject/d.ts:
{"fileName":"/user/username/projects/myproject/d.ts","pollingInterval":250}
/user/username/projects/myproject/e.ts:
{"fileName":"/user/username/projects/myproject/e.ts","pollingInterval":250}
/a/lib/lib.d.ts:
{"fileName":"/a/lib/lib.d.ts","pollingInterval":250}
FsWatches::
FsWatchesRecursive::
/user/username/projects/myproject/node_modules/@types:
{"directoryName":"/user/username/projects/myproject/node_modules/@types","fallbackPollingInterval":500,"fallbackOptions":{"watchFile":"PriorityPollingInterval"}}
/user/username/projects/myproject:
{"directoryName":"/user/username/projects/myproject","fallbackPollingInterval":500,"fallbackOptions":{"watchFile":"PriorityPollingInterval"}}
exitCode:: ExitStatus.undefined
| apache-2.0 |
vinicarfaria/Servlet | web/index.html | 4905 | <!DOCTYPE html>
<!--
To change this license header, choose License Headers in Project Properties.
To change this template file, choose Tools | Templates
and open the template in the editor.
-->
<html>
<head>
<title>POGOBOX</title>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<script src="js/jquery-3.1.1.min.js"></script>
<script>
function SalvarPokemonsJSON() {
$('#alerta').empty();
//$("div#loading").toggle();
//date : $("for#id").serialize();
jQuery.ajax(
{
type: "POST",
cache: false,
url: "PokemonServlet",
data: $("form#frm-cadastra-pokemon").serialize(),
dataType: "json",
success: function () {
},
error: function () {
},
complete: function () {
$('#alerta').append(""+$('#txt-nome').val()+" cadastrado com sucesso!");
$('#txt-nome').val('');
$('#txt-cp').val('');
$('#txt-candy').val('');
var disp = $('#tabela').css('display');
if (disp !== 'none'){
CarregarPokemonsJSON();
}
}
}
);
return false;
}
function CarregarPokemonsJSON() {
$('#tabela').empty();
$.ajax({
type: "POST",
cache: false,
url: "LPokemonServlet",
success: function (dados) {
                    $('#tabela').append('<tr><th>Nome</th><th>CP</th><th>Genero</th><th>Candy</th></tr>');
                    for (var i = 0; i < dados.length; i++) {
                        $('#tabela').append('<tr><td>'+dados[i].nome+'</td><td>'+dados[i].cp+'</td><td>'+dados[i].gen+'</td><td>'+dados[i].candy+'</td></tr>');
}
},
error: function () {
},
complete: function () {
var disp = $('#tabela').css('display');
if (disp == 'none'){
$('#tabela').toggle();
}
}
});
}
function EsconderPokemons() {
$('#tabela').toggle();
}
</script>
<style>
.container {
width: 500px;
clear: both;
}
.container input {
width: 100%;
clear: both;
border-radius: 3px;
margin-bottom: 5px;
}
table {
border-collapse: collapse;
width: 100%;
}
th, td {
text-align: center;
padding: 8px;
}
tr:nth-child(even){background-color: #f2f2f2}
</style>
</head>
<body>
<div class="container">
<form id="frm-cadastra-pokemon">
<label for="txt-nome">Nome do Pokemon:</label>
<input id="txt-nome" name="txt-nome" type="text" />
<br />
<label for="txt-cp">CP:</label>
<input id="txt-cp" name="txt-cp" type="text" />
<br />
<label for="txt-gen">Gênero:</label>
<select id="txt-gen" name="txt-gen" >
<option value="Masculino">Masculino</option>
<option value="Feminino">Feminino</option>
</select>
<br />
<label for="txt-candy">Candy:</label>
<input id="txt-candy" name="txt-candy" type="text" />
<br />
<br />
</form>
<button style="margin-bottom: 5px" onclick="SalvarPokemonsJSON();">Salvar</button><br />
<label id="alerta" style="width:100%; margin: 5px"></label><br />
<button onclick="CarregarPokemonsJSON();">Mostrar Pokémons</button>
<button style="display:inline-block" onclick="EsconderPokemons();">Esconder Pokémons</button>
<br />
<br />
<table id="tabela" style="width:100%; display: none">
</table>
</div>
</body>
</html>
| apache-2.0 |
PaoloRotolo/AppIntro | example/src/main/java/com/amqtech/opensource/appintroexample/util/FragmentStatePagerAdapter.java | 7938 | package com.amqtech.opensource.appintroexample.util;
/**
* Created by andrew on 11/17/16.
*/
import android.app.Fragment;
import android.app.FragmentManager;
import android.app.FragmentTransaction;
import android.os.Build;
import android.os.Bundle;
import android.os.Parcelable;
import androidx.fragment.app.FragmentPagerAdapter;
import androidx.viewpager.widget.PagerAdapter;
import android.view.View;
import android.view.ViewGroup;
import java.util.ArrayList;
/**
* Implementation of {@link PagerAdapter} that
* uses a {@link Fragment} to manage each page. This class also handles
* saving and restoring of fragment's state.
* <p>
* <p>This version of the pager is more useful when there are a large number
* of pages, working more like a list view. When pages are not visible to
* the user, their entire fragment may be destroyed, only keeping the saved
* state of that fragment. This allows the pager to hold on to much less
* memory associated with each visited page as compared to
* {@link FragmentPagerAdapter} at the cost of potentially more overhead when
* switching between pages.
* <p>
* <p>When using FragmentPagerAdapter the host ViewPager must have a
* valid ID set.</p>
* <p>
* <p>Subclasses only need to implement {@link #getItem(int)}
* and {@link #getCount()} to have a working adapter.
* <p>
* <p>Here is an example implementation of a pager containing fragments of
* lists:
* <p>
* {@sample frameworks/support/samples/Support13Demos/src/com/example/android/supportv13/app/FragmentStatePagerSupport.java
* complete}
* <p>
* <p>The <code>R.layout.fragment_pager</code> resource of the top-level fragment is:
* <p>
* {@sample frameworks/support/samples/Support13Demos/res/layout/fragment_pager.xml
* complete}
* <p>
* <p>The <code>R.layout.fragment_pager_list</code> resource containing each
* individual fragment's layout is:
* <p>
* {@sample frameworks/support/samples/Support13Demos/res/layout/fragment_pager_list.xml
* complete}
*/
public abstract class FragmentStatePagerAdapter extends PagerAdapter {
private static final String TAG = "FragmentStatePagerAdapter";
private static final boolean DEBUG = false;
private final FragmentManager mFragmentManager;
private FragmentTransaction mCurTransaction = null;
private ArrayList<Fragment.SavedState> mSavedState = new ArrayList<Fragment.SavedState>();
private ArrayList<Fragment> mFragments = new ArrayList<Fragment>();
private Fragment mCurrentPrimaryItem = null;
public FragmentStatePagerAdapter(FragmentManager fm) {
mFragmentManager = fm;
}
/**
* Return the Fragment associated with a specified position.
*/
public abstract Fragment getItem(int position);
@Override
public void startUpdate(ViewGroup container) {
if (container.getId() == View.NO_ID) {
throw new IllegalStateException("ViewPager with adapter " + this
+ " requires a view id");
}
}
@Override
public Object instantiateItem(ViewGroup container, int position) {
// If we already have this item instantiated, there is nothing
// to do. This can happen when we are restoring the entire pager
// from its saved state, where the fragment manager has already
// taken care of restoring the fragments we previously had instantiated.
if (mFragments.size() > position) {
Fragment f = mFragments.get(position);
if (f != null) {
return f;
}
}
if (mCurTransaction == null) {
mCurTransaction = mFragmentManager.beginTransaction();
}
Fragment fragment = getItem(position);
if (mSavedState.size() > position) {
Fragment.SavedState fss = mSavedState.get(position);
if (fss != null) {
fragment.setInitialSavedState(fss);
}
}
while (mFragments.size() <= position) {
mFragments.add(null);
}
fragment.setMenuVisibility(false);
setFragmentUserVisibleHint(fragment);
mFragments.set(position, fragment);
mCurTransaction.add(container.getId(), fragment);
return fragment;
}
public void setFragmentUserVisibleHint(Fragment fragment) {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH_MR1) {
fragment.setUserVisibleHint(false);
}
}
@Override
public void destroyItem(ViewGroup container, int position, Object object) {
Fragment fragment = (Fragment) object;
if (mCurTransaction == null) {
mCurTransaction = mFragmentManager.beginTransaction();
}
while (mSavedState.size() <= position) {
mSavedState.add(null);
}
mSavedState.set(position, fragment.isAdded()
? mFragmentManager.saveFragmentInstanceState(fragment) : null);
mFragments.set(position, null);
mCurTransaction.remove(fragment);
}
@Override
public void setPrimaryItem(ViewGroup container, int position, Object object) {
Fragment fragment = (Fragment) object;
if (fragment != mCurrentPrimaryItem) {
if (mCurrentPrimaryItem != null) {
mCurrentPrimaryItem.setMenuVisibility(false);
setFragmentUserVisibleHint(mCurrentPrimaryItem);
}
if (fragment != null) {
fragment.setMenuVisibility(true);
setFragmentUserVisibleHint(fragment);
}
mCurrentPrimaryItem = fragment;
}
}
@Override
public void finishUpdate(ViewGroup container) {
if (mCurTransaction != null) {
            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
                mCurTransaction.commitNowAllowingStateLoss();
            } else {
                // commitNowAllowingStateLoss requires API 24; commit and flush explicitly on older releases
                mCurTransaction.commitAllowingStateLoss();
                mFragmentManager.executePendingTransactions();
            }
mCurTransaction = null;
}
}
@Override
public boolean isViewFromObject(View view, Object object) {
return ((Fragment) object).getView() == view;
}
@Override
public Parcelable saveState() {
Bundle state = null;
if (mSavedState.size() > 0) {
state = new Bundle();
Fragment.SavedState[] fss = new Fragment.SavedState[mSavedState.size()];
mSavedState.toArray(fss);
state.putParcelableArray("states", fss);
}
for (int i = 0; i < mFragments.size(); i++) {
Fragment f = mFragments.get(i);
if (f != null && f.isAdded()) {
if (state == null) {
state = new Bundle();
}
String key = "f" + i;
mFragmentManager.putFragment(state, key, f);
}
}
return state;
}
@Override
public void restoreState(Parcelable state, ClassLoader loader) {
if (state != null) {
Bundle bundle = (Bundle) state;
bundle.setClassLoader(loader);
Parcelable[] fss = bundle.getParcelableArray("states");
mSavedState.clear();
mFragments.clear();
if (fss != null) {
for (int i = 0; i < fss.length; i++) {
mSavedState.add((Fragment.SavedState) fss[i]);
}
}
Iterable<String> keys = bundle.keySet();
for (String key : keys) {
if (key.startsWith("f")) {
int index = Integer.parseInt(key.substring(1));
Fragment f = mFragmentManager.getFragment(bundle, key);
if (f != null) {
while (mFragments.size() <= index) {
mFragments.add(null);
}
f.setMenuVisibility(false);
mFragments.set(index, f);
}
}
}
}
}
}
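// Hedged usage sketch (hypothetical subclass, not part of this file): as the class javadoc
// notes, subclasses only need to implement getItem() and getCount().
//
//   class SlideAdapter extends FragmentStatePagerAdapter {
//       private final List<Fragment> slides;
//       SlideAdapter(FragmentManager fm, List<Fragment> slides) {
//           super(fm);
//           this.slides = slides;
//       }
//       @Override public Fragment getItem(int position) { return slides.get(position); }
//       @Override public int getCount() { return slides.size(); }
//   }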
| apache-2.0 |
renekrie/querqy | querqy-core/src/test/java/querqy/rewrite/commonrules/model/FilterInstructionTest.java | 5024 | package querqy.rewrite.commonrules.model;
import static java.util.Collections.singletonList;
import static org.junit.Assert.assertEquals;
import org.junit.Test;
import querqy.model.*;
import querqy.rewrite.commonrules.AbstractCommonRulesTest;
import querqy.rewrite.commonrules.CommonRulesRewriter;
import querqy.model.Input;
import java.util.Collection;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static querqy.QuerqyMatchers.*;
import static querqy.QuerqyMatchers.dmq;
import static querqy.QuerqyMatchers.term;
import static querqy.rewrite.commonrules.select.SelectionStrategyFactory.DEFAULT_SELECTION_STRATEGY;
/**
* Created by rene on 08/12/2015.
*/
public class FilterInstructionTest extends AbstractCommonRulesTest {
@Test
public void testThatBoostQueriesWithMustClauseUseMM100ByDefault() {
RulesCollectionBuilder builder = new TrieMapRulesCollectionBuilder(false);
FilterInstruction filterInstruction = new FilterInstruction(makeQuery("a b").getUserQuery());
builder.addRule(new Input.SimpleInput(singletonList(mkTerm("x")), false, false, "x"),
new Instructions(1, "1", singletonList(filterInstruction)));
RulesCollection rules = builder.build();
CommonRulesRewriter rewriter = new CommonRulesRewriter(rules, DEFAULT_SELECTION_STRATEGY);
ExpandedQuery query = makeQuery("x");
Collection<QuerqyQuery<?>> filterQueries = rewriter.rewrite(query, new EmptySearchEngineRequestAdapter())
.getFilterQueries();
QuerqyQuery<?> qq = filterQueries.iterator().next();
assertTrue(qq instanceof BooleanQuery);
assertThat((BooleanQuery) qq,
bq(
dmq(must(), term("a", true)),
dmq(must(), term("b", true))
)
);
}
@Test
public void testPurelyNegativeFilterQuery() {
RulesCollectionBuilder builder = new TrieMapRulesCollectionBuilder(true);
FilterInstruction filterInstruction = new FilterInstruction(makeQuery("-ab").getUserQuery());
builder.addRule(new Input.SimpleInput(singletonList(mkTerm("x")), false, false, "x"),
new Instructions(1, "1", singletonList(filterInstruction)));
RulesCollection rules = builder.build();
CommonRulesRewriter rewriter = new CommonRulesRewriter(rules, DEFAULT_SELECTION_STRATEGY);
ExpandedQuery query = makeQuery("x");
Collection<QuerqyQuery<?>> filterQueries = rewriter.rewrite(query, new EmptySearchEngineRequestAdapter())
.getFilterQueries();
assertNotNull(filterQueries);
assertEquals(1, filterQueries.size());
QuerqyQuery<?> qq = filterQueries.iterator().next();
assertTrue(qq instanceof BooleanQuery);
assertThat((BooleanQuery) qq,
bq(
should(),
dmq(
mustNot(),
term("ab", true)
)
)
);
}
@Test
public void testThatFilterQueriesAreMarkedAsGenerated() {
RulesCollectionBuilder builder = new TrieMapRulesCollectionBuilder(false);
FilterInstruction filterInstruction = new FilterInstruction(makeQuery("a").getUserQuery());
builder.addRule(new Input.SimpleInput(singletonList(mkTerm("x")), false, false, "x"),
new Instructions(1, "1", singletonList(filterInstruction)));
RulesCollection rules = builder.build();
CommonRulesRewriter rewriter = new CommonRulesRewriter(rules, DEFAULT_SELECTION_STRATEGY);
ExpandedQuery query = makeQuery("x");
Collection<QuerqyQuery<?>> filterQueries = rewriter.rewrite(query, new EmptySearchEngineRequestAdapter())
.getFilterQueries();
QuerqyQuery<?> qq = filterQueries.iterator().next();
assertTrue(qq instanceof BooleanQuery);
assertThat((BooleanQuery) qq,
bq(
dmq(must(), term("a", true))
)
);
}
@Test
public void testThatMainQueryIsNotMarkedAsGenerated() {
RulesCollectionBuilder builder = new TrieMapRulesCollectionBuilder(false);
FilterInstruction filterInstruction = new FilterInstruction(makeQuery("a").getUserQuery());
builder.addRule(new Input.SimpleInput(singletonList(mkTerm("x")), false, false, "x"),
new Instructions(1, "1", singletonList(filterInstruction)));
RulesCollection rules = builder.build();
CommonRulesRewriter rewriter = new CommonRulesRewriter(rules, DEFAULT_SELECTION_STRATEGY);
ExpandedQuery query = makeQuery("x");
QuerqyQuery<?> mainQuery = rewriter.rewrite(query, new EmptySearchEngineRequestAdapter()).getUserQuery();
assertFalse(mainQuery.isGenerated());
}
}
| apache-2.0 |
apache/derby | java/org.apache.derby.engine/org/apache/derby/iapi/sql/compile/OptimizerPlan.java | 12716 | /*
Derby - Class org.apache.derby.iapi.sql.compile.OptimizerPlan
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to you under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.apache.derby.iapi.sql.compile;
import org.apache.derby.catalog.AliasInfo;
import org.apache.derby.shared.common.error.StandardException;
import org.apache.derby.shared.common.reference.SQLState;
import org.apache.derby.iapi.sql.StatementUtil;
import org.apache.derby.iapi.sql.compile.CompilerContext;
import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;
import org.apache.derby.iapi.sql.dictionary.AliasDescriptor;
import org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor;
import org.apache.derby.iapi.sql.dictionary.DataDictionary;
import org.apache.derby.iapi.sql.dictionary.SchemaDescriptor;
import org.apache.derby.iapi.sql.dictionary.UniqueTupleDescriptor;
import org.apache.derby.iapi.util.IdUtil;
/**
* <p>
* High level description of a plan for consideration by the Optimizer.
* This is used to specify a complete plan via optimizer overrides. A
* plan is a tree whose interior nodes are join operators and whose
* leaves are row sources (conglomerates or tableFunctions).
* </p>
*/
public abstract class OptimizerPlan
{
////////////////////////////////////////////////////////////////////////
//
// CONSTANTS
//
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
//
// FACTORY METHODS
//
////////////////////////////////////////////////////////////////////////
/**
* <p>
* Make a RowSource corresponding to the given tuple descriptor.
* </p>
*/
public static RowSource makeRowSource( UniqueTupleDescriptor utd, DataDictionary dd )
throws StandardException
{
if ( utd == null ) { return null; }
else if ( utd instanceof ConglomerateDescriptor )
{
return new ConglomerateRS( (ConglomerateDescriptor) utd, dd );
}
else if ( utd instanceof AliasDescriptor )
{
return new TableFunctionRS( (AliasDescriptor) utd );
}
else { return null; }
}
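    // Hedged illustration (placeholder names): an OptimizerPlan is a small tree built from the
    // nested classes below, e.g. a left-deep join of a conglomerate and a table function:
    //
    //   OptimizerPlan plan = new Join(
    //           someJoinStrategy,                       // hypothetical JoinStrategy instance
    //           new ConglomerateRS("APP", "T1_INDEX"),  // hypothetical schema/conglomerate names
    //           new TableFunctionRS("APP", "F1"));      // hypothetical table function name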
////////////////////////////////////////////////////////////////////////
//
// ABSTRACT BEHAVIOR
//
////////////////////////////////////////////////////////////////////////
/**
* <p>
* Bind the conglomerate and table function names in this plan.
* </p>
*
* @param dataDictionary DataDictionary to bind against.
*/
public abstract void bind
(
DataDictionary dataDictionary,
LanguageConnectionContext lcc,
CompilerContext cc
)
throws StandardException;
/**
* <p>
* Return true if this the schema and RowSource names have been resolved.
* </p>
*/
public abstract boolean isBound();
/**
* <p>
* Count the number of leaf nodes under (and including) this node.
* </p>
*/
public abstract int countLeafNodes();
/**
* <p>
* Get the leftmost leaf node in this plan.
* </p>
*/
public abstract OptimizerPlan leftmostLeaf();
/**
* <p>
* Return true if this plan is a (left) leading prefix of the other plan.
* </p>
*/
public abstract boolean isLeftPrefixOf( OptimizerPlan that );
////////////////////////////////////////////////////////////////////////
//
// INNER CLASSES
//
////////////////////////////////////////////////////////////////////////
public static final class Join extends OptimizerPlan
{
final JoinStrategy strategy;
final OptimizerPlan leftChild;
final OptimizerPlan rightChild;
private boolean _isBound;
private int _leafNodeCount = 0;
public Join
(
JoinStrategy strategy,
OptimizerPlan leftChild,
OptimizerPlan rightChild
)
{
this.strategy = strategy;
this.leftChild = leftChild;
this.rightChild = rightChild;
}
public void bind
(
DataDictionary dataDictionary,
LanguageConnectionContext lcc,
CompilerContext cc
)
throws StandardException
{
// only left-deep trees allowed at this time
if ( !( rightChild instanceof RowSource ) )
{
throw StandardException.newException( SQLState.LANG_NOT_LEFT_DEEP );
}
leftChild.bind( dataDictionary, lcc, cc );
rightChild.bind( dataDictionary, lcc, cc );
_isBound = true;
}
public boolean isBound() { return _isBound; }
public int countLeafNodes()
{
if ( _leafNodeCount <= 0 ) { _leafNodeCount = leftChild.countLeafNodes() + rightChild.countLeafNodes(); }
return _leafNodeCount;
}
public OptimizerPlan leftmostLeaf() { return leftChild.leftmostLeaf(); }
public boolean isLeftPrefixOf( OptimizerPlan other )
{
if ( !(other instanceof Join) ) { return false; }
Join that = (Join) other;
int thisLeafCount = this.countLeafNodes();
int thatLeafCount = that.countLeafNodes();
if ( thisLeafCount > thatLeafCount ) { return false; }
else if ( thisLeafCount < thatLeafCount ) { return isLeftPrefixOf( that.leftChild ); }
else { return this.equals( that ); }
}
public String toString()
{
return
"( " +
leftChild.toString() +
" " + strategy.getOperatorSymbol() + " " +
rightChild.toString() +
" )";
}
public boolean equals( Object other )
{
if ( other == null ) { return false; }
if ( !(other instanceof Join) ) { return false; }
Join that = (Join) other;
if ( !this.strategy.getOperatorSymbol().equals( that.strategy.getOperatorSymbol() ) ) { return false; }
return this.leftChild.equals( that.leftChild) && this.rightChild.equals( that.rightChild );
}
}
/** Generic plan for row sources we don't understand */
public static class DeadEnd extends OptimizerPlan
{
private String _name;
public DeadEnd( String name )
{
_name = name;
}
public void bind
(
DataDictionary dataDictionary,
LanguageConnectionContext lcc,
CompilerContext cc
)
throws StandardException
{}
public boolean isBound() { return true; }
public int countLeafNodes() { return 1; }
public OptimizerPlan leftmostLeaf() { return this; }
public boolean isLeftPrefixOf( OptimizerPlan that )
{
return this.equals( that.leftmostLeaf() );
}
public String toString() { return _name; }
}
public abstract static class RowSource<D extends UniqueTupleDescriptor> extends OptimizerPlan
{
protected String _schemaName;
protected String _rowSourceName;
protected SchemaDescriptor _schema;
protected D _descriptor;
public RowSource( String schemaName, String rowSourceName )
{
_schemaName = schemaName;
_rowSourceName = rowSourceName;
}
protected RowSource() {}
/** Get the UniqueTupleDescriptor bound to this RowSource */
public D getDescriptor() { return _descriptor; }
public void bind
(
DataDictionary dataDictionary,
LanguageConnectionContext lcc,
CompilerContext cc
)
throws StandardException
{
// bind the schema name
if ( _schema == null )
{
_schema = StatementUtil.getSchemaDescriptor( _schemaName, true, dataDictionary, lcc, cc );
_schemaName = _schema.getSchemaName();
}
}
public boolean isBound() { return (_descriptor != null); }
public int countLeafNodes() { return 1; }
public OptimizerPlan leftmostLeaf() { return this; }
public boolean isLeftPrefixOf( OptimizerPlan that )
{
return this.equals( that.leftmostLeaf() );
}
public String toString()
{
return IdUtil.mkQualifiedName( _schemaName, _rowSourceName );
}
public boolean equals( Object other )
{
if ( other == null ) { return false; }
if ( other.getClass() != this.getClass() ) { return false; }
RowSource that = (RowSource) other;
if ( !( this.isBound() && that.isBound() ) ) { return false; }
return this._schemaName.equals( that._schemaName ) && this._rowSourceName.equals( that._rowSourceName );
}
}
public static final class ConglomerateRS extends RowSource<ConglomerateDescriptor>
{
public ConglomerateRS( String schemaName, String rowSourceName ) { super( schemaName, rowSourceName ); }
public ConglomerateRS( ConglomerateDescriptor cd, DataDictionary dataDictionary )
throws StandardException
{
_descriptor = cd;
_schema = dataDictionary.getSchemaDescriptor( cd.getSchemaID(), null );
_schemaName = _schema.getSchemaName();
_rowSourceName = cd.getConglomerateName();
}
public void bind
(
DataDictionary dataDictionary,
LanguageConnectionContext lcc,
CompilerContext cc
)
throws StandardException
{
super.bind( dataDictionary, lcc, cc );
if ( _descriptor == null )
{
_descriptor = dataDictionary.getConglomerateDescriptor( _rowSourceName, _schema, false );
}
if ( _descriptor == null )
{
throw StandardException.newException
( SQLState.LANG_INDEX_NOT_FOUND, _schemaName + "." + _rowSourceName );
}
}
}
public static final class TableFunctionRS extends RowSource<AliasDescriptor>
{
public TableFunctionRS( String schemaName, String rowSourceName ) { super( schemaName, rowSourceName ); }
public TableFunctionRS( AliasDescriptor ad )
{
_descriptor = ad;
_schemaName = ad.getSchemaName();
_rowSourceName = ad.getName();
}
public void bind
(
DataDictionary dataDictionary,
LanguageConnectionContext lcc,
CompilerContext cc
)
throws StandardException
{
super.bind( dataDictionary, lcc, cc );
if ( _descriptor == null )
{
_descriptor = dataDictionary.getAliasDescriptor
( _schema.getUUID().toString(), _rowSourceName, AliasInfo.ALIAS_NAME_SPACE_FUNCTION_AS_CHAR );
}
if ( _descriptor == null )
{
throw StandardException.newException
(
SQLState.LANG_OBJECT_NOT_FOUND,
AliasDescriptor.getAliasType( AliasInfo.ALIAS_TYPE_FUNCTION_AS_CHAR ),
_schemaName + "." + _rowSourceName
);
}
}
public String toString() { return super.toString() + "()"; }
}
}
| apache-2.0 |
rackerlabs/lunr | lunr/common/lock.py | 5070 | # Copyright (c) 2011-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import fcntl
import os
import json
class NullResource(object):
""" Implments the lock interface for spawn. """
def __init__(self, *args, **kwargs):
self.owned = False
def remove(self):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, trace):
pass
def acquire(self, info):
pass
class LockFile(object):
""" Manages locking and unlocking an open file handle
can also be used as a context manager
"""
def __init__(self, fd, lock_operation=fcntl.LOCK_EX,
unlock_operation=fcntl.LOCK_UN):
self.fd = fd
self.file_name = None
if type(fd) != int:
self.fd = self.open(fd)
self.file_name = fd
self.lock_operation = lock_operation
self.unlock_operation = unlock_operation
def __enter__(self):
self.lock(self.lock_operation)
return self
def __exit__(self, exc_type, exc_value, trace):
self.unlock(self.unlock_operation)
return False
def lock(self, operation=fcntl.LOCK_EX):
fcntl.flock(self.fd, operation)
def unlock(self, operation=fcntl.LOCK_UN):
fcntl.flock(self.fd, operation)
def write(self, data):
os.lseek(self.fd, 0, os.SEEK_SET)
os.ftruncate(self.fd, 0)
os.write(self.fd, data)
os.fsync(self.fd)
def read(self):
size = os.lseek(self.fd, 0, os.SEEK_END)
os.lseek(self.fd, 0, os.SEEK_SET)
return os.read(self.fd, size)
def close(self):
try:
os.close(self.fd)
        except (TypeError, OSError):
pass
self.fd = None
def unlink(self):
self.close()
try:
os.unlink(self.file_name)
except OSError, e:
pass
def _createdir(self, file_name):
try:
dir = os.path.dirname(file_name)
os.makedirs(dir)
except OSError, e:
# ignore if already exists
if e.errno != errno.EEXIST:
raise
def open(self, file_name):
for i in range(0, 2):
try:
# Attempt to create the file
return os.open(file_name, os.O_RDWR | os.O_CREAT)
except OSError, e:
# No such file or directory
if e.errno == errno.ENOENT:
# create the dir and try again
self._createdir(file_name)
continue
# Unknown error
raise
raise RuntimeError("failed to create '%s'" % file_name)
class JsonLockFile(LockFile):
""" Manages a lock file that contains json """
def update(self, info):
data = self.read()
data.update(info)
self.write(data)
def get(self, key, default=None):
try:
data = self.read()
return data[key]
except KeyError:
return default
def write(self, data):
super(JsonLockFile, self).write(json.dumps(data))
def read(self):
try:
return json.loads(super(JsonLockFile, self).read())
except ValueError, e:
return {}
class ResourceFile(JsonLockFile):
""" Manages ownership of a resource file,
can also be used as a context manager
"""
def __init__(self, file_name):
self.file_name = file_name
self.owned = False
self.fd = None
def __enter__(self):
self.fd = self.open(self.file_name)
super(ResourceFile, self).lock()
return self
def __exit__(self, exc_type, exc_value, trace):
super(ResourceFile, self).unlock()
self.close()
return False
def used(self):
""" Returns true if the resource file is in use by someone """
info = self.read()
# If pid is alive, the volume is owned by someone else
if 'pid' in info and self.alive(info['pid']):
return info
return False
def alive(self, pid):
try:
os.kill(pid, 0)
return True
except OSError, e:
return False
def acquire(self, info):
""" Acquire ownership of the file by writing our pid information """
self.update(info)
if 'pid' in info:
# We own the resource
self.owned = True
def remove(self):
if self.owned:
self.unlink()
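# Hedged usage sketch (hypothetical path): ResourceFile holds the flock for the duration of
# the "with" block, checks whether a living process already owns the file, and records
# ownership by writing our pid.
#
#   with ResourceFile('/var/lib/lunr/volumes/vol-0001.resource') as resource:
#       if not resource.used():
#           resource.acquire({'pid': os.getpid()})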
| apache-2.0 |
ipdcode/containerfs | cmd/datanode/main.go | 1965 | // Copyright (c) 2017, TIG All rights reserved.
// Use of this source code is governed by a Apache License 2.0 that can be found in the LICENSE file.
package main
import (
"flag"
"fmt"
"os"
"runtime"
"strings"
"github.com/tiglabs/containerfs/datanode"
"github.com/tiglabs/containerfs/logger"
"github.com/tiglabs/containerfs/utils"
)
func init() {
var loglevel string
var volMgrHosts string
flag.StringVar(&datanode.DtAddr.Host, "host", "127.0.0.1:8801", "ContainerFS DataNode Host")
flag.StringVar(&datanode.DtAddr.Tier, "tier", "sas", "ContainerFS DataNode Storage Medium")
flag.StringVar(&datanode.DtAddr.Path, "datapath", "", "ContainerFS DataNode Data Path")
flag.StringVar(&datanode.DtAddr.Log, "logpath", "/export/Logs/containerfs/logs/", "ContainerFS Log Path")
flag.StringVar(&loglevel, "loglevel", "error", "ContainerFS Log Level")
flag.StringVar(&volMgrHosts, "volmgr", "10.8.64.216,10.8.64.217,10.8.64.218", "ContainerFS VolMgr hosts")
flag.Parse()
if len(os.Args) >= 2 && (os.Args[1] == "version") {
fmt.Println(utils.Version())
os.Exit(0)
}
tmp := strings.Split(volMgrHosts, ",")
datanode.VolMgrHosts = make([]string, 3)
datanode.VolMgrHosts[0] = tmp[0] + ":7703"
datanode.VolMgrHosts[1] = tmp[1] + ":7713"
datanode.VolMgrHosts[2] = tmp[2] + ":7723"
datanode.DtAddr.Flag = datanode.DtAddr.Path + "/.registryflag"
logger.SetConsole(true)
logger.SetRollingFile(datanode.DtAddr.Log, "datanode.log", 10, 100, logger.MB) //each 100M rolling
switch loglevel {
case "error":
logger.SetLevel(logger.ERROR)
case "debug":
logger.SetLevel(logger.DEBUG)
case "info":
logger.SetLevel(logger.INFO)
default:
logger.SetLevel(logger.ERROR)
}
_, err := os.Stat(datanode.DtAddr.Path)
if err != nil {
logger.Error("data node statup failed : datanode.DtAddr.Path not exist !")
os.Exit(1)
}
datanode.RegistryToVolMgr()
}
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
datanode.StartDataService()
}
| apache-2.0 |
starkandwayne/cf-cli | Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go | 10464 | package leafnodes_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/internal/leafnodes"
. "github.com/onsi/gomega"
"reflect"
"runtime"
"time"
"github.com/onsi/ginkgo/internal/codelocation"
Failer "github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
)
type runnable interface {
Run() (outcome types.SpecState, failure types.SpecFailure)
CodeLocation() types.CodeLocation
}
func SynchronousSharedRunnerBehaviors(build func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable, componentType types.SpecComponentType, componentIndex int) {
var (
outcome types.SpecState
failure types.SpecFailure
failer *Failer.Failer
componentCodeLocation types.CodeLocation
innerCodeLocation types.CodeLocation
didRun bool
)
BeforeEach(func() {
failer = Failer.New()
componentCodeLocation = codelocation.New(0)
innerCodeLocation = codelocation.New(0)
didRun = false
})
Describe("synchronous functions", func() {
Context("when the function passes", func() {
BeforeEach(func() {
outcome, failure = build(func() {
didRun = true
}, 0, failer, componentCodeLocation).Run()
})
It("should have a succesful outcome", func() {
Ω(didRun).Should(BeTrue())
Ω(outcome).Should(Equal(types.SpecStatePassed))
Ω(failure).Should(BeZero())
})
})
Context("when a failure occurs", func() {
BeforeEach(func() {
outcome, failure = build(func() {
didRun = true
failer.Fail("bam", innerCodeLocation)
panic("should not matter")
}, 0, failer, componentCodeLocation).Run()
})
It("should return the failure", func() {
Ω(didRun).Should(BeTrue())
Ω(outcome).Should(Equal(types.SpecStateFailed))
Ω(failure).Should(Equal(types.SpecFailure{
Message: "bam",
Location: innerCodeLocation,
ForwardedPanic: nil,
ComponentIndex: componentIndex,
ComponentType: componentType,
ComponentCodeLocation: componentCodeLocation,
}))
})
})
Context("when a panic occurs", func() {
BeforeEach(func() {
outcome, failure = build(func() {
didRun = true
innerCodeLocation = codelocation.New(0)
panic("ack!")
}, 0, failer, componentCodeLocation).Run()
})
It("should return the panic", func() {
Ω(didRun).Should(BeTrue())
Ω(outcome).Should(Equal(types.SpecStatePanicked))
innerCodeLocation.LineNumber++
Ω(failure).Should(Equal(types.SpecFailure{
Message: "Test Panicked",
Location: innerCodeLocation,
ForwardedPanic: "ack!",
ComponentIndex: componentIndex,
ComponentType: componentType,
ComponentCodeLocation: componentCodeLocation,
}))
})
})
})
}
func AsynchronousSharedRunnerBehaviors(build func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable, componentType types.SpecComponentType, componentIndex int) {
var (
outcome types.SpecState
failure types.SpecFailure
failer *Failer.Failer
componentCodeLocation types.CodeLocation
innerCodeLocation types.CodeLocation
didRun bool
)
BeforeEach(func() {
failer = Failer.New()
componentCodeLocation = codelocation.New(0)
innerCodeLocation = codelocation.New(0)
didRun = false
})
Describe("asynchronous functions", func() {
var timeoutDuration time.Duration
BeforeEach(func() {
timeoutDuration = time.Duration(1 * float64(time.Second))
})
Context("when running", func() {
It("should run the function as a goroutine, and block until it's done", func() {
initialNumberOfGoRoutines := runtime.NumGoroutine()
numberOfGoRoutines := 0
build(func(done Done) {
didRun = true
numberOfGoRoutines = runtime.NumGoroutine()
close(done)
}, timeoutDuration, failer, componentCodeLocation).Run()
Ω(didRun).Should(BeTrue())
Ω(numberOfGoRoutines).Should(BeNumerically(">=", initialNumberOfGoRoutines+1))
})
})
Context("when the function passes", func() {
BeforeEach(func() {
outcome, failure = build(func(done Done) {
didRun = true
close(done)
}, timeoutDuration, failer, componentCodeLocation).Run()
})
It("should have a succesful outcome", func() {
Ω(didRun).Should(BeTrue())
Ω(outcome).Should(Equal(types.SpecStatePassed))
Ω(failure).Should(BeZero())
})
})
Context("when the function fails", func() {
BeforeEach(func() {
outcome, failure = build(func(done Done) {
didRun = true
failer.Fail("bam", innerCodeLocation)
time.Sleep(20 * time.Millisecond)
panic("doesn't matter")
close(done)
}, 10*time.Millisecond, failer, componentCodeLocation).Run()
})
It("should return the failure", func() {
Ω(didRun).Should(BeTrue())
Ω(outcome).Should(Equal(types.SpecStateFailed))
Ω(failure).Should(Equal(types.SpecFailure{
Message: "bam",
Location: innerCodeLocation,
ForwardedPanic: nil,
ComponentIndex: componentIndex,
ComponentType: componentType,
ComponentCodeLocation: componentCodeLocation,
}))
})
})
Context("when the function times out", func() {
BeforeEach(func() {
outcome, failure = build(func(done Done) {
didRun = true
time.Sleep(20 * time.Millisecond)
panic("doesn't matter")
close(done)
}, 10*time.Millisecond, failer, componentCodeLocation).Run()
})
It("should return the timeout", func() {
Ω(didRun).Should(BeTrue())
Ω(outcome).Should(Equal(types.SpecStateTimedOut))
Ω(failure).Should(Equal(types.SpecFailure{
Message: "Timed out",
Location: componentCodeLocation,
ForwardedPanic: nil,
ComponentIndex: componentIndex,
ComponentType: componentType,
ComponentCodeLocation: componentCodeLocation,
}))
})
})
Context("when the function panics", func() {
BeforeEach(func() {
outcome, failure = build(func(done Done) {
didRun = true
innerCodeLocation = codelocation.New(0)
panic("ack!")
}, 100*time.Millisecond, failer, componentCodeLocation).Run()
})
It("should return the panic", func() {
Ω(didRun).Should(BeTrue())
Ω(outcome).Should(Equal(types.SpecStatePanicked))
innerCodeLocation.LineNumber++
Ω(failure).Should(Equal(types.SpecFailure{
Message: "Test Panicked",
Location: innerCodeLocation,
ForwardedPanic: "ack!",
ComponentIndex: componentIndex,
ComponentType: componentType,
ComponentCodeLocation: componentCodeLocation,
}))
})
})
})
}
func InvalidSharedRunnerBehaviors(build func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable, componentType types.SpecComponentType) {
var (
failer *Failer.Failer
componentCodeLocation types.CodeLocation
innerCodeLocation types.CodeLocation
)
BeforeEach(func() {
failer = Failer.New()
componentCodeLocation = codelocation.New(0)
innerCodeLocation = codelocation.New(0)
})
Describe("invalid functions", func() {
Context("when passed something that's not a function", func() {
It("should panic", func() {
Ω(func() {
build("not a function", 0, failer, componentCodeLocation)
}).Should(Panic())
})
})
Context("when the function takes the wrong kind of argument", func() {
It("should panic", func() {
Ω(func() {
build(func(oops string) {}, 0, failer, componentCodeLocation)
}).Should(Panic())
})
})
Context("when the function takes more than one argument", func() {
It("should panic", func() {
Ω(func() {
build(func(done Done, oops string) {}, 0, failer, componentCodeLocation)
}).Should(Panic())
})
})
})
}
var _ = Describe("Shared RunnableNode behavior", func() {
Describe("It Nodes", func() {
build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable {
return NewItNode("", body, types.FlagTypeFocused, componentCodeLocation, timeout, failer, 3)
}
SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeIt, 3)
AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeIt, 3)
InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeIt)
})
Describe("Measure Nodes", func() {
build := func(body interface{}, _ time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable {
return NewMeasureNode("", func(Benchmarker) {
reflect.ValueOf(body).Call([]reflect.Value{})
}, types.FlagTypeFocused, componentCodeLocation, 10, failer, 3)
}
SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeMeasure, 3)
})
Describe("BeforeEach Nodes", func() {
build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable {
return NewBeforeEachNode(body, componentCodeLocation, timeout, failer, 3)
}
SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeBeforeEach, 3)
AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeBeforeEach, 3)
InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeBeforeEach)
})
Describe("AfterEach Nodes", func() {
build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable {
return NewAfterEachNode(body, componentCodeLocation, timeout, failer, 3)
}
SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeAfterEach, 3)
AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeAfterEach, 3)
InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeAfterEach)
})
Describe("JustBeforeEach Nodes", func() {
build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable {
return NewJustBeforeEachNode(body, componentCodeLocation, timeout, failer, 3)
}
SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeJustBeforeEach, 3)
AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeJustBeforeEach, 3)
InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeJustBeforeEach)
})
})
| apache-2.0 |
tgraf/cilium | pkg/policy/config.go | 1981 | // Copyright 2016-2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package policy
import (
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
)
var (
log = logging.DefaultLogger.WithField(logfields.LogSubsys, "policy")
mutex lock.RWMutex // Protects enablePolicy
enablePolicy string // Whether policy enforcement is enabled.
)
// SetPolicyEnabled sets the policy enablement configuration. Valid values are:
// - endpoint.AlwaysEnforce
// - endpoint.NeverEnforce
// - endpoint.DefaultEnforcement
func SetPolicyEnabled(val string) {
mutex.Lock()
enablePolicy = val
mutex.Unlock()
}
// GetPolicyEnabled returns the policy enablement configuration
func GetPolicyEnabled() string {
mutex.RLock()
val := enablePolicy
mutex.RUnlock()
return val
}
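// Illustrative usage sketch (not part of the original file): set the enforcement
// mode once at startup and read it back later. The "always" literal below is only a
// placeholder; real callers pass one of the endpoint constants listed on
// SetPolicyEnabled.
//
//	SetPolicyEnabled("always")
//	if GetPolicyEnabled() == "always" {
//		log.Info("policy enforcement enabled for all endpoints")
//	}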
// AddOptions are options which can be passed to PolicyAdd
type AddOptions struct {
// Replace if true indicates that existing rules with identical labels should be replaced
Replace bool
// ReplaceWithLabels if present indicates that existing rules with the
// given LabelArray should be deleted.
ReplaceWithLabels labels.LabelArray
	// Generated should be set to true to signal that the policy being inserted
	// was generated by cilium-agent, e.g. the DNS poller.
Generated bool
// The source of this policy, one of api, fqdn or k8s
Source string
}
| apache-2.0 |
jbertouch/elasticsearch | core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java | 39991 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.sort;
import org.apache.lucene.spatial.util.GeoHashUtils;
import org.elasticsearch.Version;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.geo.GeoDistance;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.query.GeoDistanceQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
import org.elasticsearch.test.VersionUtils;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.geoDistanceQuery;
import static org.elasticsearch.index.query.QueryBuilders.geoDistanceRangeQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.closeTo;
import static org.hamcrest.Matchers.equalTo;
public class GeoDistanceIT extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return pluginList(InternalSettingsPlugin.class);
}
public void testSimpleDistance() throws Exception {
Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
.startObject("location").field("type", "geo_point");
if (version.before(Version.V_2_2_0)) {
xContentBuilder.field("lat_lon", true);
}
xContentBuilder.endObject().endObject().endObject().endObject();
assertAcked(prepareCreate("test").setSettings(settings).addMapping("type1", xContentBuilder));
ensureGreen();
indexRandom(true,
client().prepareIndex("test", "type1", "1")
.setSource(jsonBuilder().startObject().field("name", "New York").startObject("location").field("lat", 40.7143528)
.field("lon", -74.0059731).endObject().endObject()),
// to NY: 5.286 km
client().prepareIndex("test", "type1", "2")
.setSource(jsonBuilder().startObject().field("name", "Times Square").startObject("location").field("lat", 40.759011)
.field("lon", -73.9844722).endObject().endObject()),
// to NY: 0.4621 km
client().prepareIndex("test", "type1", "3")
.setSource(jsonBuilder().startObject().field("name", "Tribeca").startObject("location").field("lat", 40.718266)
.field("lon", -74.007819).endObject().endObject()),
// to NY: 1.055 km
client().prepareIndex("test", "type1", "4")
.setSource(jsonBuilder().startObject().field("name", "Wall Street").startObject("location").field("lat", 40.7051157)
.field("lon", -74.0088305).endObject().endObject()),
// to NY: 1.258 km
client().prepareIndex("test", "type1", "5")
.setSource(jsonBuilder().startObject().field("name", "Soho").startObject("location").field("lat", 40.7247222)
.field("lon", -74).endObject().endObject()),
// to NY: 2.029 km
client().prepareIndex("test", "type1", "6")
.setSource(jsonBuilder().startObject().field("name", "Greenwich Village").startObject("location")
.field("lat", 40.731033).field("lon", -73.9962255).endObject().endObject()),
// to NY: 8.572 km
client().prepareIndex("test", "type1", "7").setSource(jsonBuilder().startObject().field("name", "Brooklyn")
.startObject("location").field("lat", 40.65).field("lon", -73.95).endObject().endObject()));
SearchResponse searchResponse = client().prepareSearch() // from NY
.setQuery(geoDistanceQuery("location").distance("3km").point(40.7143528, -74.0059731)).execute().actionGet();
assertHitCount(searchResponse, 5);
assertThat(searchResponse.getHits().hits().length, equalTo(5));
for (SearchHit hit : searchResponse.getHits()) {
assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5"), equalTo("6")));
}
searchResponse = client().prepareSearch() // from NY
.setQuery(geoDistanceQuery("location").distance("3km").point(40.7143528, -74.0059731).optimizeBbox("indexed")).execute()
.actionGet();
assertHitCount(searchResponse, 5);
assertThat(searchResponse.getHits().hits().length, equalTo(5));
for (SearchHit hit : searchResponse.getHits()) {
assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5"), equalTo("6")));
}
// now with a PLANE type
searchResponse = client().prepareSearch() // from NY
.setQuery(geoDistanceQuery("location").distance("3km").geoDistance(GeoDistance.PLANE).point(40.7143528, -74.0059731))
.execute().actionGet();
assertHitCount(searchResponse, 5);
assertThat(searchResponse.getHits().hits().length, equalTo(5));
for (SearchHit hit : searchResponse.getHits()) {
assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5"), equalTo("6")));
}
// factor type is really too small for this resolution
searchResponse = client().prepareSearch() // from NY
.setQuery(geoDistanceQuery("location").distance("2km").point(40.7143528, -74.0059731)).execute().actionGet();
assertHitCount(searchResponse, 4);
assertThat(searchResponse.getHits().hits().length, equalTo(4));
for (SearchHit hit : searchResponse.getHits()) {
assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
}
searchResponse = client().prepareSearch() // from NY
.setQuery(geoDistanceQuery("location").distance("2km").point(40.7143528, -74.0059731).optimizeBbox("indexed")).execute()
.actionGet();
assertHitCount(searchResponse, 4);
assertThat(searchResponse.getHits().hits().length, equalTo(4));
for (SearchHit hit : searchResponse.getHits()) {
assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
}
searchResponse = client().prepareSearch() // from NY
.setQuery(geoDistanceQuery("location").distance("1.242mi").point(40.7143528, -74.0059731)).execute().actionGet();
assertHitCount(searchResponse, 4);
assertThat(searchResponse.getHits().hits().length, equalTo(4));
for (SearchHit hit : searchResponse.getHits()) {
assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
}
searchResponse = client().prepareSearch() // from NY
.setQuery(geoDistanceQuery("location").distance("1.242mi").point(40.7143528, -74.0059731).optimizeBbox("indexed")).execute()
.actionGet();
assertHitCount(searchResponse, 4);
assertThat(searchResponse.getHits().hits().length, equalTo(4));
for (SearchHit hit : searchResponse.getHits()) {
assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
}
searchResponse = client().prepareSearch() // from NY
.setQuery(geoDistanceRangeQuery("location", 40.7143528, -74.0059731).from("1.0km").to("2.0km")).execute().actionGet();
assertHitCount(searchResponse, 2);
assertThat(searchResponse.getHits().hits().length, equalTo(2));
for (SearchHit hit : searchResponse.getHits()) {
assertThat(hit.id(), anyOf(equalTo("4"), equalTo("5")));
}
searchResponse = client().prepareSearch() // from NY
.setQuery(geoDistanceRangeQuery("location", 40.7143528, -74.0059731).from("1.0km").to("2.0km").optimizeBbox("indexed"))
.execute().actionGet();
assertHitCount(searchResponse, 2);
assertThat(searchResponse.getHits().hits().length, equalTo(2));
for (SearchHit hit : searchResponse.getHits()) {
assertThat(hit.id(), anyOf(equalTo("4"), equalTo("5")));
}
searchResponse = client().prepareSearch() // from NY
.setQuery(geoDistanceRangeQuery("location", 40.7143528, -74.0059731).to("2.0km")).execute().actionGet();
assertHitCount(searchResponse, 4);
assertThat(searchResponse.getHits().hits().length, equalTo(4));
searchResponse = client().prepareSearch() // from NY
.setQuery(geoDistanceRangeQuery("location", 40.7143528, -74.0059731).from("2.0km")).execute().actionGet();
assertHitCount(searchResponse, 3);
assertThat(searchResponse.getHits().hits().length, equalTo(3));
// SORTING
searchResponse = client().prepareSearch().setQuery(matchAllQuery())
.addSort(SortBuilders.geoDistanceSort("location", 40.7143528, -74.0059731).order(SortOrder.ASC)).execute()
.actionGet();
assertHitCount(searchResponse, 7);
assertOrderedSearchHits(searchResponse, "1", "3", "4", "5", "6", "2", "7");
searchResponse = client().prepareSearch().setQuery(matchAllQuery())
.addSort(SortBuilders.geoDistanceSort("location", 40.7143528, -74.0059731).order(SortOrder.DESC)).execute()
.actionGet();
assertHitCount(searchResponse, 7);
assertOrderedSearchHits(searchResponse, "7", "2", "6", "5", "4", "3", "1");
}
public void testDistanceSortingMVFields() throws Exception {
Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
.startObject("locations").field("type", "geo_point");
if (version.before(Version.V_2_2_0)) {
xContentBuilder.field("lat_lon", true).field("coerce", true);
}
xContentBuilder.field("ignore_malformed", true).endObject().endObject().endObject().endObject();
assertAcked(prepareCreate("test").setSettings(settings).addMapping("type1", xContentBuilder));
ensureGreen();
client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("names", "New York")
.startObject("locations").field("lat", 40.7143528).field("lon", -74.0059731).endObject().endObject()).execute().actionGet();
client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("names", "New York 2")
.startObject("locations").field("lat", 400.7143528).field("lon", 285.9990269).endObject().endObject()).execute()
.actionGet();
client().prepareIndex("test", "type1", "3")
.setSource(jsonBuilder().startObject().field("names", "Times Square", "Tribeca").startArray("locations")
// to NY: 5.286 km
.startObject().field("lat", 40.759011).field("lon", -73.9844722).endObject()
// to NY: 0.4621 km
.startObject().field("lat", 40.718266).field("lon", -74.007819).endObject().endArray().endObject())
.execute().actionGet();
client().prepareIndex("test", "type1", "4")
.setSource(jsonBuilder().startObject().field("names", "Wall Street", "Soho").startArray("locations")
// to NY: 1.055 km
.startObject().field("lat", 40.7051157).field("lon", -74.0088305).endObject()
// to NY: 1.258 km
.startObject().field("lat", 40.7247222).field("lon", -74).endObject().endArray().endObject())
.execute().actionGet();
client().prepareIndex("test", "type1", "5")
.setSource(jsonBuilder().startObject().field("names", "Greenwich Village", "Brooklyn").startArray("locations")
// to NY: 2.029 km
.startObject().field("lat", 40.731033).field("lon", -73.9962255).endObject()
// to NY: 8.572 km
.startObject().field("lat", 40.65).field("lon", -73.95).endObject().endArray().endObject())
.execute().actionGet();
client().admin().indices().prepareRefresh().execute().actionGet();
// Order: Asc
SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
.addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC)).execute()
.actionGet();
assertHitCount(searchResponse, 5);
assertOrderedSearchHits(searchResponse, "1", "2", "3", "4", "5");
assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
// Order: Asc, Mode: max
searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
.addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC).sortMode("max"))
.execute().actionGet();
assertHitCount(searchResponse, 5);
assertOrderedSearchHits(searchResponse, "1", "2", "4", "3", "5");
assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
// Order: Desc
searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
.addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)).execute()
.actionGet();
assertHitCount(searchResponse, 5);
assertOrderedSearchHits(searchResponse, "5", "3", "4", "2", "1");
assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
// Order: Desc, Mode: min
searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
.addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC).sortMode("min"))
.execute().actionGet();
assertHitCount(searchResponse, 5);
assertOrderedSearchHits(searchResponse, "5", "4", "3", "2", "1");
assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
.addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC))
.execute().actionGet();
assertHitCount(searchResponse, 5);
assertOrderedSearchHits(searchResponse, "1", "2", "4", "3", "5");
assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1157d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(2874d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(5301d, 10d));
searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
.addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode("avg").order(SortOrder.DESC))
.execute().actionGet();
assertHitCount(searchResponse, 5);
assertOrderedSearchHits(searchResponse, "5", "3", "4", "2", "1");
assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(5301.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(2874.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1157.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
try {
client().prepareSearch("test").setQuery(matchAllQuery())
.addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode("sum"));
fail("sum should not be supported for sorting by geo distance");
} catch (IllegalArgumentException e) {
// expected
}
}
// Regression bug:
// https://github.com/elasticsearch/elasticsearch/issues/2851
public void testDistanceSortingWithMissingGeoPoint() throws Exception {
Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
.startObject("locations").field("type", "geo_point");
if (version.before(Version.V_2_2_0)) {
xContentBuilder.field("lat_lon", true);
}
xContentBuilder.endObject().endObject().endObject().endObject();
assertAcked(prepareCreate("test").setSettings(settings).addMapping("type1", xContentBuilder));
ensureGreen();
client().prepareIndex("test", "type1", "1")
.setSource(jsonBuilder().startObject().field("names", "Times Square", "Tribeca").startArray("locations")
// to NY: 5.286 km
.startObject().field("lat", 40.759011).field("lon", -73.9844722).endObject()
// to NY: 0.4621 km
.startObject().field("lat", 40.718266).field("lon", -74.007819).endObject().endArray().endObject())
.execute().actionGet();
client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("names", "Wall Street", "Soho").endObject())
.execute().actionGet();
refresh();
// Order: Asc
SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
.addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC)).execute()
.actionGet();
assertHitCount(searchResponse, 2);
assertOrderedSearchHits(searchResponse, "1", "2");
assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
// Order: Desc
searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
.addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)).execute()
.actionGet();
        // Doc with missing geo point is first, which is consistent with 0.20.x
assertHitCount(searchResponse, 2);
assertOrderedSearchHits(searchResponse, "2", "1");
assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(5286d, 10d));
}
public void testDistanceSortingNestedFields() throws Exception {
Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("company").startObject("properties")
.startObject("name").field("type", "text").endObject().startObject("branches").field("type", "nested")
.startObject("properties").startObject("name").field("type", "text").endObject().startObject("location")
.field("type", "geo_point");
if (version.before(Version.V_2_2_0)) {
xContentBuilder.field("lat_lon", true);
}
xContentBuilder.endObject().endObject().endObject().endObject().endObject().endObject();
assertAcked(prepareCreate("companies").setSettings(settings).addMapping("company", xContentBuilder));
ensureGreen();
indexRandom(true,
client().prepareIndex("companies", "company", "1")
.setSource(
jsonBuilder().startObject().field("name", "company 1").startArray("branches").startObject()
.field("name", "New York").startObject("location").field("lat", 40.7143528)
.field("lon",
-74.0059731)
.endObject().endObject().endArray().endObject()),
client().prepareIndex("companies", "company", "2")
.setSource(jsonBuilder().startObject().field("name", "company 2").startArray("branches").startObject()
.field("name", "Times Square").startObject("location").field("lat", 40.759011).field("lon", -73.9844722)
.endObject() // to NY: 5.286 km
.endObject().startObject().field("name", "Tribeca").startObject("location").field("lat", 40.718266)
.field("lon", -74.007819).endObject() // to NY:
// 0.4621
// km
.endObject().endArray().endObject()),
client().prepareIndex("companies", "company", "3")
.setSource(jsonBuilder().startObject().field("name", "company 3").startArray("branches").startObject()
.field("name", "Wall Street").startObject("location").field("lat", 40.7051157).field("lon", -74.0088305)
.endObject() // to NY: 1.055 km
.endObject().startObject().field("name", "Soho").startObject("location").field("lat", 40.7247222)
.field("lon", -74).endObject() // to NY: 1.258
// km
.endObject().endArray().endObject()),
client().prepareIndex("companies", "company", "4")
.setSource(jsonBuilder().startObject().field("name", "company 4").startArray("branches").startObject()
.field("name", "Greenwich Village").startObject("location").field("lat", 40.731033)
.field("lon", -73.9962255).endObject() // to NY:
// 2.029
// km
.endObject().startObject().field("name", "Brooklyn").startObject("location").field("lat", 40.65)
.field("lon", -73.95).endObject() // to NY:
// 8.572 km
.endObject().endArray().endObject()));
// Order: Asc
SearchResponse searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders
.geoDistanceSort("branches.location", 40.7143528, -74.0059731).order(SortOrder.ASC).setNestedPath("branches"))
.execute().actionGet();
assertHitCount(searchResponse, 4);
assertOrderedSearchHits(searchResponse, "1", "2", "3", "4");
assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
// Order: Asc, Mode: max
searchResponse = client()
.prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders.geoDistanceSort("branches.location",
40.7143528, -74.0059731).order(SortOrder.ASC).sortMode("max").setNestedPath("branches"))
.execute().actionGet();
assertHitCount(searchResponse, 4);
assertOrderedSearchHits(searchResponse, "1", "3", "2", "4");
assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
// Order: Desc
searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders
.geoDistanceSort("branches.location", 40.7143528, -74.0059731).order(SortOrder.DESC).setNestedPath("branches"))
.execute().actionGet();
assertHitCount(searchResponse, 4);
assertOrderedSearchHits(searchResponse, "4", "2", "3", "1");
assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
// Order: Desc, Mode: min
searchResponse = client()
.prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders.geoDistanceSort("branches.location",
40.7143528, -74.0059731).order(SortOrder.DESC).sortMode("min").setNestedPath("branches"))
.execute().actionGet();
assertHitCount(searchResponse, 4);
assertOrderedSearchHits(searchResponse, "4", "3", "2", "1");
assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
searchResponse = client()
.prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders.geoDistanceSort("branches.location",
40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC).setNestedPath("branches"))
.execute().actionGet();
assertHitCount(searchResponse, 4);
assertOrderedSearchHits(searchResponse, "1", "3", "2", "4");
assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1157.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(2874.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(5301.0d, 10d));
searchResponse = client().prepareSearch("companies")
.setQuery(matchAllQuery()).addSort(SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731)
.setNestedPath("branches").sortMode("avg").order(SortOrder.DESC).setNestedPath("branches"))
.execute().actionGet();
assertHitCount(searchResponse, 4);
assertOrderedSearchHits(searchResponse, "4", "2", "3", "1");
assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(5301.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(2874.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1157.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
.addSort(SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731)
.setNestedFilter(termQuery("branches.name", "brooklyn"))
.sortMode("avg").order(SortOrder.ASC).setNestedPath("branches"))
.execute().actionGet();
assertHitCount(searchResponse, 4);
assertFirstHit(searchResponse, hasId("4"));
assertSearchHits(searchResponse, "1", "2", "3", "4");
assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
try {
client().prepareSearch("companies").setQuery(matchAllQuery())
.addSort(SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731).sortMode("sum")
.setNestedPath("branches"));
fail("Sum should not be allowed as sort mode");
} catch (IllegalArgumentException e) {
//expected
}
}
/**
* Issue 3073
*/
public void testGeoDistanceFilter() throws IOException {
Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
double lat = 40.720611;
double lon = -73.998776;
XContentBuilder mapping = JsonXContent.contentBuilder().startObject().startObject("location").startObject("properties")
.startObject("pin").field("type", "geo_point");
if (version.before(Version.V_2_2_0)) {
mapping.field("lat_lon", true);
}
mapping.endObject().endObject().endObject().endObject();
XContentBuilder source = JsonXContent.contentBuilder().startObject().field("pin", GeoHashUtils.stringEncode(lon, lat)).endObject();
assertAcked(prepareCreate("locations").setSettings(settings).addMapping("location", mapping));
client().prepareIndex("locations", "location", "1").setCreate(true).setSource(source).execute().actionGet();
refresh();
client().prepareGet("locations", "location", "1").execute().actionGet();
SearchResponse result = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery())
.setPostFilter(QueryBuilders.geoDistanceQuery("pin").geoDistance(GeoDistance.ARC).point(lat, lon).distance("1m")).execute()
.actionGet();
assertHitCount(result, 1);
}
private double randomLon() {
return randomDouble() * 360 - 180;
}
private double randomLat() {
return randomDouble() * 180 - 90;
}
public void testDuelOptimizations() throws Exception {
Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
if (version.before(Version.V_2_2_0)) {
assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", "location", "type=geo_point,lat_lon=true"));
} else {
assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", "location", "type=geo_point"));
}
final int numDocs = scaledRandomIntBetween(3000, 10000);
List<IndexRequestBuilder> docs = new ArrayList<>();
for (int i = 0; i < numDocs; ++i) {
docs.add(client().prepareIndex("index", "type").setSource(jsonBuilder().startObject().startObject("location")
.field("lat", randomLat()).field("lon", randomLon()).endObject().endObject()));
}
indexRandom(true, docs);
ensureSearchable();
for (int i = 0; i < 10; ++i) {
final double originLat = randomLat();
final double originLon = randomLon();
final String distance = DistanceUnit.KILOMETERS.toString(randomIntBetween(1, 10000));
for (GeoDistance geoDistance : Arrays.asList(GeoDistance.ARC, GeoDistance.SLOPPY_ARC)) {
logger.info("Now testing GeoDistance={}, distance={}, origin=({}, {})", geoDistance, distance, originLat, originLon);
GeoDistanceQueryBuilder qb = QueryBuilders.geoDistanceQuery("location").point(originLat, originLon).distance(distance)
.geoDistance(geoDistance);
long matches;
if (version.before(Version.V_2_2_0)) {
for (String optimizeBbox : Arrays.asList("none", "memory", "indexed")) {
qb.optimizeBbox(optimizeBbox);
SearchResponse resp = client().prepareSearch("index").setSize(0).setQuery(QueryBuilders.constantScoreQuery(qb))
.execute().actionGet();
matches = assertDuelOptimization(resp);
logger.info("{} -> {} hits", optimizeBbox, matches);
}
} else {
SearchResponse resp = client().prepareSearch("index").setSize(0).setQuery(QueryBuilders.constantScoreQuery(qb))
.execute().actionGet();
matches = assertDuelOptimization(resp);
logger.info("{} hits", matches);
}
}
}
}
private long assertDuelOptimization(SearchResponse resp) {
long matches = -1;
assertSearchResponse(resp);
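        // Note: `matches` is initialized to -1 immediately above and is not modified
        // before this check, so the else branch below is effectively dead code in this
        // helper; it looks like a leftover from an earlier loop-based comparison.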
if (matches < 0) {
matches = resp.getHits().totalHits();
} else {
assertEquals(matches, matches = resp.getHits().totalHits());
}
return matches;
}
}
| apache-2.0 |
hakanu/iftar | _posts_/vakit/CAD/SARH/2017-02-01-SARH.markdown | 473 | ---
layout: vakit_dashboard
title: SARH, CAD - iftar, prayer times and weather - select a district/state
permalink: /CAD/SARH
---
## Select a district/state to see iftar, prayer times and weather for SARH (CAD)
Choose a city or neighborhood from the list below
* [Iftar and prayer times for (SARH, CAD)](/CAD/SARH/)
<script type="text/javascript">
var GLOBAL_COUNTRY = 'CAD';
var GLOBAL_CITY = 'SARH';
var GLOBAL_STATE = 'SARH';
</script>
| apache-2.0 |
creaITve/apps-android-tbrc-works | wikipedia/src/main/java/org/wikipedia/Utils.java | 28052 | package org.wikipedia;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.pm.ApplicationInfo;
import android.content.pm.PackageManager;
import android.content.pm.ResolveInfo;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;
import android.net.Uri;
import android.os.Build;
import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import android.os.Parcelable;
import android.preference.PreferenceManager;
import android.telephony.TelephonyManager;
import android.text.InputType;
import android.text.format.DateUtils;
import android.util.Base64;
import android.util.DisplayMetrics;
import android.util.Log;
import android.util.TypedValue;
import android.view.View;
import android.view.Window;
import android.view.inputmethod.InputMethodManager;
import android.widget.CheckBox;
import android.widget.CompoundButton;
import android.widget.EditText;
import android.widget.Toast;
import com.squareup.otto.Bus;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.mediawiki.api.json.ApiResult;
import org.wikipedia.bridge.CommunicationBridge;
import org.wikipedia.events.WikipediaZeroInterstitialEvent;
import org.wikipedia.events.WikipediaZeroStateChangeEvent;
import org.wikipedia.settings.PrefKeys;
import org.wikipedia.zero.WikipediaZeroTask;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.UnsupportedEncodingException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.*;
/**
* Contains utility methods that Java doesn't have because we can't make code look too good, can we?
*/
public final class Utils {
private static final int MCC_LENGTH = 3;
private static final int KB16 = 16 * 1024;
/**
* Private constructor, so nobody can construct Utils.
*
* THEIR EVIL PLANS HAVE BEEN THWARTED!!!1
*/
private Utils() { }
/**
* Compares two strings properly, even when one of them is null - without throwing up
*
* @param str1 The first string
     * @param str2 The second string to compare
* @return true if they are both equal (even if both are null)
*/
public static boolean compareStrings(String str1, String str2) {
return (str1 == null ? str2 == null : str1.equals(str2));
}
/**
* Creates an MD5 hash of the provided string & returns its base64 representation
* @param s String to hash
* @return Base64'd MD5 representation of the string passed in
*/
public static String md5base64(final String s) {
try {
// Create MD5 Hash
MessageDigest digest = java.security.MessageDigest.getInstance("MD5");
digest.update(s.getBytes("utf-8"));
byte[] messageDigest = digest.digest();
return Base64.encodeToString(messageDigest, Base64.URL_SAFE | Base64.NO_WRAP);
} catch (NoSuchAlgorithmException e) {
// This will never happen, yes.
throw new RuntimeException(e);
} catch (UnsupportedEncodingException e) {
// This will never happen, yes.
throw new RuntimeException(e);
}
}
/**
* Creates an MD5 hash of the provided string and returns its ASCII representation
* @param s String to hash
* @return ASCII MD5 representation of the string passed in
*/
public static String md5string(String s) {
StringBuilder hexStr = new StringBuilder();
try {
// Create MD5 Hash
MessageDigest digest = java.security.MessageDigest.getInstance("MD5");
digest.update(s.getBytes("utf-8"));
byte[] messageDigest = digest.digest();
final int maxByteVal = 0xFF;
for (byte b : messageDigest) {
hexStr.append(Integer.toHexString(maxByteVal & b));
}
} catch (NoSuchAlgorithmException e) {
// This will never happen, yes.
throw new RuntimeException(e);
} catch (UnsupportedEncodingException e) {
// This will never happen, yes.
throw new RuntimeException(e);
}
return hexStr.toString();
}
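    // Descriptive note (not from the original source): md5base64 and md5string encode
    // the same 16-byte MD5 digest differently (URL-safe Base64 vs. hex). Because
    // Integer.toHexString() does not zero-pad, bytes below 0x10 contribute a single
    // character, so md5string output is not a fixed-width 32-character digest.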
/**
* Deletes a file or directory, with optional recursion.
* @param path File or directory to delete.
* @param recursive Whether to delete all subdirectories and files.
*/
public static void delete(File path, boolean recursive) {
if (recursive && path.isDirectory()) {
String[] children = path.list();
for (String child : children) {
delete(new File(path, child), recursive);
}
}
path.delete();
}
/**
* Formats provided date relative to the current system time
* @param date Date to format
     * @return String representing the relative time difference of the parameter from current time
*/
public static String formatDateRelative(Date date) {
return DateUtils.getRelativeTimeSpanString(date.getTime(), System.currentTimeMillis(), DateUtils.SECOND_IN_MILLIS, 0).toString();
}
/**
* Ensures that the calling method is on the main thread.
*/
public static void ensureMainThread() {
if (Looper.getMainLooper().getThread() != Thread.currentThread()) {
throw new IllegalStateException("Method must be called from the Main Thread");
}
}
/**
* Attempt to hide the Android Keyboard.
*
* FIXME: This should not need to exist.
* I do not know why Android does not handle this automatically.
*
* @param activity The current activity
*/
public static void hideSoftKeyboard(Activity activity) {
InputMethodManager keyboard = (InputMethodManager)activity.getSystemService(Context.INPUT_METHOD_SERVICE);
// Not using getCurrentFocus as that sometimes is null, but the keyboard is still up.
keyboard.hideSoftInputFromWindow(activity.getWindow().getDecorView().getWindowToken(), 0);
}
/**
* Attempt to display the Android keyboard.
*
* FIXME: This should not need to exist.
* Android should always show the keyboard at the appropriate time. This method allows you to display the keyboard
* when Android fails to do so.
*
* @param activity The current activity
* @param view The currently focused view that will receive the keyboard input
*/
public static void showSoftKeyboard(Activity activity, View view) {
InputMethodManager keyboard = (InputMethodManager)activity.getSystemService(Context.INPUT_METHOD_SERVICE);
keyboard.showSoftInput(view, InputMethodManager.SHOW_FORCED);
}
/**
* Same as showSoftKeyboard(), but posted to the message queue of the current thread, so that it's executed
* after the current block of code is finished.
* @param activity The current activity
* @param view The currently focused view that will receive the keyboard input
*/
public static void showSoftKeyboardAsync(final Activity activity, final View view) {
view.post(new Runnable() {
@Override
public void run() {
Utils.showSoftKeyboard(activity, view);
}
});
}
public static void setupShowPasswordCheck(final CheckBox check, final EditText edit) {
check.setOnCheckedChangeListener(new CompoundButton.OnCheckedChangeListener() {
@Override
public void onCheckedChanged(CompoundButton compoundButton, boolean isChecked) {
// EditText loses the cursor position when you change the InputType
int curPos = edit.getSelectionStart();
if (isChecked) {
edit.setInputType(InputType.TYPE_CLASS_TEXT);
} else {
edit.setInputType(InputType.TYPE_CLASS_TEXT | InputType.TYPE_TEXT_VARIATION_PASSWORD);
}
edit.setSelection(curPos);
}
});
}
    /** Inspect an API response, and fire an event to update the UI for Wikipedia Zero On/Off.
*
* @param app The application object
* @param result An API result to inspect for Wikipedia Zero headers
*/
public static void processHeadersForZero(final WikipediaApp app, final ApiResult result) {
new Handler(Looper.getMainLooper()).post(new Runnable() {
@Override
public void run() {
Map<String, List<String>> headers = result.getHeaders();
boolean responseZeroState = headers.containsKey("X-CS");
if (responseZeroState) {
String xcs = headers.get("X-CS").get(0);
if (!xcs.equals(WikipediaApp.getXcs())) {
identifyZeroCarrier(app, xcs);
}
} else if (WikipediaApp.getWikipediaZeroDisposition()) {
WikipediaApp.setXcs("");
WikipediaApp.setCarrierMessage("");
WikipediaApp.setWikipediaZeroDisposition(responseZeroState);
app.getBus().post(new WikipediaZeroStateChangeEvent());
}
}
});
}
private static final int MESSAGE_ZERO = 1;
public static void identifyZeroCarrier(final WikipediaApp app, final String xcs) {
Handler wikipediaZeroHandler = new Handler(new Handler.Callback(){
private WikipediaZeroTask curZeroTask;
@Override
public boolean handleMessage(Message msg) {
WikipediaZeroTask zeroTask = new WikipediaZeroTask(app.getAPIForSite(app.getPrimarySite()), app) {
@Override
public void onFinish(String message) {
Log.d("Wikipedia", "Wikipedia Zero message: " + message);
if (message != null) {
WikipediaApp.setXcs(xcs);
WikipediaApp.setCarrierMessage(message);
WikipediaApp.setWikipediaZeroDisposition(true);
Bus bus = app.getBus();
bus.post(new WikipediaZeroStateChangeEvent());
curZeroTask = null;
}
}
@Override
public void onCatch(Throwable caught) {
// oh snap
Log.d("Wikipedia", "Wikipedia Zero Eligibility Check Exception Caught");
curZeroTask = null;
}
};
if (curZeroTask != null) {
// if this connection was hung, clean up a bit
curZeroTask.cancel();
}
curZeroTask = zeroTask;
curZeroTask.execute();
return true;
}
});
wikipediaZeroHandler.removeMessages(MESSAGE_ZERO);
Message zeroMessage = Message.obtain();
zeroMessage.what = MESSAGE_ZERO;
zeroMessage.obj = "zero_eligible_check";
wikipediaZeroHandler.sendMessage(zeroMessage);
}
/**
* Read the MCC-MNC (mobile operator code) if available and the cellular data connection is the active one.
* http://lists.wikimedia.org/pipermail/wikimedia-l/2014-April/071131.html
* @param ctx Application context.
* @return The MCC-MNC, typically as ###-##, or null if unable to ascertain (e.g., no actively used cellular)
*/
public static String getMccMnc(Context ctx) {
String mccMncNetwork;
String mccMncSim;
try {
ConnectivityManager conn = (ConnectivityManager) ctx.getSystemService(Context.CONNECTIVITY_SERVICE);
NetworkInfo networkInfo = conn.getActiveNetworkInfo();
if (networkInfo != null && networkInfo.getState() == NetworkInfo.State.CONNECTED
&& (networkInfo.getType() == ConnectivityManager.TYPE_MOBILE || networkInfo.getType() == ConnectivityManager.TYPE_WIMAX))
{
TelephonyManager t = (TelephonyManager)ctx.getSystemService(WikipediaApp.TELEPHONY_SERVICE);
if (t != null && t.getPhoneType() >= 0) {
mccMncNetwork = t.getNetworkOperator();
if (mccMncNetwork != null) {
mccMncNetwork = mccMncNetwork.substring(0, MCC_LENGTH) + "-" + mccMncNetwork.substring(MCC_LENGTH);
} else {
mccMncNetwork = "000-00";
}
// TelephonyManager documentation refers to MCC-MNC unreliability on CDMA,
// and we actually see that network and SIM MCC-MNC don't always agree,
                    // so let's check the SIM, too. Let's not worry if it's CDMA, as the definition of CDMA is complex.
mccMncSim = t.getSimOperator();
if (mccMncSim != null) {
mccMncSim = mccMncSim.substring(0, MCC_LENGTH) + "-" + mccMncSim.substring(MCC_LENGTH);
} else {
mccMncSim = "000-00";
}
return mccMncNetwork + "," + mccMncSim;
}
}
return null;
} catch (Throwable t) {
// Because, despite best efforts, things can go wrong and we don't want to crash the app:
return null;
}
}
/**
* Takes a language code (as returned by Android) and returns a wiki code, as used by wikipedia.
*
* @param langCode Language code (as returned by Android)
* @return Wiki code, as used by wikipedia.
*/
public static String langCodeToWikiLang(String langCode) {
// Convert deprecated language codes to modern ones.
// See https://developer.android.com/reference/java/util/Locale.html
if (langCode.equals("iw")) {
return "he"; // Hebrew
} else if (langCode.equals("in")) {
return "id"; // Indonesian
} else if (langCode.equals("ji")) {
return "yi"; // Yiddish
}
return langCode;
}
/**
* List of wiki language codes for which the content is primarily RTL.
*
* Ensure that this is always sorted alphabetically.
*/
private static final String[] RTL_LANGS = {
"ar", "arc", "arz", "bcc", "bqi", "ckb", "dv", "fa", "glk", "ha", "he",
"khw", "ks", "mzn", "pnb", "ps", "sd", "ug", "ur", "yi"
};
/**
* Returns true if the given wiki language is to be displayed RTL.
*
* @param lang Wiki code for the language to check for directionality
* @return true if it is RTL, false if LTR
*/
public static boolean isLangRTL(String lang) {
return Arrays.binarySearch(RTL_LANGS, lang, null) >= 0;
}
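    // Usage sketch (illustrative only, not part of the original class): combine the two
    // helpers above to decide directionality for an Android locale code. "iw" is the
    // deprecated Android code for Hebrew, so this evaluates to true:
    //   boolean rtl = isLangRTL(langCodeToWikiLang("iw"));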
/**
* Setup directionality for both UI and content elements in a webview.
*
* @param contentLang The Content language to use to set directionality. Wiki Language code.
* @param uiLang The UI language to use to set directionality. Java language code.
* @param bridge The CommunicationBridge to use to communicate with the WebView
*/
public static void setupDirectionality(String contentLang, String uiLang, CommunicationBridge bridge) {
JSONObject payload = new JSONObject();
try {
if (isLangRTL(contentLang)) {
payload.put("contentDirection", "rtl");
} else {
payload.put("contentDirection", "ltr");
}
if (isLangRTL(langCodeToWikiLang(uiLang))) {
payload.put("uiDirection", "rtl");
} else {
payload.put("uiDirection", "ltr");
}
} catch (JSONException e) {
throw new RuntimeException(e);
}
bridge.sendMessage("setDirectionality", payload);
}
/**
* Sets text direction (RTL / LTR) for given view based on given lang.
*
* Doesn't do anything on pre Android 4.2, since their RTL support is terrible.
*
* @param view View to set direction of
* @param lang Wiki code for the language based on which to set direction
*/
public static void setTextDirection(View view, String lang) {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1) {
view.setTextDirection(Utils.isLangRTL(lang) ? View.TEXT_DIRECTION_RTL : View.TEXT_DIRECTION_LTR);
}
}
/**
* Returns db name for given site
*
* WARNING: HARDCODED TO WORK FOR WIKIPEDIA ONLY
*
* @param site Site object to get dbname for
* @return dbname for given site object
*/
public static String getDBNameForSite(Site site) {
return site.getLanguage() + "wiki";
}
public static void handleExternalLink(final Context context, final Uri uri) {
if (WikipediaApp.isWikipediaZeroDevmodeOn() && WikipediaApp.getWikipediaZeroDisposition()) {
SharedPreferences sharedPref = PreferenceManager.getDefaultSharedPreferences(context);
if (sharedPref.getBoolean(PrefKeys.getZeroInterstitial(), true)) {
WikipediaApp.getInstance().getBus().post(new WikipediaZeroInterstitialEvent(uri));
} else {
Utils.visitInExternalBrowser(context, uri);
}
} else {
Utils.visitInExternalBrowser(context, uri);
}
}
/**
* Open the specified URI in an external browser (even if our app's intent filter
* matches the given URI)
*
* @param context Context of the calling app
* @param uri URI to open in an external browser
*/
public static void visitInExternalBrowser(final Context context, Uri uri) {
Intent intent = new Intent();
intent.setAction(Intent.ACTION_VIEW);
intent.setData(uri);
List<ResolveInfo> resInfo = context.getPackageManager().queryIntentActivities(intent, 0);
if (!resInfo.isEmpty()) {
List<Intent> browserIntents = new ArrayList<Intent>();
for (ResolveInfo resolveInfo : resInfo) {
String packageName = resolveInfo.activityInfo.packageName;
// remove our apps from the selection!
// This ensures that all the variants of the Wiki app (Alpha, Beta, Stable) are never shown
if (packageName.startsWith("org.wikipedia")) {
continue;
}
Intent newIntent = new Intent(Intent.ACTION_VIEW);
newIntent.setData(uri);
newIntent.setPackage(packageName);
browserIntents.add(newIntent);
}
if (browserIntents.size() > 0) {
// initialize the chooser intent with one of the browserIntents, and remove that
// intent from the list, since the chooser already has it, and we don't need to
// add it again in putExtra. (initialize with the last item in the list, to preserve order)
Intent chooserIntent = Intent.createChooser(browserIntents.remove(browserIntents.size() - 1), null);
chooserIntent.putExtra(Intent.EXTRA_INITIAL_INTENTS, browserIntents.toArray(new Parcelable[]{}));
context.startActivity(chooserIntent);
return;
}
}
// This means that there was no way to handle this link.
// We will just show a toast now. FIXME: Make this more visible?
Toast.makeText(context, R.string.error_can_not_process_link, Toast.LENGTH_LONG).show();
}
/**
* Utility method to detect whether an Email app is installed,
* for conditionally enabling/disabling email links.
* @param context Context of the calling app.
* @return True if an Email app exists, false otherwise.
*/
public static boolean mailAppExists(Context context) {
Intent intent = new Intent();
intent.setAction(Intent.ACTION_SENDTO);
intent.setData(Uri.parse("mailto:[email protected]"));
List<ResolveInfo> resInfo = context.getPackageManager().queryIntentActivities(intent, 0);
return resInfo.size() > 0;
}
/**
* Utility method to copy a stream into another stream.
*
* Uses a 16KB buffer.
*
* @param in Stream to copy from.
* @param out Stream to copy to.
* @throws IOException
*/
public static void copyStreams(InputStream in, OutputStream out) throws IOException {
byte[] buffer = new byte[KB16]; // 16kb buffer
int len;
while ((len = in.read(buffer)) != -1) {
out.write(buffer, 0, len);
}
}
/**
* Write a JSON object to a file
* @param file file to be written
* @param jsonObject content of file
* @throws IOException when writing failed
*/
public static void writeToFile(File file, JSONObject jsonObject) throws IOException {
OutputStreamWriter writer = new OutputStreamWriter(new FileOutputStream(file));
try {
writer.write(jsonObject.toString());
} finally {
writer.close();
}
}
/**
* Reads the contents of this page from storage.
* @return Page object with the contents of the page.
* @throws IOException
* @throws JSONException
*/
public static JSONObject readJSONFile(File f) throws IOException, JSONException {
BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream(f)));
try {
StringBuilder stringBuilder = new StringBuilder();
String readStr;
while ((readStr = reader.readLine()) != null) {
stringBuilder.append(readStr);
}
return new JSONObject(stringBuilder.toString());
} finally {
reader.close();
}
}
/**
* Format for formatting/parsing dates to/from the ISO 8601 standard
*/
private static final String ISO8601_FORMAT_STRING = "yyyy-MM-dd'T'HH:mm:ss'Z'";
/**
* Parse a date formatted in ISO8601 format.
*
* @param dateString Date String to parse
* @return Parsed Date object.
* @throws ParseException
*/
public static Date parseISO8601(String dateString) throws ParseException {
Date date = new Date();
SimpleDateFormat sdf = new SimpleDateFormat(ISO8601_FORMAT_STRING, Locale.ROOT);
sdf.setTimeZone(TimeZone.getTimeZone("UTC"));
date.setTime(sdf.parse(dateString).getTime());
return date;
}
/**
* Format a date to an ISO8601 formatted string.
*
* @param date Date to format.
* @return The given date formatted in ISO8601 format.
*/
public static String formatISO8601(Date date) {
SimpleDateFormat sdf = new SimpleDateFormat(ISO8601_FORMAT_STRING, Locale.ROOT);
sdf.setTimeZone(TimeZone.getTimeZone("UTC"));
return sdf.format(date);
}
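    // Round-trip sketch (illustrative only): formatting a Date and parsing the result
    // back yields the same instant truncated to whole seconds, since the pattern above
    // carries no millisecond field.
    //   Date now = new Date();
    //   Date roundTripped = parseISO8601(formatISO8601(now)); // may throw ParseException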
/**
* Convert a JSONArray object to a String Array.
*
* @param array a JSONArray containing only Strings
* @return a String[] with all the items in the JSONArray
*/
public static String[] jsonArrayToStringArray(JSONArray array) {
if (array == null) {
return null;
}
String[] stringArray = new String[array.length()];
for (int i = 0; i < array.length(); i++) {
stringArray[i] = array.optString(i);
}
return stringArray;
}
/**
* Resolves a potentially protocol relative URL to a 'full' URL
*
* @param url Url to check for (and fix) protocol relativeness
* @return A fully qualified, protocol specified URL
*/
public static String resolveProtocolRelativeUrl(String url) {
String fullUrl;
if (url.startsWith("//")) {
// That's a protocol specific link! Make it https!
fullUrl = WikipediaApp.getInstance().getNetworkProtocol() + ":" + url;
} else {
fullUrl = url;
}
return fullUrl;
}
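    // Illustrative examples (a sketch; the scheme actually used depends on the app's
    // configured network protocol):
    //   resolveProtocolRelativeUrl("//en.wikipedia.org/w/index.php")
    //       -> "https://en.wikipedia.org/w/index.php" when the protocol is https
    //   resolveProtocolRelativeUrl("http://example.org/") -> returned unchanged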
/**
* Ask user to try connecting again upon (hopefully) recoverable network failure.
*/
public static void toastFail() {
Toast.makeText(WikipediaApp.getInstance(), R.string.error_network_error_try_again, Toast.LENGTH_LONG).show();
}
/**
*
* @param actual The exception object
     * @param expected The class you're trying to find, typically passed as ExceptionImpl.class, for example.
     * @return boolean true if the Throwable type was found in the nested exception chain, else false.
*/
public static boolean throwableContainsSpecificType(Throwable actual, Class expected) {
if (actual == null) {
return false;
} else if (actual.getClass() == expected) {
return true;
} else {
return throwableContainsSpecificType(actual.getCause(), expected);
}
}
/**
* Calculates the actual font size for the current device, based on an "sp" measurement.
* @param window The window on which the font will be rendered.
* @param fontSp Measurement in "sp" units of the font.
* @return Actual font size for the given sp amount.
*/
public static float getFontSizeFromSp(Window window, float fontSp) {
final DisplayMetrics metrics = new DisplayMetrics();
window.getWindowManager().getDefaultDisplay().getMetrics(metrics);
return fontSp / metrics.scaledDensity;
}
/**
* Resolves the resource ID of a theme-dependent attribute (for example, a color value
* that changes based on the selected theme)
* @param activity The activity whose theme contains the attribute.
* @param id Theme-dependent attribute ID to be resolved.
* @return The actual resource ID of the requested theme-dependent attribute.
*/
public static int getThemedAttributeId(Activity activity, int id) {
TypedValue tv = new TypedValue();
activity.getTheme().resolveAttribute(id, tv, true);
return tv.resourceId;
}
/**
* Returns the distribution channel for the app from AndroidManifest.xml
     * @param ctx The application context
* @return The channel (the empty string if not defined)
*/
public static String getChannelDescriptor(Context ctx) {
try {
ApplicationInfo a = ctx.getPackageManager().getApplicationInfo(ctx.getPackageName(), PackageManager.GET_META_DATA);
String channel = a.metaData.getString(PrefKeys.getChannel());
return channel != null ? channel : "";
} catch (Throwable t) {
// metadata missing or unreadable; fall back to an empty channel
return "";
}
}
/**
* Sets the distribution channel for the app into SharedPreferences
* @param ctx The application context
*/
public static void setChannel(Context ctx) {
SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(ctx);
String channel = getChannelDescriptor(ctx);
prefs.edit().putString(PrefKeys.getChannel(), channel).commit();
}
/**
* Gets the distribution channel for the app from SharedPreferences
* @param ctx The application context
* @return The channel stored in SharedPreferences, populating it from the manifest on first access
*/
public static String getChannel(Context ctx) {
SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(ctx);
String channel = prefs.getString(PrefKeys.getChannel(), null);
if (channel != null) {
return channel;
} else {
setChannel(ctx);
return getChannel(ctx);
}
}
}
| apache-2.0 |
izenecloud/izenelib | test/3rdparty/zookeeper/t_zookeeper.cpp | 6019 | #include <boost/test/unit_test.hpp>
#include <boost/lambda/lambda.hpp>
#include <boost/lambda/bind.hpp>
#include <iostream>
#include <sstream>
#include <3rdparty/zookeeper/ZooKeeper.hpp>
#include <3rdparty/zookeeper/ZooKeeperWatcher.hpp>
#include <3rdparty/zookeeper/ZooKeeperEvent.hpp>
using namespace std;
using namespace boost;
using namespace boost::lambda;
using namespace izenelib::zookeeper;
class WorkerSearch : public ZooKeeperEventHandler
{
public:
virtual void onSessionConnected()
{
}
virtual void onNodeCreated(const std::string& path)
{
cout << "[WorkerSearch] onNodeCreated " << path <<endl;
path_ = path;
}
virtual void onNodeDeleted(const std::string& path)
{
cout << "[WorkerSearch] onNodeDeleted " << path <<endl;
}
virtual void onDataChanged(const std::string& path)
{
cout << "[WorkerSearch] onDataChanged " << path <<endl;
path_ = path;
}
std::string path_;
};
class WorkerMining : public ZooKeeperEventHandler
{
public:
virtual void onSessionConnected()
{
}
virtual void onNodeCreated(const std::string& path)
{
cout << "[WorkerMining] onNodeCreated " << path <<endl;
}
virtual void onNodeDeleted(const std::string& path)
{
cout << "[WorkerMining] onNodeDeleted " << path <<endl;
}
virtual void onDataChanged(const std::string& path)
{
cout << "[WorkerMining] onDataChanged " << path <<endl;
}
};
static const std::string gHosts = "localhost:2181"; //"127.16.0.161:2181,127.16.0.162:2181,127.16.0.163:2181";
//#define ENABLE_ZK_TEST
BOOST_AUTO_TEST_SUITE( t_zookeeper )
BOOST_AUTO_TEST_CASE( check_zookeeper_service )
{
std::cout << "---> Note: start ZooKeeper Service firstly before test." << std::endl;
std::cout << " ZooKeeper Service: "<< gHosts << std::endl;
}
BOOST_AUTO_TEST_CASE( zookeeper_client_basic )
{
std::cout << "---> Test ZooKeeper Client basic functions" << std::endl;
#ifndef ENABLE_ZK_TEST
return;
#endif
std::string hosts = gHosts;
int recvTimeout = 3000;
// Zookeeper Client
ZooKeeper cli(hosts, recvTimeout);
sleep(2);
if (!cli.isConnected())
return;
// remove all
cli.deleteZNode("/SF1", true);
// create
std::string path = "/SF1";
std::string data = "distributed search";
cli.createZNode(path, data, ZooKeeper::ZNODE_NORMAL);
BOOST_CHECK_EQUAL(cli.isZNodeExists(path), true);
BOOST_CHECK_EQUAL(cli.createZNode(path, data, ZooKeeper::ZNODE_NORMAL), false);
// create ephemeral node
if (false) // disable
{
ZooKeeper tmpCli(hosts, recvTimeout);
tmpCli.createZNode("/SF1/ephemeral", "", ZooKeeper::ZNODE_EPHEMERAL);
BOOST_CHECK_EQUAL(tmpCli.isZNodeExists("/SF1/ephemeral"), true);
}
//tmpCli exited...
sleep(1);
BOOST_CHECK_EQUAL(cli.isZNodeExists("/SF1/ephemeral"), false);
// create sequence node
cli.createZNode("/SF1/sequence", "", ZooKeeper::ZNODE_SEQUENCE);
string s1 = cli.getLastCreatedNodePath();
BOOST_CHECK_EQUAL(cli.isZNodeExists(s1), true);
cli.createZNode("/SF1/sequence", "", ZooKeeper::ZNODE_SEQUENCE);
string s2 = cli.getLastCreatedNodePath();
BOOST_CHECK_EQUAL(cli.isZNodeExists(s2), true);
cli.deleteZNode(s1);
cli.deleteZNode(s2);
// get
std::string data_get;
cli.getZNodeData(path, data_get);
BOOST_CHECK_EQUAL(data_get, data);
// set
std::string data2 = "distributed search (sf1-kite)";
BOOST_CHECK_EQUAL(cli.setZNodeData(path, data2), true);
cli.getZNodeData(path, data_get);
BOOST_CHECK_EQUAL(data_get, data2);
// children
std::string master = "/SF1/Master";
std::string worker1 = "/SF1/Worker1";
std::string worker2 = "/SF1/Worker2";
BOOST_CHECK_EQUAL(cli.createZNode(master, "this is master node"), true);
BOOST_CHECK_EQUAL(cli.createZNode(worker1, "remote worker1"), true);
BOOST_CHECK_EQUAL(cli.createZNode(worker2, "remote worker2"), true);
std::vector<std::string> children;
cli.getZNodeChildren("/SF1", children);
BOOST_CHECK_EQUAL(children.size(), 3);
BOOST_CHECK_EQUAL(children[0], master);
BOOST_CHECK_EQUAL(children[1], worker1);
BOOST_CHECK_EQUAL(children[2], worker2);
// display
//cli.showZKNamespace("/SF1");
}
BOOST_AUTO_TEST_CASE( zookeeper_watch )
{
std::cout << "---> Test ZooKeeper Watcher" << std::endl;
#ifndef ENABLE_ZK_TEST
return;
#endif
// Client
std::string hosts = gHosts;
int recvTimeout = 2000;
ZooKeeper cli(hosts, recvTimeout);
sleep(1);
if (!cli.isConnected())
return;
// set event handlers for watcher
WorkerSearch wkSearch;
WorkerMining wkMining;
cli.registerEventHandler(&wkSearch);
cli.registerEventHandler(&wkMining);
// 1. get and watch znode for changes
std::string path = "/SF1/Master";
std::string data_get;
cli.getZNodeData(path, data_get, ZooKeeper::WATCH);
BOOST_CHECK_EQUAL(data_get, "this is master node"); // set in former test case
cli.setZNodeData(path, "master data changed!");
sleep(1); //ensure watcher notified
// master was notified by watcher on znode changed
BOOST_CHECK_EQUAL(wkSearch.path_, path);
cli.getZNodeData(wkSearch.path_, data_get);
BOOST_CHECK_EQUAL(data_get, "master data changed!");
// 2. check exists and watch znode for creation
std::string path2 = "/NotExistedNode";
cli.deleteZNode(path2, true);
BOOST_CHECK_EQUAL(cli.isZNodeExists(path2, ZooKeeper::WATCH), false);
cli.createZNode(path2, "nodata");
sleep(1); //ensure watcher notified
// master was notified by watcher on znode created
BOOST_CHECK_EQUAL(wkSearch.path_, path2);
cli.getZNodeData(wkSearch.path_, data_get);
BOOST_CHECK_EQUAL(data_get, "nodata");
// clear test data from zookeeper servers
cli.deleteZNode(path2, true);
cli.deleteZNode("/SF1", true);
}
BOOST_AUTO_TEST_SUITE_END()
| apache-2.0 |
googleapis/google-cloud-dotnet | apis/Google.Cloud.Compute.V1/Google.Cloud.Compute.V1.GeneratedSnippets/RegionInstanceGroupManagersClient.ListPerInstanceConfigsRequestObjectAsyncSnippet.g.cs | 3764 | // Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Generated code. DO NOT EDIT!
namespace Google.Cloud.Compute.V1.Snippets
{
// [START compute_v1_generated_RegionInstanceGroupManagers_ListPerInstanceConfigs_async]
using Google.Api.Gax;
using Google.Cloud.Compute.V1;
using System;
using System.Linq;
using System.Threading.Tasks;
public sealed partial class GeneratedRegionInstanceGroupManagersClientSnippets
{
/// <summary>Snippet for ListPerInstanceConfigsAsync</summary>
/// <remarks>
/// This snippet has been automatically generated for illustrative purposes only.
/// It may require modifications to work in your environment.
/// </remarks>
public async Task ListPerInstanceConfigsRequestObjectAsync()
{
// Create client
RegionInstanceGroupManagersClient regionInstanceGroupManagersClient = await RegionInstanceGroupManagersClient.CreateAsync();
// Initialize request argument(s)
ListPerInstanceConfigsRegionInstanceGroupManagersRequest request = new ListPerInstanceConfigsRegionInstanceGroupManagersRequest
{
Region = "",
OrderBy = "",
Project = "",
InstanceGroupManager = "",
Filter = "",
ReturnPartialSuccess = false,
};
// Make the request
PagedAsyncEnumerable<RegionInstanceGroupManagersListInstanceConfigsResp, PerInstanceConfig> response = regionInstanceGroupManagersClient.ListPerInstanceConfigsAsync(request);
// Iterate over all response items, lazily performing RPCs as required
await response.ForEachAsync((PerInstanceConfig item) =>
{
// Do something with each item
Console.WriteLine(item);
});
// Or iterate over pages (of server-defined size), performing one RPC per page
await response.AsRawResponses().ForEachAsync((RegionInstanceGroupManagersListInstanceConfigsResp page) =>
{
// Do something with each page of items
Console.WriteLine("A page of results:");
foreach (PerInstanceConfig item in page)
{
// Do something with each item
Console.WriteLine(item);
}
});
// Or retrieve a single page of known size (unless it's the final page), performing as many RPCs as required
int pageSize = 10;
Page<PerInstanceConfig> singlePage = await response.ReadPageAsync(pageSize);
// Do something with the page of items
Console.WriteLine($"A page of {pageSize} results (unless it's the final page):");
foreach (PerInstanceConfig item in singlePage)
{
// Do something with each item
Console.WriteLine(item);
}
// Store the pageToken, for when the next page is required.
string nextPageToken = singlePage.NextPageToken;
}
}
// [END compute_v1_generated_RegionInstanceGroupManagers_ListPerInstanceConfigs_async]
}
| apache-2.0 |
mallocator/express-version-router | README.md | 11674 | # versioned-api-router
[](http://badge.fury.io/js/versioned-api-router)
[](https://travis-ci.org/mallocator/versioned-api-router)
[](https://coveralls.io/github/mallocator/versioned-api-router?branch=master)
[](https://david-dm.org/mallocator/versioned-api-router)
A router for express that manages api versioning and parameter handling.
## Features
* Parse version from param, path, header, cookie or your own implementation
* Match version using numbers, regular expression or [semver](https://github.com/npm/node-semver) format
* Configure your own parameters or headers
* Respond to requests with the matched version in a custom header
* Checks if all required parameters are set on an endpoint
* Automatically sets default values for missing parameters
* Generates a json map of all endpoints for documentation
* Supports nested documents for json POST bodies
* Customizable error handler per router and for each endpoint
## Installation
```npm install --save versioned-api-router```
## Examples for Version handling
Set up express with the router:
```
var express = require('express');
var app = express();
var versionRouter = require('versioned-api-router');
var router = versionRouter();
var errorHandler = (req, res) => res.status(404).end('not found');
app.use(router, errorHandler); // will only call the errorHandler if it can't resolve the version
```
Set an endpoint to handle a specific version (version 1 in this case):
```
router.get('/myendpoint', 1, (req, res) => res.end('success'));
// curl http://myserver/v1/test => 200 success
// curl http://myserver/test?v=1 => 200 success
// curl -H "X-ApiVersion: 1" http://myserver/test => 200 success
// curl http://myserver/test => 404 not found
```
Set an endpoint to handle a version based on [semver](https://github.com/npm/node-semver):
```
router.get('/myendpoint', /^2/, (req, res) => res.end('success'));
// curl http://myserver/v2/test => 200 success
// curl http://myserver/v2.1/test => 200 success
// curl http://myserver/v2.1.6/test => 200 success
// curl http://myserver/v3/test => 404 not found
```
Set an endpoint to handle a version based on a regular expression:
```
router.get('/myendpoint', /(3|4)/, (req, res) => res.end('success'));
// curl http://myserver/v3/test => 200 success
// curl http://myserver/v4/test => 200 success
// curl http://myserver/v5/test => 404 not found
```
Set an endpoint to accept multiple version using an array:
```
router.get('/myendpoint', [1, '^2', /(3|4)/], (req, res) => res.end('success'));
// curl http://myserver/v1/test => 200 success
// curl http://myserver/v2/test => 200 success
// curl http://myserver/v3/test => 200 success
// curl http://myserver/v4/test => 200 success
```
## Examples for API handling
```
var Router = require('versioned-api-router');
var router = Router();
router.get('/endpoint', {
description: 'An example endpoint',
params: {
name: 'string'
}
}, (req, res) => {
res.end('hello ' + req.args.name);
});
app.use(router);
```
With the default configuration this is the output of the different requests to the server:
http://myhost/endpoint =>
```
Status Code 422
```
http://myhost/endpoint (developer mode) =>
```
Status Code 422
{
"error": "Required parameters are missing",
"params": {
"name": {
"type": "string",
"error": "not set"
}
}
}
```
http://myhost/endpoint?name=bob => ```hello bob```
http://myhost/ =>
```
Status Code 200
{
"/endpoint": {
"GET": {
"description": "An example endpoint",
"params": {
"name": {
"type": "string",
"required": true,
"default": undefined
}
}
}
}
}
```
## API
### Router
```
var versionRouter = require('versioned-api-router');
var router = versionRouter({
param: 'v',
header: 'X-ApiVersion',
responseHeader: 'X-ApiVersion',
passVersion: false,
prefix: '/path'
error: (value, req, res, next) => {},
success: (value, req, res, next) => {},
validate: (value, req, res, next) => {},
paramMap: 'arguments',
paramOrder: ['params', 'query', 'cookie', 'body', 'header'],
routerFunction: express.Router
});
```
The router extends the standard express router and allows all settings the standard router has to be used.
In addition the router has options specifically for the version mapping:
* param: the parameter name that is used in query and parameter mapping
* header: the header used to look for a requested version
* responseHeader: the name of the header in the response that has information about the matched version. (will be turned off if this is set to falsy)
* passVersion: whether to pass the version on via the request object. This will add two new properties to the request object: incomingVersion and acceptedVersion.
* prefix: A prefix for the api map that will be prepended when printing it via `router.api` or `router.endpoints`.
* error: A global error handler that overrides the default behavior for api errors (not version mismatches).
* success: A success handler that overrides the default behavior for api successes (not version mismatches).
* validate: A global validator the overrides the default behavior for api parameters (not version mismatches).
* paramMap: The property on the request object on which to find parsed parameters.
* paramOrder: The order in which request properties are searched for incoming parameters; the default order is 'params', 'query', 'cookie', 'body', 'header', which map to express properties. Once a parameter has been found it's not going to be overwritten by other properties (see the sketch after this list).
* routerFunction: The router function used to generate Routers
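For example, with the default `paramOrder` a value found in express' `params` property wins over the same name in `query`. A minimal sketch (route, values and the `req.args` property name are illustrative; the latter depends on the `paramMap` setting):
```
router.get('/users/:id', { params: { id: 'number' } }, (req, res) => {
    // GET /users/7?id=9 => the path parameter wins, so the parsed id is 7
    res.end('id: ' + req.args.id);
});
```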
### Router.all / Router.METHOD
```
router.all(path, [version], [api], [callback, ...] callback)
router.METHOD(path, [version], [api], [callback, ...] callback)
```
This method works the same way as the standard express router, with the addition of an optional version
parameter. Any string, number or regular expression is treated as a version that limits which requests this handler
will respond to.
The path supports all the same options that the standard router does; the only caveat is that regular expressions prevent the
use of path parameters, which are disabled in that case (parameter and header methods are still supported though). Instead
you can make use of the regular expression subset that express has
[built in](https://expressjs.com/en/guide/routing.html#route-paths) using strings.
The version can be either an array or a single instance of either:
A number - will match that number exactly
A string - will perform [semver](https://github.com/npm/node-semver) matching
A regular expression - will match the incoming version against it
The api configuration is complex enough that it has its own section below, labeled **Api Configuration**.
Callbacks can be any handlers that are compatible with standard express handlers and as usual you can set multiple
handlers that will process a request in order. Handlers will receive a req object that now has two additional fields:
req.incomingVersion - The version that came in on the request
req.acceptedVersion - The version that the handler has been configured to accept
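A minimal sketch of reading these fields (illustrative only, and assuming the passVersion option is enabled on the router):
```
router.get('/version-info', '^1', (req, res) => {
    // e.g. a request to /v1.2/version-info
    res.json({
        requested: req.incomingVersion, // what the client sent, e.g. "1.2"
        matched: req.acceptedVersion    // what this handler was configured with, e.g. "^1"
    });
});
```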
### Router.api(req, res);
A standard request handler implementation that will respond with the currently configured api for this router. Can be used to make
it easier for developers to work with your API.
```
app.get('/', router.api);
// => responds via res.jsonp() and prints current endpoint configuration
```
The api handler supports multiple formats that can be specified either through a query (get) or path parameter named format:
```
app.get('/api/:format', router.api);
// => can respond to e.g. /api/tree or /api?format=tree (which doesn't require the path variable).
```
The supported formats are ```json``` (default), ```tree```, ```table```, ```csv```, ```xml```
### Router.endpoints;
A map with the configuration of all the endpoints on this Router. Use this to build your own custom api handlers or to do more advanced
configuration.
```
console.log(router.endpoints)
// => { "path": { "method": { "description": "endpoint description", "params": {}}}}
```
### Router.route
```
router.route(path)
```
This is the same as the original method. Note that versioning is not supported at this time for the route call.
### Router.use
```
router.use([path], [function, ...] function)
```
This is the same as the original method. Note that versioning is not supported at this time for the use call.
### Router.param
```
router.param(name, callback)
```
This is the same as the original method. Note that versioning is not supported at this time for the param call.
## API Configuration
The configuration is where most of the work needs to be done to get the router working the way you want:
#### Router.description
A simple string field that allows to set a description of the endpoint.
```
{
description: 'A description of the endpoint that is printed in the json map'
}
```
#### Router.params
An object that describes the expected parameters for this endpoint. Each parameter is defined as a property of the params object. The parameter definition
supports either a simple string definition in the format of ```type``` or ```type(default)```:
```
{
params: {
aRequiredNumber: 'number',
anOptionalString: 'string(bob)',
aRequiredArray: 'string[]',
anOptionalArray: 'bool[](true, false, false)'
}
}
```
or a more complex format as an object with more options:
```
{
params: {
name: {
type: 'string',
default: 'bob',
required: false,
description: 'The users name',
min: 3, // min characters for string, min value for number, ignored for boolean
max: 10, // max characters for string, max value for number, ignored for boolean
validate: (value, cb) => { cb(err, value); }, // Function to override validation behavior
error: (error, req, res, next) => {}, // Function to override error behavior
success: (value, req, res, next) => {} // Function to trigger on success, does not override request handler
}
}
}
```
Valid types that can be used are: ```bool```, ```number```, ```string```.
Arrays of each type can also be used: ```bool[]```, ```number[]```, ```string[]```.
In addition there's are special types ```any``` and ```*```, which will be treated as strings.
Support for complex objects is only possible in body requests and is tbd. (no support in this plugin so far)
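As an end-to-end sketch combining both formats (the route, parameter names and defaults are made up for illustration, not taken from the test suite):
```
router.post('/greet', {
    description: 'Greets one or more users',
    params: {
        names: 'string[]',      // required array of strings
        greeting: {             // optional string with a default and length limits
            type: 'string',
            default: 'hello',
            required: false,
            min: 2,
            max: 20
        }
    }
}, (req, res) => {
    res.end(req.args.greeting + ' ' + req.args.names.join(', '));
});
```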
For more examples check out [api-router.test.js](test/api-router.test.js) and [version-router.test.js](test/version-router.test.js) in the test directory
## TODO
* Overlapping api configurations don't work yet. The plan is to merge overlapping configs and make all params optional.
* Better formatting for the endpoint call with versions.
* More test coverage for using both version and api config at the same time.
| apache-2.0 |
svagionitis/aws-sdk-cpp | aws-cpp-sdk-rds/include/aws/rds/model/CreateDBParameterGroupResult.h | 2627 | /*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#pragma once
#include <aws/rds/RDS_EXPORTS.h>
#include <aws/rds/model/DBParameterGroup.h>
#include <aws/rds/model/ResponseMetadata.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Xml
{
class XmlDocument;
} // namespace Xml
} // namespace Utils
namespace RDS
{
namespace Model
{
class AWS_RDS_API CreateDBParameterGroupResult
{
public:
CreateDBParameterGroupResult();
CreateDBParameterGroupResult(const AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
CreateDBParameterGroupResult& operator=(const AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
inline const DBParameterGroup& GetDBParameterGroup() const{ return m_dBParameterGroup; }
inline void SetDBParameterGroup(const DBParameterGroup& value) { m_dBParameterGroup = value; }
inline void SetDBParameterGroup(DBParameterGroup&& value) { m_dBParameterGroup = std::move(value); }
inline CreateDBParameterGroupResult& WithDBParameterGroup(const DBParameterGroup& value) { SetDBParameterGroup(value); return *this;}
inline CreateDBParameterGroupResult& WithDBParameterGroup(DBParameterGroup&& value) { SetDBParameterGroup(std::move(value)); return *this;}
inline const ResponseMetadata& GetResponseMetadata() const{ return m_responseMetadata; }
inline void SetResponseMetadata(const ResponseMetadata& value) { m_responseMetadata = value; }
inline void SetResponseMetadata(ResponseMetadata&& value) { m_responseMetadata = std::move(value); }
inline CreateDBParameterGroupResult& WithResponseMetadata(const ResponseMetadata& value) { SetResponseMetadata(value); return *this;}
inline CreateDBParameterGroupResult& WithResponseMetadata(ResponseMetadata&& value) { SetResponseMetadata(std::move(value)); return *this;}
private:
DBParameterGroup m_dBParameterGroup;
ResponseMetadata m_responseMetadata;
};
} // namespace Model
} // namespace RDS
} // namespace Aws | apache-2.0 |
ExclamationLabs/struts-1.3.10_docs | struts-taglib/apidocs/org/apache/struts/taglib/bean/ParameterTei.html.md | 7255 | ------------------------------------------------------------------------
<span id="navbar_top"></span> [](#skip-navbar_top "Skip navigation links")
<table>
<colgroup>
<col width="50%" />
<col width="50%" />
</colgroup>
<tbody>
<tr class="odd">
<td align="left"><span id="navbar_top_firstrow"></span>
<table>
<tbody>
<tr class="odd">
<td align="left"><a href="../../../../../overview-summary.html.md"><strong>Overview</strong></a> </td>
<td align="left"><a href="package-summary.html.md"><strong>Package</strong></a> </td>
<td align="left"> <strong>Class</strong> </td>
<td align="left"><a href="class-use/ParameterTei.html.md"><strong>Use</strong></a> </td>
<td align="left"><a href="package-tree.html.md"><strong>Tree</strong></a> </td>
<td align="left"><a href="../../../../../deprecated-list.html.md"><strong>Deprecated</strong></a> </td>
<td align="left"><a href="../../../../../index-all.html.md"><strong>Index</strong></a> </td>
<td align="left"><a href="../../../../../help-doc.html.md"><strong>Help</strong></a> </td>
</tr>
</tbody>
</table></td>
<td align="left"></td>
</tr>
<tr class="even">
<td align="left"> <a href="../../../../../org/apache/struts/taglib/bean/ParameterTag.html.md" title="class in org.apache.struts.taglib.bean"><strong>PREV CLASS</strong></a> <a href="../../../../../org/apache/struts/taglib/bean/ResourceTag.html" title="class in org.apache.struts.taglib.bean"><strong>NEXT CLASS</strong></a></td>
<td align="left"><a href="../../../../../index.html.md?org/apache/struts/taglib/bean/ParameterTei.html"><strong>FRAMES</strong></a> <a href="ParameterTei.html"><strong>NO FRAMES</strong></a>
<a href="../../../../../allclasses-noframe.html.md"><strong>All Classes</strong></a></td>
</tr>
<tr class="odd">
<td align="left">SUMMARY: NESTED | FIELD | <a href="#constructor_summary">CONSTR</a> | <a href="#method_summary">METHOD</a></td>
<td align="left">DETAIL: FIELD | <a href="#constructor_detail">CONSTR</a> | <a href="#method_detail">METHOD</a></td>
</tr>
</tbody>
</table>
<span id="skip-navbar_top"></span>
------------------------------------------------------------------------
org.apache.struts.taglib.bean
Class ParameterTei
-----------------------------
java.lang.Object
javax.servlet.jsp.tagext.TagExtraInfo
org.apache.struts.taglib.bean.ParameterTei
------------------------------------------------------------------------
public class ParameterTei
extends [TagExtraInfo](http://java.sun.com/j2ee/1.4/docs/api/javax/servlet/jsp/tagext/TagExtraInfo.html.md?is-external=true "class or interface in javax.servlet.jsp.tagext")
Implementation of `TagExtraInfo` for the **parameter** tag, identifying the scripting object(s) to be made visible.
**Version:**
$Rev: 471754 $ $Date: 2004-10-16 12:38:42 -0400 (Sat, 16 Oct 2004) $
------------------------------------------------------------------------
<span id="constructor_summary"></span>
| **Constructor Summary** |
|-------------------------|
| ` ParameterTei()`
|
<span id="method_summary"></span>
**Method Summary**
` VariableInfo[]`
` getVariableInfo(TagData data)`
Return information about the scripting variables to be created.
<span id="methods_inherited_from_class_javax.servlet.jsp.tagext.TagExtraInfo"></span>
| **Methods inherited from class javax.servlet.jsp.tagext.[TagExtraInfo](http://java.sun.com/j2ee/1.4/docs/api/javax/servlet/jsp/tagext/TagExtraInfo.html.md?is-external=true "class or interface in javax.servlet.jsp.tagext")** |
|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `getTagInfo, isValid, setTagInfo` |
<span id="methods_inherited_from_class_java.lang.Object"></span>
| **Methods inherited from class java.lang.[Object](http://java.sun.com/j2se/1.4.2/docs/api/java/lang/Object.html.md?is-external=true "class or interface in java.lang")** |
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait` |
<span id="constructor_detail"></span>
**Constructor Detail**
### ParameterTei
public ParameterTei()
<span id="method_detail"></span>
**Method Detail**
### getVariableInfo
public VariableInfo[] getVariableInfo(TagData data)
Return information about the scripting variables to be created.
**Overrides:**
`getVariableInfo` in class `TagExtraInfo`
Copyright © 2000-2008 [Apache Software Foundation](http://www.apache.org/). All Rights Reserved.
| apache-2.0 |
googleapis/discovery-artifact-manager | clients/php/google-api-php-client-services/src/Google/Service/Logging/Resource/OrganizationsSinks.php | 5423 | <?php
/*
* Copyright 2016 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
/**
* The "sinks" collection of methods.
* Typical usage is:
* <code>
* $loggingService = new Google_Service_Logging(...);
* $sinks = $loggingService->sinks;
* </code>
*/
class Google_Service_Logging_Resource_OrganizationsSinks extends Google_Service_Resource
{
/**
* Creates a sink. (sinks.create)
*
* @param string $parent Required. The resource in which to create the sink.
* Example: `"projects/my-project-id"`. The new sink must be provided in the
* request.
* @param Google_Service_Logging_LogSink $postBody
* @param array $optParams Optional parameters.
*
* @opt_param bool uniqueWriterIdentity Optional. Whether the sink will have a
* dedicated service account returned in the sink's writer_identity. Set this
* field to be true to export logs from one project to a different project. This
* field is ignored for non-project sinks (e.g. organization sinks) because
* those sinks are required to have dedicated service accounts.
* @return Google_Service_Logging_LogSink
*/
public function create($parent, Google_Service_Logging_LogSink $postBody, $optParams = array())
{
$params = array('parent' => $parent, 'postBody' => $postBody);
$params = array_merge($params, $optParams);
return $this->call('create', array($params), "Google_Service_Logging_LogSink");
}
/**
* Deletes a sink. (sinks.delete)
*
* @param string $sinkName Required. The resource name of the sink to delete,
* including the parent resource and the sink identifier. Example: `"projects
* /my-project-id/sinks/my-sink-id"`. It is an error if the sink does not
* exist.
* @param array $optParams Optional parameters.
* @return Google_Service_Logging_LoggingEmpty
*/
public function delete($sinkName, $optParams = array())
{
$params = array('sinkName' => $sinkName);
$params = array_merge($params, $optParams);
return $this->call('delete', array($params), "Google_Service_Logging_LoggingEmpty");
}
/**
* Gets a sink. (sinks.get)
*
* @param string $sinkName Required. The resource name of the sink to return.
* Example: `"projects/my-project-id/sinks/my-sink-id"`.
* @param array $optParams Optional parameters.
* @return Google_Service_Logging_LogSink
*/
public function get($sinkName, $optParams = array())
{
$params = array('sinkName' => $sinkName);
$params = array_merge($params, $optParams);
return $this->call('get', array($params), "Google_Service_Logging_LogSink");
}
/**
* Lists sinks. (sinks.listOrganizationsSinks)
*
* @param string $parent Required. The resource name where this sink was
* created. Example: `"projects/my-logging-project"`.
* @param array $optParams Optional parameters.
*
* @opt_param int pageSize Optional. The maximum number of results to return
* from this request. Non-positive values are ignored. The presence of
* `nextPageToken` in the response indicates that more results might be
* available.
* @opt_param string pageToken Optional. If present, then retrieve the next
* batch of results from the preceding call to this method. `pageToken` must be
* the value of `nextPageToken` from the previous response. The values of other
* method parameters should be identical to those in the previous call.
* @return Google_Service_Logging_ListSinksResponse
*/
public function listOrganizationsSinks($parent, $optParams = array())
{
$params = array('parent' => $parent);
$params = array_merge($params, $optParams);
return $this->call('list', array($params), "Google_Service_Logging_ListSinksResponse");
}
/**
* Updates or creates a sink. (sinks.update)
*
* @param string $sinkName Required. The resource name of the sink to update,
* including the parent resource and the sink identifier. If the sink does not
* exist, this method creates the sink. Example: `"projects/my-project-id/sinks
* /my-sink-id"`.
* @param Google_Service_Logging_LogSink $postBody
* @param array $optParams Optional parameters.
*
* @opt_param bool uniqueWriterIdentity Optional. Whether the sink will have a
* dedicated service account returned in the sink's writer_identity. Set this
* field to be true to export logs from one project to a different project. This
* field is ignored for non-project sinks (e.g. organization sinks) because
* those sinks are required to have dedicated service accounts.
* @return Google_Service_Logging_LogSink
*/
public function update($sinkName, Google_Service_Logging_LogSink $postBody, $optParams = array())
{
$params = array('sinkName' => $sinkName, 'postBody' => $postBody);
$params = array_merge($params, $optParams);
return $this->call('update', array($params), "Google_Service_Logging_LogSink");
}
}
| apache-2.0 |
tangfeixiong/go-for-kubernetes | https0x3A0x2F0x2Fdocs.docker.com0x2Fdocsarchive/https0x3A0x2F0x2Fyum.dockerproject.org0x2Frepo0x2Fmain/README.md | 3880 | # Mirrror docker engine repository
gpg
```
[vagrant@localhost https0x3A0x2F0x2Fapt.dockerproject.org0x2Frepo0x2Fmain]$ ./gpg-curl.sh
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 1648 100 1648 0 0 675 0 0:00:02 0:00:02 --:--:-- 675
```
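The gpg-curl.sh helper itself is not included here; a plausible minimal sketch (the key URL and output location are assumptions based on the directory name, not taken from the actual script) would be:
```
#!/bin/bash
# Mirror the repository signing key so packages fetched from the mirror can be verified.
curl -O https://apt.dockerproject.org/gpg
```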
## Fedora23
repodata
```
[vagrant@localhost https0x3A0x2F0x2Fyum.dockerproject.org0x2Frepo0x2Fmain]$ ./fedora23-repodata-curl.sh
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 9121 100 9121 0 0 2225 0 0:00:04 0:00:04 --:--:-- 2225
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 357k 100 357k 0 0 94271 0 0:00:03 0:00:03 --:--:-- 94273
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 1344k 100 1344k 0 0 47233 0 0:00:29 0:00:29 --:--:-- 57027
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 9088 100 9088 0 0 5762 0 0:00:01 0:00:01 --:--:-- 5762
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 27116 100 27116 0 0 20668 0 0:00:01 0:00:01 --:--:-- 20683
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 23893 100 23893 0 0 14236 0 0:00:01 0:00:01 --:--:-- 14230
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 2990 100 2990 0 0 2579 0 0:00:01 0:00:01 --:--:-- 2579
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 801 100 801 0 0 595 0 0:00:01 0:00:01 --:--:-- 595
```
packages greater than docker engine v1.10
```
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 13.2M 100 13.2M 0 0 47362 0 0:04:53 0:04:53 --:--:-- 40675
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 19.0M 100 19.0M 0 0 56611 0 0:05:53 0:05:53 --:--:-- 57389
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 4779k 100 4779k 0 0 64620 0 0:01:15 0:01:15 --:--:-- 61004
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 5121k 100 5121k 0 0 60247 0 0:01:27 0:01:27 --:--:-- 65723
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 33186 100 33186 0 0 5687 0 0:00:05 0:00:05 --:--:-- 7708
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 33202 100 33202 0 0 13369 0 0:00:02 0:00:02 --:--:-- 13366
``` | apache-2.0 |
alexsh/cw-omnibus | JVM/InterpreterService/src/com/commonsware/abj/interp/InterpreterService.java | 4412 | /***
Copyright (c) 2008-2012 CommonsWare, LLC
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy
of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required
by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License.
Covered in detail in the book _The Busy Coder's Guide to Android Development_
https://commonsware.com/Android
*/
package com.commonsware.abj.interp;
import android.app.Activity;
import android.app.IntentService;
import android.app.PendingIntent;
import android.content.Intent;
import android.os.Bundle;
import android.util.Log;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.HashMap;
public class InterpreterService extends IntentService {
public static final String SCRIPT="_script";
public static final String BUNDLE="_bundle";
public static final String RESULT="_result";
public static final String BROADCAST_ACTION="com.commonsware.abj.interp.BROADCAST_ACTION";
public static final String BROADCAST_PACKAGE="com.commonsware.abj.interp.BROADCAST_PACKAGE";
public static final String PENDING_RESULT="com.commonsware.abj.interp.PENDING_RESULT";
public static final String RESULT_CODE="com.commonsware.abj.interp.RESULT_CODE";
public static final String ERROR="com.commonsware.abj.interp.ERROR";
public static final String TRACE="com.commonsware.abj.interp.TRACE";
public static final int SUCCESS=1337;
public static final int FAILURE=-1;
private HashMap<String, I_Interpreter> interpreters=new HashMap<String, I_Interpreter>();
public InterpreterService() {
super("InterpreterService");
}
@Override
protected void onHandleIntent(Intent intent) {
String action=intent.getAction();
I_Interpreter interpreter=interpreters.get(action);
if (interpreter==null) {
try {
interpreter=(I_Interpreter)Class.forName(action).newInstance();
interpreters.put(action, interpreter);
}
catch (Throwable t) {
Log.e("InterpreterService", "Error creating interpreter", t);
}
}
if (interpreter==null) {
failure(intent, "Could not create interpreter: "+intent.getAction());
}
else {
try {
success(intent, interpreter.executeScript(intent.getBundleExtra(BUNDLE)));
}
catch (Throwable t) {
Log.e("InterpreterService", "Error executing script", t);
try {
failure(intent, t);
}
catch (Throwable t2) {
Log.e("InterpreterService",
"Error returning exception to client",
t2);
}
}
}
}
private void success(Intent intent, Bundle result) {
Intent data=new Intent();
data.putExtras(result);
data.putExtra(RESULT_CODE, SUCCESS);
send(intent, data);
}
private void failure(Intent intent, String message) {
Intent data=new Intent();
data.putExtra(ERROR, message);
data.putExtra(RESULT_CODE, FAILURE);
send(intent, data);
}
private void failure(Intent intent, Throwable t) {
Intent data=new Intent();
data.putExtra(ERROR, t.getMessage());
data.putExtra(TRACE, getStackTrace(t));
data.putExtra(RESULT_CODE, FAILURE);
send(intent, data);
}
private void send(Intent intent, Intent data) {
String broadcast=intent.getStringExtra(BROADCAST_ACTION);
if (broadcast==null) {
PendingIntent pi=(PendingIntent)intent.getParcelableExtra(PENDING_RESULT);
if (pi!=null) {
try {
pi.send(this, Activity.RESULT_OK, data);
}
catch (PendingIntent.CanceledException e) {
// no-op -- client must be gone
}
}
}
else {
data.setPackage(intent.getStringExtra(BROADCAST_PACKAGE));
data.setAction(broadcast);
sendBroadcast(data);
}
}
private String getStackTrace(Throwable t) {
final StringWriter result=new StringWriter();
final PrintWriter printWriter=new PrintWriter(result);
t.printStackTrace(printWriter);
return(result.toString());
}
} | apache-2.0 |
hlecuanda/paco | Paco/src/com/pacoapp/paco/os/RingtoneUtil.java | 12217 | package com.pacoapp.paco.os;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.pacoapp.paco.R;
import com.pacoapp.paco.UserPreferences;
import android.app.Activity;
import android.content.ContentResolver;
import android.content.ContentValues;
import android.content.Context;
import android.content.Intent;
import android.database.Cursor;
import android.media.Ringtone;
import android.media.RingtoneManager;
import android.net.Uri;
import android.os.Build;
import android.provider.MediaStore;
public class RingtoneUtil {
private static Logger Log = LoggerFactory.getLogger(RingtoneUtil.class);
private static final String RINGTONE_TITLE_COLUMN_NAME = "title";
private static final String PACO_BARK_RINGTONE_TITLE = "Paco Bark";
private static final String BARK_RINGTONE_FILENAME = "deepbark_trial.mp3";
public static final String ALTERNATE_RINGTONE_FILENAME = "PBSRingtone_2.mp3";
public static final String ALTERNATE_RINGTONE_TITLE = "Paco Alternate Alert";
public static final String ALTERNATE_RINGTONE_TITLE_V2 = "Paco Alternate Alert Tone";
public static final String ALTERNATE_RINGTONE_TITLE_V2_FULLPATH = "/assets/ringtone/Paco Alternate Alert Tone";
private Context context;
private UserPreferences userPreferences;
public static final int RINGTONE_REQUESTCODE = 945;
public RingtoneUtil(Context context) {
super();
this.context = context.getApplicationContext();
}
public void XXXinstallPacoBarkRingtone() {
userPreferences = new UserPreferences(context);
if (userPreferences.hasInstalledPacoBarkRingtone()) {
return;
}
File f = copyRingtoneFromAssetsToSdCard(BARK_RINGTONE_FILENAME);
if (f == null) {
return;
}
insertRingtoneFile(f);
}
public void installPacoBarkRingtone() {
UserPreferences userPreferences = new UserPreferences(context);
if (!userPreferences.hasInstalledAlternateRingtone()) {
installRingtone(userPreferences, ALTERNATE_RINGTONE_FILENAME, ALTERNATE_RINGTONE_TITLE, true);
}
// only try once
userPreferences.setAlternateRingtoneInstalled();
if (!userPreferences.hasInstalledPacoBarkRingtone()) {
installRingtone(userPreferences, BARK_RINGTONE_FILENAME, PACO_BARK_RINGTONE_TITLE, false);
}
// only try once
userPreferences.setPacoBarkRingtoneInstalled();
}
public void installRingtone(UserPreferences userPreferences, String ringtoneFilename, String ringtoneTitle, boolean altRingtone) {
File f = copyRingtoneFromAssetsToSdCard(ringtoneFilename);
if (f == null) {
return;
}
ContentValues values = createBarkRingtoneDatabaseEntry(f, ringtoneTitle);
Uri uri = MediaStore.Audio.Media.getContentUriForPath(f.getAbsolutePath());
ContentResolver mediaStoreContentProvider = context.getContentResolver();
// Note: ideally we would query only MediaStore.MediaColumns.TITLE and match in the selection,
// but the provider returns null for the TITLE value, so scan all rows and compare titles here.
Cursor existingRingtoneCursor = mediaStoreContentProvider.query(uri, null, null, null, null);
boolean alreadyInstalled = false;
while (existingRingtoneCursor.moveToNext()) {
int titleColumnIndex = existingRingtoneCursor.getColumnIndex(RINGTONE_TITLE_COLUMN_NAME);
String existingRingtoneTitle = existingRingtoneCursor.getString(titleColumnIndex);
if (existingRingtoneTitle.equals(ringtoneTitle)) {
alreadyInstalled = true;
}
}
existingRingtoneCursor.close();
if (!alreadyInstalled) {
Uri newUri = mediaStoreContentProvider.insert(uri, values);
if (newUri != null) {
if (!altRingtone) {
userPreferences.setRingtoneUri(newUri.toString());
userPreferences.setRingtoneName(ringtoneTitle);
} else {
userPreferences.setAltRingtoneUri(newUri.toString());
userPreferences.setAltRingtoneName(ALTERNATE_RINGTONE_TITLE);
}
}
}
}
private File copyRingtoneFromAssetsToSdCard(String ringtoneFilename) {
InputStream fis = null;
OutputStream fos = null;
try {
fis = context.getAssets().open(ringtoneFilename);
if (fis == null) {
return null;
}
File path = new File(android.os.Environment.getExternalStorageDirectory().getAbsolutePath()
+ "/Android/data/" + context.getPackageName() + "/");
if (!path.exists()) {
path.mkdirs();
}
File f = new File(path, ringtoneFilename);
fos = new FileOutputStream(f);
byte[] buf = new byte[1024];
int len;
while ((len = fis.read(buf)) > 0) {
fos.write(buf, 0, len);
}
return f;
} catch (FileNotFoundException e) {
Log.error("Could not create ringtone file on sd card. Error = " + e.getMessage());
} catch (IOException e) {
Log.error("Either Could not open ringtone from assets. Or could not write to sd card. Error = " + e.getMessage());
return null;
} finally {
if (fos != null) {
try {
fos.close();
} catch (IOException e) {
Log.error("could not close sd card file handle. Error = " + e.getMessage());
}
}
if (fis != null) {
try {
fis.close();
} catch (IOException e) {
Log.error("could not close asset file handle. Error = " + e.getMessage());
}
}
}
return null;
}
private ContentValues createBarkRingtoneDatabaseEntry(File f, String ringtoneTitle) {
ContentValues values = new ContentValues();
values.put(MediaStore.MediaColumns.DATA, f.getAbsolutePath());
values.put(MediaStore.MediaColumns.TITLE, ringtoneTitle);
values.put(MediaStore.MediaColumns.SIZE, f.length());
values.put(MediaStore.MediaColumns.MIME_TYPE, "audio/mp3");
values.put(MediaStore.Audio.Media.ARTIST, "Paco");
// values.put(MediaStore.Audio.Media.DURATION, ""); This is not needed
values.put(MediaStore.Audio.Media.IS_RINGTONE, true);
values.put(MediaStore.Audio.Media.IS_NOTIFICATION, true);
values.put(MediaStore.Audio.Media.IS_ALARM, false);
values.put(MediaStore.Audio.Media.IS_MUSIC, false);
return values;
}
/**
* From Stackoverflow issue:
* http://stackoverflow.com/questions/22184729/sqliteconstraintexception-thrown-when-trying-to-insert
* @param filename
* @return
*/
Uri insertRingtoneFile(File filename) {
Uri toneUri = MediaStore.Audio.Media.getContentUriForPath(filename.getAbsolutePath());
// SDK 11+ has the Files store, which already indexed... everything
// We need the file's URI though, so we'll be forced to query
if (Build.VERSION.SDK_INT >= 11) {
Uri uri = null;
Uri filesUri = MediaStore.Files.getContentUri("external");
String[] projection = {MediaStore.MediaColumns._ID, MediaStore.MediaColumns.TITLE};
String selection = MediaStore.MediaColumns.DATA + " = ?";
String[] args = {filename.getAbsolutePath()};
Cursor c = context.getContentResolver().query(filesUri, projection, selection, args, null);
// We expect a single unique record to be returned, since _data is unique
if (c.getCount() == 1) {
c.moveToFirst();
long rowId = c.getLong(c.getColumnIndex(MediaStore.MediaColumns._ID));
String title = c.getString(c.getColumnIndex(MediaStore.MediaColumns.TITLE));
c.close();
uri = MediaStore.Files.getContentUri("external", rowId);
// Since all this stuff was added automatically, it might not have the metadata you want,
// like Title, or Artist, or IsRingtone
if (!title.equals(PACO_BARK_RINGTONE_TITLE)) {
ContentValues values = new ContentValues();
values.put(MediaStore.MediaColumns.TITLE, PACO_BARK_RINGTONE_TITLE);
if (context.getContentResolver().update(toneUri, values, null, null) < 1) {
Log.error("could not update ringtone metadata");
}
// Apparently this is best practice, although I have no idea what the Media Scanner
// does with the new data
context.sendBroadcast(new Intent(Intent.ACTION_MEDIA_SCANNER_SCAN_FILE, toneUri));
}
}
else if (c.getCount() == 0) {
// I suppose the MediaScanner hasn't run yet, we'll insert it
// ... ommitted
}
else {
throw new UnsupportedOperationException(); // it's expected to be unique!
}
return uri;
}
// For the legacy way, I'm assuming that the file we're working with is in a .nomedia
// folder, so we are the ones who created it in the MediaStore. If this isn't the case,
// consider querying for it and updating the existing record. You should store the URIs
// you create in case you need to delete them from the MediaStore, otherwise you're a
// litter bug :P
else {
ContentValues values = new ContentValues();
values.put(MediaStore.MediaColumns.DATA, filename.getAbsolutePath());
values.put(MediaStore.MediaColumns.SIZE, filename.length());
values.put(MediaStore.MediaColumns.DISPLAY_NAME, filename.getName());
values.put(MediaStore.MediaColumns.TITLE, PACO_BARK_RINGTONE_TITLE);
values.put(MediaStore.MediaColumns.MIME_TYPE, "audio/mpeg3");
values.put(MediaStore.Audio.Media.ARTIST, "Paco App");
values.put(MediaStore.Audio.Media.IS_RINGTONE, true);
values.put(MediaStore.Audio.Media.IS_NOTIFICATION, true);
values.put(MediaStore.Audio.Media.IS_ALARM, true);
values.put(MediaStore.Audio.Media.IS_MUSIC, false);
Uri newToneUri = context.getContentResolver().insert(MediaStore.Audio.Media.EXTERNAL_CONTENT_URI, values);
userPreferences.setRingtoneUri(newToneUri.toString());
userPreferences.setRingtoneName(PACO_BARK_RINGTONE_TITLE);
userPreferences.setPacoBarkRingtoneInstalled();
// Apparently this is best practice, although I have no idea what the Media Scanner
// does with the new data
context.sendBroadcast(new Intent(Intent.ACTION_MEDIA_SCANNER_SCAN_FILE, newToneUri));
return newToneUri;
}
}
public static boolean isOkRingtoneResult(int requestCode, int resultCode) {
return requestCode == RINGTONE_REQUESTCODE && resultCode == Activity.RESULT_OK;
}
public static void updateRingtone(Intent data, final Activity activity) {
Uri uri = data.getParcelableExtra(RingtoneManager.EXTRA_RINGTONE_PICKED_URI);
final UserPreferences userPreferences = new UserPreferences(activity);
if (uri != null) {
userPreferences.setRingtoneUri(uri.toString());
String name= getNameOfRingtone(activity, uri);
userPreferences.setRingtoneName(name);
} else {
userPreferences.clearRingtone();
}
}
public static void launchRingtoneChooserFor(final Activity activity) {
UserPreferences userPreferences = new UserPreferences(activity);
String uri = userPreferences.getRingtoneUri();
Intent intent = new Intent(RingtoneManager.ACTION_RINGTONE_PICKER);
intent.putExtra(RingtoneManager.EXTRA_RINGTONE_TYPE, RingtoneManager.TYPE_NOTIFICATION);
intent.putExtra(RingtoneManager.EXTRA_RINGTONE_TITLE, R.string.select_signal_tone);
intent.putExtra(RingtoneManager.EXTRA_RINGTONE_SHOW_SILENT, false);
intent.putExtra(RingtoneManager.EXTRA_RINGTONE_SHOW_DEFAULT, true);
if (uri != null) {
intent.putExtra(RingtoneManager.EXTRA_RINGTONE_EXISTING_URI, Uri.parse(uri));
} else {
intent.putExtra(RingtoneManager.EXTRA_RINGTONE_EXISTING_URI,
RingtoneManager.getDefaultUri(RingtoneManager.TYPE_NOTIFICATION));
}
activity.startActivityForResult(intent, RingtoneUtil.RINGTONE_REQUESTCODE);
}
public static String getNameOfRingtone(Context context, Uri uri) {
Ringtone ringtone = RingtoneManager.getRingtone(context, uri);
return ringtone.getTitle(context);
}
}
| apache-2.0 |
GetTabster/Tabster | Tabster/Plugins/PluginInstance.cs | 2948 | #region
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Reflection;
using Tabster.Core.Plugins;
using Tabster.Utilities;
#endregion
namespace Tabster.Plugins
{
public class PluginInstance
{
private bool _enabled;
private List<Type> _types = new List<Type>();
public PluginInstance(Assembly assembly, ITabsterPlugin plugin, FileInfo fileInfo)
{
Assembly = assembly;
Plugin = plugin;
FileInfo = fileInfo;
}
public Assembly Assembly { get; private set; }
public ITabsterPlugin Plugin { get; private set; }
public FileInfo FileInfo { get; private set; }
public Boolean Enabled
{
get { return _enabled; }
set
{
if (value)
{
try
{
Plugin.Activate();
}
catch (Exception ex)
{
Logging.GetLogger().Error(string.Format("Error occured while activating plugin: {0}", FileInfo.FullName), ex);
}
}
else
{
try
{
Plugin.Deactivate();
}
catch (Exception ex)
{
Logging.GetLogger().Error(string.Format("Error occured while deactivating plugin: {0}", FileInfo.FullName), ex);
}
}
_enabled = value;
}
}
public IEnumerable<T> GetClassInstances<T>()
{
var instances = new List<T>();
var cType = typeof (T);
_types.Clear();
try
{
_types = Assembly.GetTypes().Where(x => x.IsPublic && !x.IsAbstract && !x.IsInterface).ToList();
}
catch (Exception ex)
{
Logging.GetLogger().Error(string.Format("Error occured while loading plugin types: {0}", FileInfo.FullName), ex);
}
foreach (var type in _types)
{
if (cType.IsAssignableFrom(type))
{
try
{
var instance = (T) Activator.CreateInstance(type);
instances.Add(instance);
}
catch (Exception ex)
{
Logging.GetLogger().Error(string.Format("Error occured while creating plugin type instance: '{0}' in {1}", type.FullName, FileInfo.FullName), ex);
}
}
}
return instances;
}
public bool Contains(Type type)
{
return _types.Contains(type);
}
}
} | apache-2.0 |
liebharc/clFFT | docs/bindings/cl_sys/cl_h/CL_MEM_READ_WRITE.v.html | 365 | <!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="refresh" content="0;URL=constant.CL_MEM_READ_WRITE.html">
</head>
<body>
<p>Redirecting to <a href="constant.CL_MEM_READ_WRITE.html">constant.CL_MEM_READ_WRITE.html</a>...</p>
<script>location.replace("constant.CL_MEM_READ_WRITE.html" + location.search + location.hash);</script>
</body>
</html> | apache-2.0 |
jayway/rest-assured | rest-assured/src/main/java/io/restassured/config/ParamConfig.java | 6343 | /*
* Copyright 2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.restassured.config;
import io.restassured.internal.common.assertion.AssertParameter;
import static io.restassured.config.ParamConfig.UpdateStrategy.MERGE;
import static io.restassured.config.ParamConfig.UpdateStrategy.REPLACE;
/**
* Param config determines how different parameter types in REST Assured should be updated when adding multiple parameters
* of the same type with the same name.
*/
public class ParamConfig implements Config {
private final boolean userConfigured;
private final UpdateStrategy queryParamsUpdateStrategy;
private final UpdateStrategy formParamsUpdateStrategy;
private final UpdateStrategy requestParameterUpdateStrategy;
/**
* Create a new instance where all parameters are merged
*/
public ParamConfig() {
this(MERGE, MERGE, MERGE, false);
}
/**
* Create a new instance and specify update strategies for all parameter types.
*
* @param queryParamsUpdateStrategy The update strategy for query parameters
* @param formParamsUpdateStrategy The update strategy for form parameters
* @param requestParameterUpdateStrategy The update strategy for request parameters
*/
public ParamConfig(UpdateStrategy queryParamsUpdateStrategy,
UpdateStrategy formParamsUpdateStrategy,
UpdateStrategy requestParameterUpdateStrategy) {
this(queryParamsUpdateStrategy, formParamsUpdateStrategy, requestParameterUpdateStrategy, true);
}
private ParamConfig(UpdateStrategy queryParamsUpdateStrategy, UpdateStrategy formParamsUpdateStrategy,
UpdateStrategy requestParameterUpdateStrategy, boolean userConfigured) {
AssertParameter.notNull(queryParamsUpdateStrategy, "Query param update strategy");
AssertParameter.notNull(requestParameterUpdateStrategy, "Request param update strategy");
AssertParameter.notNull(formParamsUpdateStrategy, "Form param update strategy");
this.queryParamsUpdateStrategy = queryParamsUpdateStrategy;
this.formParamsUpdateStrategy = formParamsUpdateStrategy;
this.requestParameterUpdateStrategy = requestParameterUpdateStrategy;
this.userConfigured = userConfigured;
}
/**
* Merge all parameter types.
*
* @return A new instance of {@link ParamConfig}
*/
public ParamConfig mergeAllParameters() {
return new ParamConfig(MERGE, MERGE, MERGE, true);
}
/**
* Replace parameter values for all kinds of parameter types.
*
* @return A new instance of {@link ParamConfig}
*/
public ParamConfig replaceAllParameters() {
return new ParamConfig(REPLACE, REPLACE, REPLACE, true);
}
/**
* Set form parameter update strategy to the given value.
*
* @param updateStrategy The update strategy to use for form parameters
* @return A new instance of {@link ParamConfig}
*/
public ParamConfig formParamsUpdateStrategy(UpdateStrategy updateStrategy) {
return new ParamConfig(queryParamsUpdateStrategy, updateStrategy, requestParameterUpdateStrategy, true);
}
/**
* Set request parameter update strategy to the given value.
* A "request parameter" is a parameter that will turn into a form or query parameter depending on the request. For example:
* <p>
* given().param("name", "value").when().get("/x"). ..
* </p>
*
* @param updateStrategy The update strategy to use for request parameters
* @return A new instance of {@link ParamConfig}
*/
public ParamConfig requestParamsUpdateStrategy(UpdateStrategy updateStrategy) {
return new ParamConfig(queryParamsUpdateStrategy, formParamsUpdateStrategy, updateStrategy, true);
}
/**
* Set query parameter update strategy to the given value.
*
* @param updateStrategy The update strategy to use for query parameters
* @return A new instance of {@link ParamConfig}
*/
public ParamConfig queryParamsUpdateStrategy(UpdateStrategy updateStrategy) {
return new ParamConfig(updateStrategy, formParamsUpdateStrategy, requestParameterUpdateStrategy, true);
}
/**
* @return The update strategy for form parameters
*/
public UpdateStrategy formParamsUpdateStrategy() {
return formParamsUpdateStrategy;
}
/**
* @return The update strategy for request parameters
*/
public UpdateStrategy requestParamsUpdateStrategy() {
return requestParameterUpdateStrategy;
}
/**
* @return The update strategy for query parameters
*/
public UpdateStrategy queryParamsUpdateStrategy() {
return queryParamsUpdateStrategy;
}
/**
* {@inheritDoc}
*/
public boolean isUserConfigured() {
return userConfigured;
}
/**
* The update strategy to use for a parameter type
*/
public enum UpdateStrategy {
/**
* Parameters with the same name are merged.
*/
MERGE,
/**
* Parameters with the same name are replaced with the latest applied value.
*/
REPLACE
}
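/*
 * Added illustration (not part of the original file): the effect of the two strategies
 * on a query string when two parameters with the same name are added. given() and
 * queryParam() are the standard REST Assured DSL calls referenced in the Javadoc above.
 *
 *   MERGE:   given().queryParam("x", "1").queryParam("x", "2")  ->  ?x=1&x=2
 *   REPLACE: given().queryParam("x", "1").queryParam("x", "2")  ->  ?x=2
 */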
/**
* @return A static way to create a new ParamConfig instance without calling "new" explicitly. Mainly for syntactic sugar.
*/
public static ParamConfig paramConfig() {
return new ParamConfig();
}
/**
* Syntactic sugar.
*
* @return The same ParamConfig instance.
*/
public ParamConfig and() {
return this;
}
/**
* Syntactic sugar.
*
* @return The same ParamConfig instance.
*/
public ParamConfig with() {
return this;
}
}
| apache-2.0 |
hugs/selenium | docs/api/java/org/openqa/selenium/internal/Cleanly.html | 11764 | <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.6.0_0) on Thu Apr 22 22:07:30 BST 2010 -->
<TITLE>
Cleanly
</TITLE>
<META NAME="date" CONTENT="2010-04-22">
<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../stylesheet.css" TITLE="Style">
<SCRIPT type="text/javascript">
function windowTitle()
{
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Cleanly";
}
}
</SCRIPT>
<NOSCRIPT>
</NOSCRIPT>
</HEAD>
<BODY BGCOLOR="white" onload="windowTitle();">
<HR>
<!-- ========= START OF TOP NAVBAR ======= -->
<A NAME="navbar_top"><!-- --></A>
<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_top_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Class</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../org/openqa/selenium/internal/Base64Encoder.html" title="class in org.openqa.selenium.internal"><B>PREV CLASS</B></A>
<A HREF="../../../../org/openqa/selenium/internal/FileHandler.html" title="class in org.openqa.selenium.internal"><B>NEXT CLASS</B></A></FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../index.html?org/openqa/selenium/internal/Cleanly.html" target="_top"><B>FRAMES</B></A>
<A HREF="Cleanly.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
<TR>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
SUMMARY: NESTED | FIELD | <A HREF="#constructor_summary">CONSTR</A> | <A HREF="#method_summary">METHOD</A></FONT></TD>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
DETAIL: FIELD | <A HREF="#constructor_detail">CONSTR</A> | <A HREF="#method_detail">METHOD</A></FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_top"></A>
<!-- ========= END OF TOP NAVBAR ========= -->
<HR>
<!-- ======== START OF CLASS DATA ======== -->
<H2>
<FONT SIZE="-1">
org.openqa.selenium.internal</FONT>
<BR>
Class Cleanly</H2>
<PRE>
java.lang.Object
<IMG SRC="../../../../resources/inherit.gif" ALT="extended by "><B>org.openqa.selenium.internal.Cleanly</B>
</PRE>
<HR>
<DL>
<DT><PRE>public class <B>Cleanly</B><DT>extends java.lang.Object</DL>
</PRE>
<P>
<HR>
<P>
<!-- ======== CONSTRUCTOR SUMMARY ======== -->
<A NAME="constructor_summary"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
<B>Constructor Summary</B></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><CODE><B><A HREF="../../../../org/openqa/selenium/internal/Cleanly.html#Cleanly()">Cleanly</A></B>()</CODE>
<BR>
</TD>
</TR>
</TABLE>
<!-- ========== METHOD SUMMARY =========== -->
<A NAME="method_summary"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
<B>Method Summary</B></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE>static void</CODE></FONT></TD>
<TD><CODE><B><A HREF="../../../../org/openqa/selenium/internal/Cleanly.html#close(java.nio.channels.Channel)">close</A></B>(java.nio.channels.Channel toClose)</CODE>
<BR>
</TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE>static void</CODE></FONT></TD>
<TD><CODE><B><A HREF="../../../../org/openqa/selenium/internal/Cleanly.html#close(java.io.InputStream)">close</A></B>(java.io.InputStream toClose)</CODE>
<BR>
</TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE>static void</CODE></FONT></TD>
<TD><CODE><B><A HREF="../../../../org/openqa/selenium/internal/Cleanly.html#close(java.io.OutputStream)">close</A></B>(java.io.OutputStream toClose)</CODE>
<BR>
</TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE>static void</CODE></FONT></TD>
<TD><CODE><B><A HREF="../../../../org/openqa/selenium/internal/Cleanly.html#close(java.io.Reader)">close</A></B>(java.io.Reader reader)</CODE>
<BR>
</TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE>static void</CODE></FONT></TD>
<TD><CODE><B><A HREF="../../../../org/openqa/selenium/internal/Cleanly.html#close(java.io.Writer)">close</A></B>(java.io.Writer reader)</CODE>
<BR>
</TD>
</TR>
</TABLE>
<A NAME="methods_inherited_from_class_java.lang.Object"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#EEEEFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left"><B>Methods inherited from class java.lang.Object</B></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><CODE>clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait</CODE></TD>
</TR>
</TABLE>
<P>
<!-- ========= CONSTRUCTOR DETAIL ======== -->
<A NAME="constructor_detail"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="1"><FONT SIZE="+2">
<B>Constructor Detail</B></FONT></TH>
</TR>
</TABLE>
<A NAME="Cleanly()"><!-- --></A><H3>
Cleanly</H3>
<PRE>
public <B>Cleanly</B>()</PRE>
<DL>
</DL>
<!-- ============ METHOD DETAIL ========== -->
<A NAME="method_detail"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="1"><FONT SIZE="+2">
<B>Method Detail</B></FONT></TH>
</TR>
</TABLE>
<A NAME="close(java.nio.channels.Channel)"><!-- --></A><H3>
close</H3>
<PRE>
public static void <B>close</B>(java.nio.channels.Channel toClose)</PRE>
<DL>
<DD><DL>
</DL>
</DD>
</DL>
<HR>
<A NAME="close(java.io.InputStream)"><!-- --></A><H3>
close</H3>
<PRE>
public static void <B>close</B>(java.io.InputStream toClose)</PRE>
<DL>
<DD><DL>
</DL>
</DD>
</DL>
<HR>
<A NAME="close(java.io.OutputStream)"><!-- --></A><H3>
close</H3>
<PRE>
public static void <B>close</B>(java.io.OutputStream toClose)</PRE>
<DL>
<DD><DL>
</DL>
</DD>
</DL>
<HR>
<A NAME="close(java.io.Reader)"><!-- --></A><H3>
close</H3>
<PRE>
public static void <B>close</B>(java.io.Reader reader)</PRE>
<DL>
<DD><DL>
</DL>
</DD>
</DL>
<HR>
<A NAME="close(java.io.Writer)"><!-- --></A><H3>
close</H3>
<PRE>
public static void <B>close</B>(java.io.Writer reader)</PRE>
<DL>
<DD><DL>
</DL>
</DD>
</DL>
<!-- ========= END OF CLASS DATA ========= -->
<HR>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<A NAME="navbar_bottom"><!-- --></A>
<A HREF="#skip-navbar_bottom" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_bottom_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Class</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../org/openqa/selenium/internal/Base64Encoder.html" title="class in org.openqa.selenium.internal"><B>PREV CLASS</B></A>
<A HREF="../../../../org/openqa/selenium/internal/FileHandler.html" title="class in org.openqa.selenium.internal"><B>NEXT CLASS</B></A></FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../index.html?org/openqa/selenium/internal/Cleanly.html" target="_top"><B>FRAMES</B></A>
<A HREF="Cleanly.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
<TR>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
SUMMARY: NESTED | FIELD | <A HREF="#constructor_summary">CONSTR</A> | <A HREF="#method_summary">METHOD</A></FONT></TD>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
DETAIL: FIELD | <A HREF="#constructor_detail">CONSTR</A> | <A HREF="#method_detail">METHOD</A></FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_bottom"></A>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<HR>
</BODY>
</HTML>
| apache-2.0 |
jdel/clairctl | config/config_test.go | 2090 | package config
import (
"fmt"
"os"
"testing"
"github.com/jgsqware/clairctl/test"
"github.com/spf13/viper"
"gopkg.in/yaml.v2"
)
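// Added note (not in the original source): defaultValues and customValues are YAML
// fixtures mirroring the clairctl config structure; each test unmarshals one of them
// into the expected struct and compares it with the values loaded through Init().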
const defaultValues = `
clair:
uri: http://localhost
port: 6060
healthport: 6061
report:
path: reports
format: html
auth:
insecureskipverify: true
clairctl:
ip: ""
tempfolder: /tmp/clairctl
port: 0
`
const customValues = `
clair:
uri: http://clair
port: 6061
healthport: 6062
report:
path: reports/test
format: json
auth:
insecureskipverify: false
clairctl:
ip: "localhost"
tempfolder: /tmp/clairctl/test
port: 64157
`
func TestInitDefault(t *testing.T) {
Init("", "INFO")
cfg := values()
var expected config
err := yaml.Unmarshal([]byte(defaultValues), &expected)
if err != nil {
t.Fatal(err)
}
if cfg != expected {
t.Error("Default values are not correct")
}
viper.Reset()
}
func TestInitCustomLocal(t *testing.T) {
tmpfile := test.CreateConfigFile(customValues, "clairctl.yml", ".")
defer os.Remove(tmpfile) // clean up
fmt.Println(tmpfile)
Init("", "INFO")
cfg := values()
var expected config
err := yaml.Unmarshal([]byte(customValues), &expected)
if err != nil {
t.Fatal(err)
}
if cfg != expected {
t.Error("values are not correct")
}
viper.Reset()
}
func TestInitCustomHome(t *testing.T) {
tmpfile := test.CreateConfigFile(customValues, "clairctl.yml", ClairctlHome())
defer os.Remove(tmpfile) // clean up
fmt.Println(tmpfile)
Init("", "INFO")
cfg := values()
var expected config
err := yaml.Unmarshal([]byte(customValues), &expected)
if err != nil {
t.Fatal(err)
}
if cfg != expected {
t.Error("values are not correct")
}
viper.Reset()
}
func TestInitCustom(t *testing.T) {
tmpfile := test.CreateConfigFile(customValues, "clairctl.yml", "/tmp")
defer os.Remove(tmpfile) // clean up
fmt.Println(tmpfile)
Init(tmpfile, "INFO")
cfg := values()
var expected config
err := yaml.Unmarshal([]byte(customValues), &expected)
if err != nil {
t.Fatal(err)
}
if cfg != expected {
t.Error("values are not correct")
}
viper.Reset()
}
| apache-2.0 |
sgmiller/hiveelements | core/src/main/java/common/handlers/admin/PlayerBackupMetadata.java | 349 | package com.beecavegames.common.handlers.admin;
import java.io.Serializable;
import org.joda.time.DateTime;
import lombok.AllArgsConstructor;
@AllArgsConstructor
public class PlayerBackupMetadata implements Serializable {
private static final long serialVersionUID = -485633840234547452L;
public long beeId;
public DateTime timestamp;
}
| apache-2.0 |
ketan/docs.go.cd | content/navigation/_index.md | 245 | ---
title: Navigation
url: /navigation/
---
<html>
<body>
<div style="text-align:center;" class="my-block">
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<h1>GoCD TOUR</h1>
</div>
</body>
</html> | apache-2.0 |
codemercenary/autowiring | contrib/autoboost/autoboost/mpl/for_each.hpp | 3170 |
#ifndef AUTOBOOST_MPL_FOR_EACH_HPP_INCLUDED
#define AUTOBOOST_MPL_FOR_EACH_HPP_INCLUDED
// Copyright Aleksey Gurtovoy 2000-2008
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/mpl for documentation.
// $Id$
// $Date$
// $Revision$
#include <autoboost/mpl/is_sequence.hpp>
#include <autoboost/mpl/begin_end.hpp>
#include <autoboost/mpl/apply.hpp>
#include <autoboost/mpl/bool.hpp>
#include <autoboost/mpl/next_prior.hpp>
#include <autoboost/mpl/deref.hpp>
#include <autoboost/mpl/identity.hpp>
#include <autoboost/mpl/assert.hpp>
#include <autoboost/mpl/aux_/config/gpu.hpp>
#include <autoboost/mpl/aux_/unwrap.hpp>
#include <autoboost/type_traits/is_same.hpp>
#include <autoboost/utility/value_init.hpp>
namespace autoboost { namespace mpl {
namespace aux {
template< bool done = true >
struct for_each_impl
{
template<
typename Iterator
, typename LastIterator
, typename TransformFunc
, typename F
>
AUTOBOOST_MPL_CFG_GPU_ENABLED
static void execute(
Iterator*
, LastIterator*
, TransformFunc*
, F
)
{
}
};
template<>
struct for_each_impl<false>
{
template<
typename Iterator
, typename LastIterator
, typename TransformFunc
, typename F
>
AUTOBOOST_MPL_CFG_GPU_ENABLED
static void execute(
Iterator*
, LastIterator*
, TransformFunc*
, F f
)
{
typedef typename deref<Iterator>::type item;
typedef typename apply1<TransformFunc,item>::type arg;
// dwa 2002/9/10 -- make sure not to invoke undefined behavior
// when we pass arg.
value_initialized<arg> x;
aux::unwrap(f, 0)(autoboost::get(x));
typedef typename mpl::next<Iterator>::type iter;
for_each_impl<autoboost::is_same<iter,LastIterator>::value>
::execute( static_cast<iter*>(0), static_cast<LastIterator*>(0), static_cast<TransformFunc*>(0), f);
}
};
} // namespace aux
// agurt, 17/mar/02: pointer default parameters are necessary to workaround
// MSVC 6.5 function template signature's mangling bug
template<
typename Sequence
, typename TransformOp
, typename F
>
AUTOBOOST_MPL_CFG_GPU_ENABLED
inline
void for_each(F f, Sequence* = 0, TransformOp* = 0)
{
AUTOBOOST_MPL_ASSERT(( is_sequence<Sequence> ));
typedef typename begin<Sequence>::type first;
typedef typename end<Sequence>::type last;
aux::for_each_impl< autoboost::is_same<first,last>::value >
::execute(static_cast<first*>(0), static_cast<last*>(0), static_cast<TransformOp*>(0), f);
}
template<
typename Sequence
, typename F
>
AUTOBOOST_MPL_CFG_GPU_ENABLED
inline
void for_each(F f, Sequence* = 0)
{
// jfalcou: fully qualifying this call so it doesn't clash with autoboost::phoenix::for_each
// on some compilers -- done on 02/28/2011
autoboost::mpl::for_each<Sequence, identity<> >(f);
}
}}
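// Added illustration (not part of the original header): a minimal sketch of calling
// this algorithm. The print functor and the vector_c sequence are hypothetical, and
// the extra includes (<autoboost/mpl/vector_c.hpp>, <iostream>) are assumed.
//
//   struct print {
//       template< typename T > void operator()(T x) const { std::cout << x << '\n'; }
//   };
//   autoboost::mpl::for_each< autoboost::mpl::vector_c<int,1,2,3> >(print());
//
// Each element is value-initialized and passed to the functor, as implemented above.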
#endif // AUTOBOOST_MPL_FOR_EACH_HPP_INCLUDED
| apache-2.0 |
sjcliu/bloc-jams-angular | dist/scripts/filters/timecode.js | 692 | (function() {
function timecode() {
return function(seconds) {
var seconds = Number.parseFloat(seconds);
if (Number.isNaN(seconds)) {
return '-:--';
}
var wholeSeconds = Math.floor(seconds);
var minutes = Math.floor(wholeSeconds / 60);
var remainingSeconds = wholeSeconds % 60;
var output = minutes + ':';
if (remainingSeconds < 10) {
output += '0';
}
output += remainingSeconds;
return output;
};
}
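// Added examples (not in the original source), derived from the logic above:
//   timecode(4)     -> "0:04"
//   timecode(61.5)  -> "1:01"
//   timecode("abc") -> "-:--"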
angular
.module('blocJams')
.filter('timecode', timecode);
})();
| apache-2.0 |
aywengo/bootzooka | backend/src/main/scala/com/softwaremill/bootzooka/sql/H2ShellConsole.scala | 457 | package com.softwaremill.bootzooka.sql
import com.softwaremill.bootzooka.common.sql.{DatabaseConfig, SqlDatabase}
import com.typesafe.config.ConfigFactory
object H2ShellConsole extends App {
val config = new DatabaseConfig {
def rootConfig = ConfigFactory.load()
}
println("Note: when selecting from tables, enclose the table name in \" \".")
new org.h2.tools.Shell().runTool("-url", SqlDatabase.embeddedConnectionStringFromConfig(config))
}
| apache-2.0 |
sghill/gocd | server/webapp/WEB-INF/rails.new/vendor/bundle/jruby/1.9/gems/rspec-expectations-2.99.2/lib/rspec/matchers/operator_matcher.rb | 3913 | module RSpec
module Matchers
module BuiltIn
class OperatorMatcher
class << self
def registry
@registry ||= {}
end
def register(klass, operator, matcher)
registry[klass] ||= {}
registry[klass][operator] = matcher
end
def unregister(klass, operator)
registry[klass] && registry[klass].delete(operator)
end
def get(klass, operator)
klass.ancestors.each { |ancestor|
matcher = registry[ancestor] && registry[ancestor][operator]
return matcher if matcher
}
nil
end
end
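# Added illustration (not part of the original file): a custom matcher class can be
# attached to an operator for a given class; MyOrderedMatcher below is hypothetical.
#
#   RSpec::Matchers::BuiltIn::OperatorMatcher.register(Array, '=~', MyOrderedMatcher)
#
# With that in place, `actual.should =~ expected` on an Array delegates to
# MyOrderedMatcher.new(expected) via the lookup in `get` above.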
def initialize(actual)
@actual = actual
end
def self.use_custom_matcher_or_delegate(operator)
define_method(operator) do |expected|
if !has_non_generic_implementation_of?(operator) && matcher = OperatorMatcher.get(@actual.class, operator)
@actual.__send__(::RSpec::Matchers.last_should, matcher.new(expected))
else
eval_match(@actual, operator, expected)
end
end
negative_operator = operator.sub(/^=/, '!')
if negative_operator != operator && respond_to?(negative_operator)
define_method(negative_operator) do |expected|
opposite_should = ::RSpec::Matchers.last_should == :should ? :should_not : :should
raise "RSpec does not support `#{::RSpec::Matchers.last_should} #{negative_operator} expected`. " +
"Use `#{opposite_should} #{operator} expected` instead."
end
end
end
['==', '===', '=~', '>', '>=', '<', '<='].each do |operator|
use_custom_matcher_or_delegate operator
end
def fail_with_message(message)
RSpec::Expectations.fail_with(message, @expected, @actual)
end
def description
"#{@operator} #{@expected.inspect}"
end
private
if Method.method_defined?(:owner) # 1.8.6 lacks Method#owner :-(
def has_non_generic_implementation_of?(op)
Expectations.method_handle_for(@actual, op).owner != ::Kernel
rescue NameError
false
end
else
def has_non_generic_implementation_of?(op)
# This is a bit of a hack, but:
#
# {}.method(:=~).to_s # => "#<Method: Hash(Kernel)#=~>"
#
# In the absence of Method#owner, this is the best we
# can do to see if the method comes from Kernel.
!Expectations.method_handle_for(@actual, op).to_s.include?('(Kernel)')
rescue NameError
false
end
end
def eval_match(actual, operator, expected)
::RSpec::Matchers.last_matcher = self
@operator, @expected = operator, expected
__delegate_operator(actual, operator, expected)
end
end
class PositiveOperatorMatcher < OperatorMatcher
def __delegate_operator(actual, operator, expected)
if actual.__send__(operator, expected)
true
elsif ['==','===', '=~'].include?(operator)
fail_with_message("expected: #{expected.inspect}\n got: #{actual.inspect} (using #{operator})")
else
fail_with_message("expected: #{operator} #{expected.inspect}\n got: #{operator.gsub(/./, ' ')} #{actual.inspect}")
end
end
end
class NegativeOperatorMatcher < OperatorMatcher
def __delegate_operator(actual, operator, expected)
return false unless actual.__send__(operator, expected)
return fail_with_message("expected not: #{operator} #{expected.inspect}\n got: #{operator.gsub(/./, ' ')} #{actual.inspect}")
end
end
end
end
end
| apache-2.0 |