repo_name
stringlengths
4
116
path
stringlengths
3
942
size
stringlengths
1
7
content
stringlengths
3
1.05M
license
stringclasses
15 values
intfrr/OnTime
AppleWatchSBB/View/HomeScreenItem.h
328
// // HomeScreenItem.h // AppleWatchSBB // // Created by Dylan Marriott on 19.06.15. // Copyright (c) 2015 Dylan Marriott. All rights reserved. // #import <Foundation/Foundation.h> @interface HomeScreenItem : UIView - (instancetype)initWithFrame:(CGRect)frame title:(NSString *)title subtitle:(NSString *)subtitle; @end
apache-2.0
mcanthony/moonstone
samples/DataListSample.html
1300
<!DOCTYPE html> <html> <head> <meta charset="UTF-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"> <title>DataList Sample</title> <!-- --> <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no"> <!-- Less.js (uncomment for client-side rendering of less stylesheets; leave commented to use only CSS) <script src="../../../enyo/tools/minifier/node_modules/less/dist/less-1.7.0.min.js"></script> <script src="../../../enyo/tools/minifier/node_modules/less-plugin-resolution-independence/lib/resolution-independence.js"></script> <script> ;(function(){ var less = window.less || {}; var ri = new enyoLessRiPlugin(); less.plugins = [ri]; window.less = less; }()) </script> --> <!-- --> <script src="../../../enyo/enyo.js" type="text/javascript"></script> <script src="../../layout/package.js" type="text/javascript"></script> <script src="../../moonstone/package.js" type="text/javascript"></script> <script src="../../spotlight/package.js" type="text/javascript"></script> <!-- --> <script src="DataListSample.js" type="text/javascript"></script> <!-- --> </head> <body> <script type="text/javascript"> new moon.sample.DataListSample().renderInto(document.body); </script> </body> </html>
apache-2.0
google/auto
value/src/test/java/com/google/auto/value/processor/AutoValueCompilationTest.java
139312
/* * Copyright 2018 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.auto.value.processor; import static com.google.common.truth.Truth.assertThat; import static com.google.testing.compile.CompilationSubject.assertThat; import static com.google.testing.compile.CompilationSubject.compilations; import static com.google.testing.compile.Compiler.javac; import static java.util.stream.Collectors.joining; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.truth.Expect; import com.google.testing.compile.Compilation; import com.google.testing.compile.JavaFileObjects; import java.io.IOException; import java.io.PrintWriter; import java.io.UncheckedIOException; import java.io.Writer; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.util.Arrays; import java.util.Set; import javax.annotation.processing.AbstractProcessor; import javax.annotation.processing.RoundEnvironment; import javax.annotation.processing.SupportedAnnotationTypes; import javax.lang.model.SourceVersion; import javax.lang.model.element.Element; import javax.lang.model.element.TypeElement; import javax.lang.model.element.TypeParameterElement; import javax.lang.model.util.ElementFilter; import javax.tools.JavaFileObject; import org.junit.Rule; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; /** @author [email protected] (Éamonn McManus) */ @RunWith(JUnit4.class) public class AutoValueCompilationTest { @Rule public final Expect expect = Expect.create(); @Test public void simpleSuccess() { // Positive test case that ensures we generate the expected code for at least one case. // Most AutoValue code-generation tests are functional, meaning that we check that the generated // code does the right thing rather than checking what it looks like, but this test checks that // we are not generating correct but weird code. JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " public abstract long buh();", "", " public static Baz create(long buh) {", " return new AutoValue_Baz(buh);", " }", "}"); JavaFileObject expectedOutput = JavaFileObjects.forSourceLines( "foo.bar.AutoValue_Baz", "package foo.bar;", "", GeneratedImport.importGeneratedAnnotationType(), "", "@Generated(\"" + AutoValueProcessor.class.getName() + "\")", "final class AutoValue_Baz extends Baz {", " private final long buh;", "", " AutoValue_Baz(long buh) {", " this.buh = buh;", " }", "", " @Override public long buh() {", " return buh;", " }", "", " @Override public String toString() {", " return \"Baz{\"", " + \"buh=\" + buh", " + \"}\";", " }", "", " @Override public boolean equals(Object o) {", " if (o == this) {", " return true;", " }", " if (o instanceof Baz) {", " Baz that = (Baz) o;", " return this.buh == that.buh();", " }", " return false;", " }", "", " @Override public int hashCode() {", " int h$ = 1;", " h$ *= 1000003;", " h$ ^= (int) ((buh >>> 32) ^ buh);", " return h$;", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor()) .withOptions("-A" + Nullables.NULLABLE_OPTION + "=") .compile(javaFileObject); assertThat(compilation) .generatedSourceFile("foo.bar.AutoValue_Baz") .hasSourceEquivalentTo(expectedOutput); } @Test public void importTwoWays() { // Test that referring to the same class in two different ways does not confuse the import logic // into thinking it is two different classes and that therefore it can't import. The code here // is nonsensical but successfully reproduces a real problem, which is that a TypeMirror that is // extracted using Elements.getTypeElement(name).asType() does not compare equal to one that is // extracted from ExecutableElement.getReturnType(), even though Types.isSameType considers them // equal. So unless we are careful, the java.util.Arrays that we import explicitly to use its // methods will appear different from the java.util.Arrays that is the return type of the // arrays() method here. JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "import java.util.Arrays;", "", "@AutoValue", "public abstract class Baz {", " @SuppressWarnings(\"mutable\")", " public abstract int[] ints();", " public abstract Arrays arrays();", "", " public static Baz create(int[] ints, Arrays arrays) {", " return new AutoValue_Baz(ints, arrays);", " }", "}"); JavaFileObject expectedOutput = JavaFileObjects.forSourceLines( "foo.bar.AutoValue_Baz", "package foo.bar;", "", "import java.util.Arrays;", GeneratedImport.importGeneratedAnnotationType(), "", "@Generated(\"" + AutoValueProcessor.class.getName() + "\")", "final class AutoValue_Baz extends Baz {", " private final int[] ints;", " private final Arrays arrays;", "", " AutoValue_Baz(int[] ints, Arrays arrays) {", " if (ints == null) {", " throw new NullPointerException(\"Null ints\");", " }", " this.ints = ints;", " if (arrays == null) {", " throw new NullPointerException(\"Null arrays\");", " }", " this.arrays = arrays;", " }", "", " @SuppressWarnings(\"mutable\")", " @Override public int[] ints() {", " return ints;", " }", "", " @Override public Arrays arrays() {", " return arrays;", " }", "", " @Override public String toString() {", " return \"Baz{\"", " + \"ints=\" + Arrays.toString(ints) + \", \"", " + \"arrays=\" + arrays", " + \"}\";", " }", "", " @Override public boolean equals(Object o) {", " if (o == this) {", " return true;", " }", " if (o instanceof Baz) {", " Baz that = (Baz) o;", " return Arrays.equals(this.ints, (that instanceof AutoValue_Baz) " + "? ((AutoValue_Baz) that).ints : that.ints())", " && this.arrays.equals(that.arrays());", " }", " return false;", " }", "", " @Override public int hashCode() {", " int h$ = 1;", " h$ *= 1000003;", " h$ ^= Arrays.hashCode(ints);", " h$ *= 1000003;", " h$ ^= arrays.hashCode();", " return h$;", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor()) .withOptions("-A" + Nullables.NULLABLE_OPTION + "=") .compile(javaFileObject); assertThat(compilation) .generatedSourceFile("foo.bar.AutoValue_Baz") .hasSourceEquivalentTo(expectedOutput); } @Test public void testNoWarningsFromGenerics() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "import com.google.auto.value.AutoValue;", "@AutoValue", "public abstract class Baz<T extends Number, U extends T> {", " public abstract T t();", " public abstract U u();", " public static <T extends Number, U extends T> Baz<T, U> create(T t, U u) {", " return new AutoValue_Baz<T, U>(t, u);", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor()) .withOptions("-Xlint:-processing", "-implicit:none") .compile(javaFileObject); assertThat(compilation).succeededWithoutWarnings(); } @Test public void testNestedParameterizedTypesWithTypeAnnotations() { JavaFileObject annotFileObject = JavaFileObjects.forSourceLines( "foo.bar.Annot", "package foo.bar;", "", "import java.lang.annotation.ElementType;", "import java.lang.annotation.Target;", "", "@Target(ElementType.TYPE_USE)", "public @interface Annot {", " int value();", "}"); JavaFileObject outerFileObject = JavaFileObjects.forSourceLines( "foo.baz.OuterWithTypeParam", "package foo.baz;", "", "public class OuterWithTypeParam<T extends Number> {", " public class InnerWithTypeParam<U> {}", "}"); JavaFileObject nestyFileObject = JavaFileObjects.forSourceLines( "com.example.Nesty", "package com.example;", "", "import com.google.auto.value.AutoValue;", "import foo.bar.Annot;", "import foo.baz.OuterWithTypeParam;", "", "@AutoValue", "abstract class Nesty {", " abstract @Annot(1) OuterWithTypeParam<@Annot(2) Double>", " .@Annot(3) InnerWithTypeParam<@Annot(4) String> inner();", "", " static Nesty of(", " @Annot(1) OuterWithTypeParam<@Annot(2) Double>", " .@Annot(3) InnerWithTypeParam<@Annot(4) String> inner) {", " return new AutoValue_Nesty(inner);", " }", "}"); JavaFileObject expectedOutput = JavaFileObjects.forSourceLines( "com.example.AutoValue_Nesty", "package com.example;", "", "import foo.bar.Annot;", "import foo.baz.OuterWithTypeParam;", GeneratedImport.importGeneratedAnnotationType(), "", "@Generated(\"com.google.auto.value.processor.AutoValueProcessor\")", "final class AutoValue_Nesty extends Nesty {", " private final @Annot(1) OuterWithTypeParam<@Annot(2) Double>" + ".@Annot(3) InnerWithTypeParam<@Annot(4) String> inner;", "", " AutoValue_Nesty(", " @Annot(1) OuterWithTypeParam<@Annot(2) Double>" + ".@Annot(3) InnerWithTypeParam<@Annot(4) String> inner) {", " if (inner == null) {", " throw new NullPointerException(\"Null inner\");", " }", " this.inner = inner;", " }", "", " @Override", " @Annot(1) OuterWithTypeParam<@Annot(2) Double>" + ".@Annot(3) InnerWithTypeParam<@Annot(4) String> inner() {", " return inner;", " }", "", " @Override", " public String toString() {", " return \"Nesty{\"", " + \"inner=\" + inner", " + \"}\";", " }", "", " @Override", " public boolean equals(Object o) {", " if (o == this) {", " return true;", " }", " if (o instanceof Nesty) {", " Nesty that = (Nesty) o;", " return this.inner.equals(that.inner());", " }", " return false;", " }", "", " @Override", " public int hashCode() {", " int h$ = 1;", " h$ *= 1000003;", " h$ ^= inner.hashCode();", " return h$;", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor()) .withOptions( "-Xlint:-processing", "-implicit:none", "-A" + Nullables.NULLABLE_OPTION + "=") .compile(annotFileObject, outerFileObject, nestyFileObject); assertThat(compilation).succeededWithoutWarnings(); assertThat(compilation) .generatedSourceFile("com.example.AutoValue_Nesty") .hasSourceEquivalentTo(expectedOutput); } // Tests that type annotations are correctly copied from the bounds of type parameters in the // @AutoValue class to the bounds of the corresponding parameters in the generated class. For // example, if we have `@AutoValue abstract class Foo<T extends @NullableType Object>`, then the // generated class should be `class AutoValue_Foo<T extends @NullableType Object> extends Foo<T>`. // Some buggy versions of javac do not report type annotations correctly in this context. // AutoValue can't copy them if it can't see them, so we make a special annotation processor to // detect if we are in the presence of this bug and if so we don't fail. @Test public void testTypeParametersWithAnnotationsOnBounds() { @SupportedAnnotationTypes("*") class CompilerBugProcessor extends AbstractProcessor { boolean checkedAnnotationsOnTypeBounds; boolean reportsAnnotationsOnTypeBounds; @Override public SourceVersion getSupportedSourceVersion() { return SourceVersion.latestSupported(); } @Override public boolean process(Set<? extends TypeElement> annotations, RoundEnvironment roundEnv) { if (roundEnv.processingOver()) { TypeElement test = processingEnv.getElementUtils().getTypeElement("com.example.Test"); TypeParameterElement t = test.getTypeParameters().get(0); this.checkedAnnotationsOnTypeBounds = true; this.reportsAnnotationsOnTypeBounds = !t.getBounds().get(0).getAnnotationMirrors().isEmpty(); } return false; } } CompilerBugProcessor compilerBugProcessor = new CompilerBugProcessor(); JavaFileObject nullableTypeFileObject = JavaFileObjects.forSourceLines( "foo.bar.NullableType", "package foo.bar;", "", "import java.lang.annotation.ElementType;", "import java.lang.annotation.Target;", "", "@Target(ElementType.TYPE_USE)", "public @interface NullableType {}"); JavaFileObject autoValueFileObject = JavaFileObjects.forSourceLines( "com.example.Test", "package com.example;", "", "import com.google.auto.value.AutoValue;", "import foo.bar.NullableType;", "", "@AutoValue", "abstract class Test<T extends @NullableType Object & @NullableType Cloneable> {}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), compilerBugProcessor) .withOptions("-Xlint:-processing", "-implicit:none") .compile(nullableTypeFileObject, autoValueFileObject); assertThat(compilation).succeededWithoutWarnings(); assertThat(compilerBugProcessor.checkedAnnotationsOnTypeBounds).isTrue(); if (compilerBugProcessor.reportsAnnotationsOnTypeBounds) { assertThat(compilation) .generatedSourceFile("com.example.AutoValue_Test") .contentsAsUtf8String() .contains( "class AutoValue_Test<T extends @NullableType Object & @NullableType Cloneable>" + " extends Test<T> {"); } } // In the following few tests, see AutoValueProcessor.validateMethods for why unrecognized // abstract methods provoke only a warning rather than an error. Compilation will fail anyway // because the generated class is not abstract and does not implement the unrecognized methods. @Test public void testAbstractVoid() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "import com.google.auto.value.AutoValue;", "@AutoValue", "public abstract class Baz {", " public abstract void foo();", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation).failed(); assertThat(compilation) .hadWarningContaining( "Abstract method is neither a property getter nor a Builder converter") .inFile(javaFileObject) .onLineContaining("void foo()"); } @Test public void testAbstractWithParams() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "import com.google.auto.value.AutoValue;", "@AutoValue", "public abstract class Baz {", " public abstract int foo(int bar);", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation).failed(); assertThat(compilation) .hadWarningContaining( "Abstract method is neither a property getter nor a Builder converter") .inFile(javaFileObject) .onLineContaining("int foo(int bar)"); } @Test public void testPrimitiveArrayWarning() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "import com.google.auto.value.AutoValue;", "@AutoValue", "public abstract class Baz {", " public abstract byte[] bytes();", " public static Baz create(byte[] bytes) {", " return new AutoValue_Baz(bytes);", " }", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation).succeeded(); assertThat(compilation) .hadWarningContaining( "An @AutoValue property that is a primitive array returns the original array") .inFile(javaFileObject) .onLineContaining("byte[] bytes()"); } @Test public void testPrimitiveArrayWarningFromParent() { // If the array-valued property is defined by an ancestor then we shouldn't try to attach // the warning to the method that defined it, but rather to the @AutoValue class itself. JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "import com.google.auto.value.AutoValue;", "public abstract class Baz {", " public abstract byte[] bytes();", "", " @AutoValue", " public abstract static class BazChild extends Baz {", " public static BazChild create(byte[] bytes) {", " return new AutoValue_Baz_BazChild(bytes);", " }", " }", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation).succeeded(); assertThat(compilation) .hadWarningContainingMatch( "An @AutoValue property that is a primitive array returns the original array" + ".*foo\\.bar\\.Baz\\.bytes") .inFile(javaFileObject) .onLineContaining("BazChild extends Baz"); } @Test public void testPrimitiveArrayWarningSuppressed() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "import com.google.auto.value.AutoValue;", "@AutoValue", "public abstract class Baz {", " @SuppressWarnings(\"mutable\")", " public abstract byte[] bytes();", " public static Baz create(byte[] bytes) {", " return new AutoValue_Baz(bytes);", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor()) .withOptions("-Xlint:-processing", "-implicit:none") .compile(javaFileObject); assertThat(compilation).succeededWithoutWarnings(); } @Test public void autoValueMustBeClass() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public interface Baz {", " String buh();", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation) .hadErrorContaining("@AutoValue only applies to classes") .inFile(javaFileObject) .onLineContaining("interface Baz"); } @Test public void autoValueMustNotBeFinal() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public final class Baz {", " public Baz create() {", " return new AutoValue_Baz();", " }", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation) .hadErrorContaining("@AutoValue class must not be final") .inFile(javaFileObject) .onLineContaining("class Baz"); } @Test public void autoValueMustBeStatic() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "public class Baz {", " @AutoValue", " public abstract class NotStatic {", " public abstract String buh();", " public NotStatic create(String buh) {", " return new AutoValue_Baz_NotStatic(buh);", " }", " }", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation) .hadErrorContaining("Nested @AutoValue class must be static") .inFile(javaFileObject) .onLineContaining("abstract class NotStatic"); } @Test public void autoValueMustNotBePrivate() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "public class Baz {", " @AutoValue", " private abstract static class Private {", " public abstract String buh();", " public Private create(String buh) {", " return new AutoValue_Baz_Private(buh);", " }", " }", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation) .hadErrorContaining("@AutoValue class must not be private") .inFile(javaFileObject) .onLineContaining("class Private"); } @Test public void autoValueMustBeNotBeNestedInPrivate() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "public class Baz {", " private static class Private {", " @AutoValue", " abstract static class Nested {", " public abstract String buh();", " public Nested create(String buh) {", " return new AutoValue_Baz_Private_Nested(buh);", " }", " }", " }", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation) .hadErrorContaining("@AutoValue class must not be nested in a private class") .inFile(javaFileObject) .onLineContaining("class Nested"); } @Test public void autoValueMustHaveNoArgConstructor() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " Baz(int buh) {}", "", " public abstract int buh();", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation) .hadErrorContaining("@AutoValue class must have a non-private no-arg constructor") .inFile(javaFileObject) .onLineContaining("class Baz"); } @Test public void autoValueMustHaveVisibleNoArgConstructor() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " private Baz() {}", "", " public abstract int buh();", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation) .hadErrorContaining("@AutoValue class must have a non-private no-arg constructor") .inFile(javaFileObject) .onLineContaining("class Baz"); } @Test public void noMultidimensionalPrimitiveArrays() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " public abstract int[][] ints();", "", " public static Baz create(int[][] ints) {", " return new AutoValue_Baz(ints);", " }", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "@AutoValue class cannot define an array-valued property " + "unless it is a primitive array") .inFile(javaFileObject) .onLineContaining("int[][] ints()"); } @Test public void noObjectArrays() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " public abstract String[] strings();", "", " public static Baz create(String[] strings) {", " return new AutoValue_Baz(strings);", " }", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "@AutoValue class cannot define an array-valued property " + "unless it is a primitive array") .inFile(javaFileObject) .onLineContaining("String[] strings()"); } @Test public void annotationOnInterface() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public interface Baz {}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation) .hadErrorContaining("AutoValue only applies to classes") .inFile(javaFileObject) .onLineContaining("interface Baz"); } @Test public void annotationOnEnum() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public enum Baz {}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation) .hadErrorContaining("AutoValue only applies to classes") .inFile(javaFileObject) .onLineContaining("enum Baz"); } @Test public void extendAutoValue() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Outer", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "public class Outer {", " @AutoValue", " static abstract class Parent {", " static Parent create(int randomProperty) {", " return new AutoValue_Outer_Parent(randomProperty);", " }", "", " abstract int randomProperty();", " }", "", " @AutoValue", " static abstract class Child extends Parent {", " static Child create(int randomProperty) {", " return new AutoValue_Outer_Child(randomProperty);", " }", "", " abstract int randomProperty();", " }", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation) .hadErrorContaining("may not extend") .inFile(javaFileObject) .onLineContaining("Child extends Parent"); } @Test public void bogusSerialVersionUID() { String[] mistakes = { "final long serialVersionUID = 1234L", // not static "static long serialVersionUID = 1234L", // not final "static final Long serialVersionUID = 1234L", // not long "static final long serialVersionUID = (Long) 1234L", // not a compile-time constant }; for (String mistake : mistakes) { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz implements java.io.Serializable {", " " + mistake + ";", "", " public abstract int foo();", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); expect .about(compilations()) .that(compilation) .hadErrorContaining("serialVersionUID must be a static final long compile-time constant") .inFile(javaFileObject) .onLineContaining(mistake); } } @Test public void nonExistentSuperclass() { // The main purpose of this test is to check that AutoValueProcessor doesn't crash the // compiler in this case. JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Existent extends NonExistent {", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation) .hadErrorContaining("NonExistent") .inFile(javaFileObject) .onLineContaining("NonExistent"); } @Test public void cannotImplementAnnotation() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.RetentionImpl", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import java.lang.annotation.Retention;", "import java.lang.annotation.RetentionPolicy;", "", "@AutoValue", "public abstract class RetentionImpl implements Retention {", " public static Retention create(RetentionPolicy policy) {", " return new AutoValue_RetentionImpl(policy);", " }", "", " @Override public Class<? extends Retention> annotationType() {", " return Retention.class;", " }", "", " @Override public boolean equals(Object o) {", " return (o instanceof Retention && value().equals((Retention) o).value());", " }", "", " @Override public int hashCode() {", " return (\"value\".hashCode() * 127) ^ value().hashCode();", " }", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation) .hadErrorContaining("may not be used to implement an annotation interface") .inFile(javaFileObject) .onLineContaining("RetentionImpl implements Retention"); } @Test public void missingPropertyType() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " public abstract MissingType missingType();", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation) .hadErrorContaining("MissingType") .inFile(javaFileObject) .onLineContaining("MissingType"); } @Test public void missingGenericPropertyType() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " public abstract MissingType<?> missingType();", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation) .hadErrorContaining("MissingType") .inFile(javaFileObject) .onLineContaining("MissingType"); } @Test public void missingComplexGenericPropertyType() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "import java.util.Map;", "import java.util.Set;", "", "@AutoValue", "public abstract class Baz {", " public abstract Map<Set<?>, MissingType<?>> missingType();", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation) .hadErrorContaining("MissingType") .inFile(javaFileObject) .onLineContaining("MissingType"); } @Test public void missingSuperclassGenericParameter() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz<T extends MissingType<?>> {", " public abstract int foo();", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation) .hadErrorContaining("MissingType") .inFile(javaFileObject) .onLineContaining("MissingType"); } @Test public void nullablePrimitive() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " @interface Nullable {}", " public abstract @Nullable int foo();", "}"); Compilation compilation = javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject); assertThat(compilation) .hadErrorContaining("Primitive types cannot be @Nullable") .inFile(javaFileObject) .onLineContaining("@Nullable int"); } @Test public void correctBuilder() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import com.google.common.base.Optional;", "import com.google.common.collect.ImmutableMap;", "", "import java.util.ArrayList;", "import java.util.List;", "import java.util.Map;", "import javax.annotation.Nullable;", "", "@AutoValue", "public abstract class Baz<T extends Number> {", " public abstract int anInt();", " @SuppressWarnings(\"mutable\")", " public abstract byte[] aByteArray();", " @SuppressWarnings(\"mutable\")", " @Nullable public abstract int[] aNullableIntArray();", " public abstract List<T> aList();", " public abstract ImmutableMap<T, String> anImmutableMap();", " public abstract Optional<String> anOptionalString();", " public abstract NestedAutoValue<T> aNestedAutoValue();", "", " public abstract Builder<T> toBuilder();", "", " @AutoValue.Builder", " public abstract static class Builder<T extends Number> {", " public abstract Builder<T> anInt(int x);", " public abstract Builder<T> aByteArray(byte[] x);", " public abstract Builder<T> aNullableIntArray(@Nullable int[] x);", " public abstract Builder<T> aList(List<T> x);", " public abstract Builder<T> anImmutableMap(Map<T, String> x);", " public abstract ImmutableMap.Builder<T, String> anImmutableMapBuilder();", " public abstract Builder<T> anOptionalString(Optional<String> s);", " public abstract Builder<T> anOptionalString(String s);", " public abstract NestedAutoValue.Builder<T> aNestedAutoValueBuilder();", "", " public Builder<T> aList(ArrayList<T> x) {", // ArrayList should not be imported in the generated class. " return aList((List<T>) x);", " }", "", " public abstract Optional<Integer> anInt();", " public abstract List<T> aList();", " public abstract ImmutableMap<T, String> anImmutableMap();", "", " public abstract Baz<T> build();", " }", "", " public static <T extends Number> Builder<T> builder() {", " return AutoValue_Baz.builder();", " }", "}"); JavaFileObject nestedJavaFileObject = JavaFileObjects.forSourceLines( "foo.bar.NestedAutoValue", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class NestedAutoValue<T extends Number> {", " public abstract T t();", "", " public abstract Builder<T> toBuilder();", "", " @AutoValue.Builder", " public abstract static class Builder<T extends Number> {", " public abstract Builder<T> t(T t);", " public abstract NestedAutoValue<T> build();", " }", "", " public static <T extends Number> Builder<T> builder() {", " return AutoValue_NestedAutoValue.builder();", " }", "}"); JavaFileObject expectedOutput = JavaFileObjects.forSourceLines( "foo.bar.AutoValue_Baz", "package foo.bar;", "", "import com.google.common.base.Optional;", "import com.google.common.collect.ImmutableMap;", "import java.util.Arrays;", "import java.util.List;", "import java.util.Map;", sorted( GeneratedImport.importGeneratedAnnotationType(), "import javax.annotation.Nullable;"), "", "@Generated(\"" + AutoValueProcessor.class.getName() + "\")", "final class AutoValue_Baz<T extends Number> extends Baz<T> {", " private final int anInt;", " private final byte[] aByteArray;", " private final int[] aNullableIntArray;", " private final List<T> aList;", " private final ImmutableMap<T, String> anImmutableMap;", " private final Optional<String> anOptionalString;", " private final NestedAutoValue<T> aNestedAutoValue;", "", " private AutoValue_Baz(", " int anInt,", " byte[] aByteArray,", " @Nullable int[] aNullableIntArray,", " List<T> aList,", " ImmutableMap<T, String> anImmutableMap,", " Optional<String> anOptionalString,", " NestedAutoValue<T> aNestedAutoValue) {", " this.anInt = anInt;", " this.aByteArray = aByteArray;", " this.aNullableIntArray = aNullableIntArray;", " this.aList = aList;", " this.anImmutableMap = anImmutableMap;", " this.anOptionalString = anOptionalString;", " this.aNestedAutoValue = aNestedAutoValue;", " }", "", " @Override public int anInt() {", " return anInt;", " }", "", " @SuppressWarnings(\"mutable\")", " @Override public byte[] aByteArray() {", " return aByteArray;", " }", "", " @SuppressWarnings(\"mutable\")", " @Nullable", " @Override public int[] aNullableIntArray() {", " return aNullableIntArray;", " }", "", " @Override public List<T> aList() {", " return aList;", " }", "", " @Override public ImmutableMap<T, String> anImmutableMap() {", " return anImmutableMap;", " }", "", " @Override public Optional<String> anOptionalString() {", " return anOptionalString;", " }", "", " @Override public NestedAutoValue<T> aNestedAutoValue() {", " return aNestedAutoValue;", " }", "", " @Override public String toString() {", " return \"Baz{\"", " + \"anInt=\" + anInt + \", \"", " + \"aByteArray=\" + Arrays.toString(aByteArray) + \", \"", " + \"aNullableIntArray=\" + Arrays.toString(aNullableIntArray) + \", \"", " + \"aList=\" + aList + \", \"", " + \"anImmutableMap=\" + anImmutableMap + \", \"", " + \"anOptionalString=\" + anOptionalString + \", \"", " + \"aNestedAutoValue=\" + aNestedAutoValue", " + \"}\";", " }", "", " @Override public boolean equals(Object o) {", " if (o == this) {", " return true;", " }", " if (o instanceof Baz) {", " Baz<?> that = (Baz<?>) o;", " return this.anInt == that.anInt()", " && Arrays.equals(this.aByteArray, " + "(that instanceof AutoValue_Baz) " + "? ((AutoValue_Baz<?>) that).aByteArray : that.aByteArray())", " && Arrays.equals(this.aNullableIntArray, " + "(that instanceof AutoValue_Baz) " + "? ((AutoValue_Baz<?>) that).aNullableIntArray : that.aNullableIntArray())", " && this.aList.equals(that.aList())", " && this.anImmutableMap.equals(that.anImmutableMap())", " && this.anOptionalString.equals(that.anOptionalString())", " && this.aNestedAutoValue.equals(that.aNestedAutoValue());", " }", " return false;", " }", "", " @Override public int hashCode() {", " int h$ = 1;", " h$ *= 1000003;", " h$ ^= anInt;", " h$ *= 1000003;", " h$ ^= Arrays.hashCode(aByteArray);", " h$ *= 1000003;", " h$ ^= Arrays.hashCode(aNullableIntArray);", " h$ *= 1000003;", " h$ ^= aList.hashCode();", " h$ *= 1000003;", " h$ ^= anImmutableMap.hashCode();", " h$ *= 1000003;", " h$ ^= anOptionalString.hashCode();", " h$ *= 1000003;", " h$ ^= aNestedAutoValue.hashCode();", " return h$;", " }", "", " @Override public Baz.Builder<T> toBuilder() {", " return new Builder<T>(this);", " }", "", " static final class Builder<T extends Number> extends Baz.Builder<T> {", " private int anInt;", " private byte[] aByteArray;", " private int[] aNullableIntArray;", " private List<T> aList;", " private ImmutableMap.Builder<T, String> anImmutableMapBuilder$;", " private ImmutableMap<T, String> anImmutableMap;", " private Optional<String> anOptionalString = Optional.absent();", " private NestedAutoValue.Builder<T> aNestedAutoValueBuilder$;", " private NestedAutoValue<T> aNestedAutoValue;", " private byte set$0;", "", " Builder() {", " }", "", " private Builder(Baz<T> source) {", " this.anInt = source.anInt();", " this.aByteArray = source.aByteArray();", " this.aNullableIntArray = source.aNullableIntArray();", " this.aList = source.aList();", " this.anImmutableMap = source.anImmutableMap();", " this.anOptionalString = source.anOptionalString();", " this.aNestedAutoValue = source.aNestedAutoValue();", " set$0 = (byte) 0x1;", " }", "", " @Override", " public Baz.Builder<T> anInt(int anInt) {", " this.anInt = anInt;", " set$0 |= 0x1", " return this;", " }", "", " @Override", " public Optional<Integer> anInt() {", " if ((set$0 & 0x1) == 0) {", " return Optional.absent();", " }", " return Optional.of(anInt);", " }", "", " @Override", " public Baz.Builder<T> aByteArray(byte[] aByteArray) {", " if (aByteArray == null) {", " throw new NullPointerException(\"Null aByteArray\");", " }", " this.aByteArray = aByteArray;", " return this;", " }", "", " @Override", " public Baz.Builder<T> aNullableIntArray(@Nullable int[] aNullableIntArray) {", " this.aNullableIntArray = aNullableIntArray;", " return this;", " }", "", " @Override", " public Baz.Builder<T> aList(List<T> aList) {", " if (aList == null) {", " throw new NullPointerException(\"Null aList\");", " }", " this.aList = aList;", " return this;", " }", "", " @Override", " public List<T> aList() {", " if (this.aList == null) {", " throw new IllegalStateException(\"Property \\\"aList\\\" has not been set\");", " }", " return aList;", " }", "", " @Override", " public Baz.Builder<T> anImmutableMap(Map<T, String> anImmutableMap) {", " if (anImmutableMapBuilder$ != null) {", " throw new IllegalStateException(" + "\"Cannot set anImmutableMap after calling anImmutableMapBuilder()\");", " }", " this.anImmutableMap = ImmutableMap.copyOf(anImmutableMap);", " return this;", " }", "", " @Override", " public ImmutableMap.Builder<T, String> anImmutableMapBuilder() {", " if (anImmutableMapBuilder$ == null) {", " if (anImmutableMap == null) {", " anImmutableMapBuilder$ = ImmutableMap.builder();", " } else {", " anImmutableMapBuilder$ = ImmutableMap.builder();", " anImmutableMapBuilder$.putAll(anImmutableMap);", " anImmutableMap = null;", " }", " }", " return anImmutableMapBuilder$;", " }", "", " @Override", " public ImmutableMap<T, String> anImmutableMap() {", " if (anImmutableMapBuilder$ != null) {", " return anImmutableMapBuilder$.buildOrThrow();", " }", " if (anImmutableMap == null) {", " anImmutableMap = ImmutableMap.of();", " }", " return anImmutableMap;", " }", "", " @Override", " public Baz.Builder<T> anOptionalString(Optional<String> anOptionalString) {", " if (anOptionalString == null) {", " throw new NullPointerException(\"Null anOptionalString\");", " }", " this.anOptionalString = anOptionalString;", " return this;", " }", "", " @Override", " public Baz.Builder<T> anOptionalString(String anOptionalString) {", " this.anOptionalString = Optional.of(anOptionalString);", " return this;", " }", "", " @Override", " public NestedAutoValue.Builder<T> aNestedAutoValueBuilder() {", " if (aNestedAutoValueBuilder$ == null) {", " if (aNestedAutoValue == null) {", " aNestedAutoValueBuilder$ = NestedAutoValue.builder();", " } else {", " aNestedAutoValueBuilder$ = aNestedAutoValue.toBuilder();", " aNestedAutoValue = null;", " }", " }", " return aNestedAutoValueBuilder$;", " }", "", " @Override", " public Baz<T> build() {", " if (anImmutableMapBuilder$ != null) {", " this.anImmutableMap = anImmutableMapBuilder$.buildOrThrow();", " } else if (this.anImmutableMap == null) {", " this.anImmutableMap = ImmutableMap.of();", " }", " if (aNestedAutoValueBuilder$ != null) {", " this.aNestedAutoValue = aNestedAutoValueBuilder$.build();", " } else if (this.aNestedAutoValue == null) {", " NestedAutoValue.Builder<T> aNestedAutoValue$builder = " + "NestedAutoValue.builder();", " this.aNestedAutoValue = aNestedAutoValue$builder.build();", " }", " if (set$0 != 0x1", " || this.aByteArray == null", " || this.aList == null) {", " StringBuilder missing = new StringBuilder();", " if ((set$0 & 0x1) == 0) {", " missing.append(\" anInt\");", " }", " if (this.aByteArray == null) {", " missing.append(\" aByteArray\");", " }", " if (this.aList == null) {", " missing.append(\" aList\");", " }", " throw new IllegalStateException(\"Missing required properties:\" + missing);", " }", " return new AutoValue_Baz<T>(", " this.anInt,", " this.aByteArray,", " this.aNullableIntArray,", " this.aList,", " this.anImmutableMap,", " this.anOptionalString,", " this.aNestedAutoValue);", " }", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor()) .withOptions( "-Xlint:-processing", "-implicit:none", "-A" + Nullables.NULLABLE_OPTION + "=") .compile(javaFileObject, nestedJavaFileObject); assertThat(compilation).succeededWithoutWarnings(); assertThat(compilation) .generatedSourceFile("foo.bar.AutoValue_Baz") .hasSourceEquivalentTo(expectedOutput); } @Test public void autoValueBuilderOnTopLevelClass() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Builder", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue.Builder", "public interface Builder {", " Builder foo(int x);", " Object build();", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining("can only be applied to a class or interface inside") .inFile(javaFileObject) .onLineContaining("public interface Builder"); } @Test public void autoValueBuilderNotInsideAutoValue() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "public abstract class Baz {", " abstract int foo();", "", " static Builder builder() {", " return new AutoValue_Baz.Builder();", " }", "", " @AutoValue.Builder", " public interface Builder {", " Builder foo(int x);", " Baz build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining("can only be applied to a class or interface inside") .inFile(javaFileObject) .onLineContaining("public interface Builder"); } @Test public void autoValueBuilderNotStatic() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Example", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "class Example {", " @AutoValue", " abstract static class Baz {", " abstract int foo();", "", " static Builder builder() {", " return new AutoValue_Example_Baz.Builder();", " }", "", " @AutoValue.Builder", " abstract class Builder {", " abstract Builder foo(int x);", " abstract Baz build();", " }", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining("@AutoValue.Builder cannot be applied to a non-static class") .inFile(javaFileObject) .onLineContaining("abstract class Builder"); } @Test public void autoValueBuilderMustHaveNoArgConstructor() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Example", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "class Example {", " @AutoValue", " abstract static class Baz {", " abstract int foo();", "", " static Builder builder() {", " return new AutoValue_Example_Baz.Builder();", " }", "", " @AutoValue.Builder", " abstract static class Builder {", " Builder(int defaultFoo) {}", " abstract Builder foo(int x);", " abstract Baz build();", " }", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining("@AutoValue.Builder class must have a non-private no-arg constructor") .inFile(javaFileObject) .onLineContaining("class Builder"); } @Test public void autoValueBuilderOnEnum() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " abstract int foo();", "", " static Builder builder() {", " return null;", " }", "", " @AutoValue.Builder", " public enum Builder {}", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining("can only apply to a class or an interface") .inFile(javaFileObject) .onLineContaining("public enum Builder"); } @Test public void autoValueBuilderDuplicate() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " @AutoValue.Builder", " public interface Builder1 {", " Baz build();", " }", "", " @AutoValue.Builder", " public interface Builder2 {", " Baz build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining("already has a Builder: foo.bar.Baz.Builder1") .inFile(javaFileObject) .onLineContaining("public interface Builder2"); } @Test public void autoValueBuilderMissingSetter() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " abstract int blim();", " abstract String blam();", "", " @AutoValue.Builder", " public interface Builder {", " Builder blam(String x);", " Baz build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining("with this signature: foo.bar.Baz.Builder blim(int)") .inFile(javaFileObject) .onLineContaining("public interface Builder"); } @Test public void autoValueBuilderMissingSetterUsingSetPrefix() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " abstract int blim();", " abstract String blam();", "", " @AutoValue.Builder", " public interface Builder {", " Builder setBlam(String x);", " Baz build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining("with this signature: foo.bar.Baz.Builder setBlim(int)") .inFile(javaFileObject) .onLineContaining("public interface Builder"); } @Test public void autoValueBuilderWrongTypeSetter() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " abstract int blim();", " abstract String blam();", "", " @AutoValue.Builder", " public interface Builder {", " Builder blim(String x);", " Builder blam(String x);", " Baz build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Parameter type java.lang.String of setter method should be int " + "to match property method foo.bar.Baz.blim()") .inFile(javaFileObject) .onLineContaining("Builder blim(String x)"); } @Test public void autoValueBuilderWrongTypeSetterWithCopyOf() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import com.google.common.collect.ImmutableList;", "", "@AutoValue", "public abstract class Baz {", " abstract String blim();", " abstract ImmutableList<String> blam();", "", " @AutoValue.Builder", " public interface Builder {", " Builder blim(String x);", " Builder blam(String x);", " Baz build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Parameter type java.lang.String of setter method should be" + " com.google.common.collect.ImmutableList<java.lang.String> to match property" + " method foo.bar.Baz.blam(), or it should be a type that can be passed to" + " ImmutableList.copyOf") .inFile(javaFileObject) .onLineContaining("Builder blam(String x)"); } @Test public void autoValueBuilderWrongTypeSetterWithCopyOfGenericallyWrong() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import com.google.common.collect.ImmutableList;", "import java.util.Collection;", "", "@AutoValue", "public abstract class Baz {", " abstract String blim();", " abstract ImmutableList<String> blam();", "", " @AutoValue.Builder", " public interface Builder {", " Builder blim(String x);", " Builder blam(Collection<Integer> x);", " Baz build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Parameter type java.util.Collection<java.lang.Integer> of setter method should be" + " com.google.common.collect.ImmutableList<java.lang.String> to match property" + " method foo.bar.Baz.blam(), or it should be a type that can be passed to" + " ImmutableList.copyOf to produce" + " com.google.common.collect.ImmutableList<java.lang.String>") .inFile(javaFileObject) .onLineContaining("Builder blam(Collection<Integer> x)"); } @Test public void autoValueBuilderWrongTypeSetterWithGetPrefix() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " abstract int getBlim();", " abstract String getBlam();", "", " @AutoValue.Builder", " public interface Builder {", " Builder blim(String x);", " Builder blam(String x);", " Baz build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Parameter type java.lang.String of setter method should be int " + "to match property method foo.bar.Baz.getBlim()") .inFile(javaFileObject) .onLineContaining("Builder blim(String x)"); } @Test public void autoValueBuilderNullableSetterForNonNullable() { JavaFileObject nullableFileObject = JavaFileObjects.forSourceLines( "foo.bar.Nullable", "package foo.bar;", "", "import java.lang.annotation.ElementType;", "import java.lang.annotation.Target;", "", "@Target(ElementType.TYPE_USE)", "public @interface Nullable {}"); JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " abstract String notNull();", "", " @AutoValue.Builder", " public interface Builder {", " Builder setNotNull(@Nullable String x);", " Baz build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject, nullableFileObject); assertThat(compilation) .hadErrorContaining( "Parameter of setter method is @Nullable but property method" + " foo.bar.Baz.notNull() is not") .inFile(javaFileObject) .onLineContaining("setNotNull"); } // Check that we get a helpful error message if some of your properties look like getters but // others don't. @Test public void autoValueBuilderBeansConfusion() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Item", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Item {", " abstract String getTitle();", " abstract boolean hasThumbnail();", "", " @AutoValue.Builder", " public interface Builder {", " Builder setTitle(String title);", " Builder setHasThumbnail(boolean t);", " Item build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Method setTitle does not correspond to a property method of foo.bar.Item") .inFile(javaFileObject) .onLineContaining("Builder setTitle(String title)"); assertThat(compilation) .hadNoteContaining("hasThumbnail") .inFile(javaFileObject) .onLineContaining("Builder setTitle(String title)"); } @Test public void autoValueBuilderExtraSetter() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " abstract String blam();", "", " @AutoValue.Builder", " public interface Builder {", " Builder blim(int x);", " Builder blam(String x);", " Baz build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining("Method blim does not correspond to a property method of foo.bar.Baz") .inFile(javaFileObject) .onLineContaining("Builder blim(int x)"); } @Test public void autoValueBuilderSetPrefixAndNoSetPrefix() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " abstract int blim();", " abstract String blam();", "", " @AutoValue.Builder", " public interface Builder {", " Builder blim(int x);", " Builder setBlam(String x);", " Baz build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining("If any setter methods use the setFoo convention then all must") .inFile(javaFileObject) .onLineContaining("Builder blim(int x)"); } @Test public void autoValueBuilderSetterReturnType() { // We do allow the return type of a setter to be a supertype of the builder type, to support // step builders. But we don't allow it to be Object. JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " abstract int blim();", "", " @AutoValue.Builder", " public interface Builder {", " Object blim(int x);", " Baz build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining("Setter methods must return foo.bar.Baz.Builder") .inFile(javaFileObject) .onLineContaining("Object blim(int x)"); } @Test public void autoValueBuilderWrongTypeGetter() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz<T, U> {", " abstract T blim();", " abstract U blam();", "", " @AutoValue.Builder", " public interface Builder<T, U> {", " Builder<T, U> blim(T x);", " Builder<T, U> blam(U x);", " T blim();", " T blam();", " Baz<T, U> build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContainingMatch( "Method matches a property of foo\\.bar\\.Baz<T, ?U> but has return type T instead of" + " U") .inFile(javaFileObject) .onLineContaining("T blam()"); // The <T, ?U> is because we're depending on TypeMirror.toString(), and the JDK actually spells // this as <T,U> with no space. While it's not completely sound to expect a given string from // TypeMirror.toString(), in practice it's hard to imagine that it would be anything other // than "foo.bar.Baz<T,U>" or "foo.bar.Baz<T, U>" given the specification. } @Test public void autoValueBuilderPropertyBuilderInvalidType() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz<T, U> {", " abstract String blim();", "", " @AutoValue.Builder", " public interface Builder<T, U> {", " StringBuilder blimBuilder();", " Baz<T, U> build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Method looks like a property builder, but it returns java.lang.StringBuilder which " + "does not have a non-static build() or buildOrThrow() method") .inFile(javaFileObject) .onLineContaining("StringBuilder blimBuilder()"); } @Test public void autoValueBuilderPropertyBuilderNullable() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import com.google.common.collect.ImmutableList;", "", "@AutoValue", "public abstract class Baz<T, U> {", " @interface Nullable {}", " abstract @Nullable ImmutableList<String> strings();", "", " @AutoValue.Builder", " public interface Builder<T, U> {", " ImmutableList.Builder<String> stringsBuilder();", " Baz<T, U> build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining("Property strings is @Nullable so it cannot have a property builder") .inFile(javaFileObject) .onLineContaining("stringsBuilder()"); } @Test public void autoValueBuilderPropertyBuilderNullableType() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import com.google.common.collect.ImmutableList;", "import java.lang.annotation.ElementType;", "import java.lang.annotation.Target;", "", "@AutoValue", "public abstract class Baz<T, U> {", " @Target(ElementType.TYPE_USE)", " @interface Nullable {}", " abstract @Nullable ImmutableList<String> strings();", "", " @AutoValue.Builder", " public interface Builder<T, U> {", " ImmutableList.Builder<String> stringsBuilder();", " Baz<T, U> build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining("Property strings is @Nullable so it cannot have a property builder") .inFile(javaFileObject) .onLineContaining("stringsBuilder()"); } @Test public void autoValueBuilderPropertyBuilderWrongCollectionType() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import com.google.common.collect.ImmutableList;", "import com.google.common.collect.ImmutableSet;", "", "@AutoValue", "public abstract class Baz<T, U> {", " abstract ImmutableList<T> blim();", "", " @AutoValue.Builder", " public interface Builder<T, U> {", " ImmutableSet.Builder<T> blimBuilder();", " Baz<T, U> build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Property builder for blim has type com.google.common.collect.ImmutableSet.Builder " + "whose build() method returns com.google.common.collect.ImmutableSet<T> " + "instead of com.google.common.collect.ImmutableList<T>") .inFile(javaFileObject) .onLineContaining("ImmutableSet.Builder<T> blimBuilder()"); } @Test public void autoValueBuilderPropertyBuilderWeirdBuilderType() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import com.google.common.collect.ImmutableSet;", "", "@AutoValue", "public abstract class Baz<T, U> {", " abstract Integer blim();", "", " @AutoValue.Builder", " public interface Builder<T, U> {", " int blimBuilder();", " Baz<T, U> build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Method looks like a property builder, but its return type is not a class or interface") .inFile(javaFileObject) .onLineContaining("int blimBuilder()"); } @Test public void autoValueBuilderPropertyBuilderWeirdBuiltType() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import com.google.common.collect.ImmutableSet;", "", "@AutoValue", "public abstract class Baz<T, U> {", " abstract int blim();", "", " @AutoValue.Builder", " public interface Builder<T, U> {", " Integer blimBuilder();", " Baz<T, U> build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Method looks like a property builder, but the type of property blim is not a class " + "or interface") .inFile(javaFileObject) .onLineContaining("Integer blimBuilder()"); } @Test public void autoValueBuilderPropertyBuilderHasNoBuild() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import com.google.common.collect.ImmutableSet;", "", "@AutoValue", "public abstract class Baz<T, U> {", " abstract String blim();", "", " @AutoValue.Builder", " public interface Builder<T, U> {", " StringBuilder blimBuilder();", " Baz<T, U> build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Method looks like a property builder, but it returns java.lang.StringBuilder which " + "does not have a non-static build() or buildOrThrow() method") .inFile(javaFileObject) .onLineContaining("StringBuilder blimBuilder()"); } @Test public void autoValueBuilderPropertyBuilderHasStaticBuild() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import com.google.common.collect.ImmutableSet;", "", "@AutoValue", "public abstract class Baz<T, U> {", " abstract String blim();", "", " public static class StringFactory {", " public static String build() {", " return null;", " }", " }", "", " @AutoValue.Builder", " public interface Builder<T, U> {", " StringFactory blimBuilder();", " Baz<T, U> build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Method looks like a property builder, but it returns foo.bar.Baz.StringFactory which " + "does not have a non-static build() or buildOrThrow() method") .inFile(javaFileObject) .onLineContaining("StringFactory blimBuilder()"); } @Test public void autoValueBuilderPropertyBuilderReturnsWrongType() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import com.google.common.collect.ImmutableSet;", "import java.util.List;", "", "@AutoValue", "public abstract class Baz<E> {", " abstract List<E> blim();", "", " public static class ListFactory<E> {", " public List<? extends E> build() {", " return null;", " }", " }", "", " @AutoValue.Builder", " public interface Builder<E> {", " ListFactory<E> blimBuilder();", " Baz<E> build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Property builder for blim has type foo.bar.Baz.ListFactory whose build() method " + "returns java.util.List<? extends E> instead of java.util.List<E>") .inFile(javaFileObject) .onLineContaining("ListFactory<E> blimBuilder()"); } @Test public void autoValueBuilderPropertyBuilderCantConstruct() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import com.google.common.collect.ImmutableSet;", "", "@AutoValue", "public abstract class Baz<E> {", " abstract String blim();", "", " public static class StringFactory {", " private StringFactory() {}", "", " public String build() {", " return null;", " }", " }", "", " @AutoValue.Builder", " public interface Builder<E> {", " StringFactory blimBuilder();", " Baz<E> build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Method looks like a property builder, but its type foo.bar.Baz.StringFactory " + "does not have a public constructor and java.lang.String does not have a static " + "builder() or newBuilder() method that returns foo.bar.Baz.StringFactory") .inFile(javaFileObject) .onLineContaining("StringFactory blimBuilder()"); } @Test public void autoValueBuilderPropertyBuilderCantReconstruct() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz<E> {", " abstract String blim();", " abstract Builder<E> toBuilder();", "", " public static class StringFactory {", " public String build() {", " return null;", " }", " }", "", " @AutoValue.Builder", " public interface Builder<E> {", " StringFactory blimBuilder();", " Baz<E> build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Property builder method returns foo.bar.Baz.StringFactory but there is no way to make" + " that type from java.lang.String: java.lang.String does not have a non-static" + " toBuilder() method that returns foo.bar.Baz.StringFactory, and" + " foo.bar.Baz.StringFactory does not have a method addAll or putAll that accepts" + " an argument of type java.lang.String") .inFile(javaFileObject) .onLineContaining("StringFactory blimBuilder()"); } @Test public void autoValueBuilderPropertyBuilderWrongTypeAddAll() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import com.google.common.collect.ImmutableSet;", "import java.util.Iterator;", "", "@AutoValue", "public abstract class Baz<T> {", " abstract ImmutableSet<String> strings();", " abstract Builder<T> toBuilder();", "", " public static class ImmutableSetBuilder<E> {", " public void addAll(Iterator<? extends E> elements) {}", "", " public ImmutableSet<E> build() {", " return null;", " }", " }", "", " @AutoValue.Builder", " public interface Builder<T> {", " ImmutableSetBuilder<String> stringsBuilder();", " Baz<T> build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Property builder method returns foo.bar.Baz.ImmutableSetBuilder<java.lang.String> but" + " there is no way to make that type from" + " com.google.common.collect.ImmutableSet<java.lang.String>:" + " com.google.common.collect.ImmutableSet<java.lang.String> does not have a" + " non-static toBuilder() method that returns" + " foo.bar.Baz.ImmutableSetBuilder<java.lang.String>, and" + " foo.bar.Baz.ImmutableSetBuilder<java.lang.String> does not have a method" + " addAll or putAll that accepts an argument of type" + " com.google.common.collect.ImmutableSet<java.lang.String>") .inFile(javaFileObject) .onLineContaining("ImmutableSetBuilder<String> stringsBuilder();"); } @Test public void autoValueBuilderPropertyBuilderCantSet() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import com.google.common.collect.ImmutableSet;", "", "@AutoValue", "public abstract class Baz<E> {", " abstract String blim();", "", " public static class StringFactory {", " public String build() {", " return null;", " }", " }", "", " @AutoValue.Builder", " public interface Builder<E> {", " Builder<E> setBlim(String s);", " StringFactory blimBuilder();", " Baz<E> build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Property builder method returns foo.bar.Baz.StringFactory but there is no way to make " + "that type from java.lang.String: java.lang.String does not have a non-static " + "toBuilder() method that returns foo.bar.Baz.StringFactory") .inFile(javaFileObject) .onLineContaining("StringFactory blimBuilder()"); } @Test public void autoValueBuilderPropertyBuilderWrongTypeToBuilder() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import com.google.common.collect.ImmutableSet;", "", "@AutoValue", "public abstract class Baz<E> {", " abstract Buh blim();", " abstract Builder<E> toBuilder();", "", " public static class Buh {", " StringBuilder toBuilder() {", " return null;", " }", " }", "", " public static class BuhBuilder {", " public Buh build() {", " return null;", " }", " }", "", " @AutoValue.Builder", " public interface Builder<E> {", " BuhBuilder blimBuilder();", " Baz<E> build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Property builder method returns foo.bar.Baz.BuhBuilder but there is no way to make " + "that type from foo.bar.Baz.Buh: foo.bar.Baz.Buh does not have a non-static " + "toBuilder() method that returns foo.bar.Baz.BuhBuilder") .inFile(javaFileObject) .onLineContaining("BuhBuilder blimBuilder()"); } @Test public void autoValueBuilderPropertyBuilderWrongElementType() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import com.google.common.collect.ImmutableSet;", "", "@AutoValue", "public abstract class Baz<T, U> {", " abstract ImmutableSet<T> blim();", "", " @AutoValue.Builder", " public interface Builder<T, U> {", " ImmutableSet.Builder<U> blimBuilder();", " Baz<T, U> build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Property builder for blim has type com.google.common.collect.ImmutableSet.Builder " + "whose build() method returns com.google.common.collect.ImmutableSet<U> " + "instead of com.google.common.collect.ImmutableSet<T>") .inFile(javaFileObject) .onLineContaining("ImmutableSet.Builder<U> blimBuilder()"); } @Test public void autoValueBuilderAlienMethod0() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " abstract String blam();", "", " @AutoValue.Builder", " public interface Builder {", " Builder blam(String x);", " Builder whut();", " Baz build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Method without arguments should be a build method returning foo.bar.Baz, or a getter" + " method with the same name and type as a property method of foo.bar.Baz, or" + " fooBuilder() where foo() or getFoo() is a property method of foo.bar.Baz") .inFile(javaFileObject) .onLineContaining("Builder whut()"); } @Test public void autoValueBuilderAlienMethod1() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " abstract String blam();", "", " @AutoValue.Builder", " public interface Builder {", " void whut(String x);", " Baz build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining("Method whut does not correspond to a property method of foo.bar.Baz") .inFile(javaFileObject) .onLineContaining("void whut(String x)"); } @Test public void autoValueBuilderAlienMethod2() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " abstract String blam();", "", " @AutoValue.Builder", " public interface Builder {", " Builder blam(String x, String y);", " Baz build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining("Builder methods must have 0 or 1 parameters") .inFile(javaFileObject) .onLineContaining("Builder blam(String x, String y)"); } @Test public void autoValueBuilderMissingBuildMethod() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz<T> {", " abstract T blam();", "", " @AutoValue.Builder", " public interface Builder<T> {", " Builder<T> blam(T x);", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Builder must have a single no-argument method, typically called build(), that returns" + " foo.bar.Baz<T>") .inFile(javaFileObject) .onLineContaining("public interface Builder<T>"); } @Test public void autoValueBuilderDuplicateBuildMethods() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " abstract String blam();", "", " @AutoValue.Builder", " public interface Builder {", " Builder blam(String x);", " Baz build();", " Baz create();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Builder must have a single no-argument method, typically called build(), that returns" + " foo.bar.Baz") .inFile(javaFileObject) .onLineContaining("Baz build()"); assertThat(compilation) .hadErrorContaining( "Builder must have a single no-argument method, typically called build(), that returns" + " foo.bar.Baz") .inFile(javaFileObject) .onLineContaining("Baz create()"); } @Test public void autoValueBuilderWrongTypeBuildMethod() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " abstract String blam();", "", " @AutoValue.Builder", " public interface Builder {", " Builder blam(String x);", " String build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Method without arguments should be a build method returning foo.bar.Baz") .inFile(javaFileObject) .onLineContaining("String build()"); } @Test public void autoValueBuilderTypeParametersDontMatch1() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz<T> {", " abstract String blam();", "", " @AutoValue.Builder", " public interface Builder {", " Builder blam(String x);", " Baz build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Type parameters of foo.bar.Baz.Builder must have same names and " + "bounds as type parameters of foo.bar.Baz") .inFile(javaFileObject) .onLineContaining("public interface Builder"); } @Test public void autoValueBuilderTypeParametersDontMatch2() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz<T> {", " abstract T blam();", "", " @AutoValue.Builder", " public interface Builder<E> {", " Builder<E> blam(E x);", " Baz build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Type parameters of foo.bar.Baz.Builder must have same names and " + "bounds as type parameters of foo.bar.Baz") .inFile(javaFileObject) .onLineContaining("public interface Builder<E>"); } @Test public void autoValueBuilderTypeParametersDontMatch3() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz<T extends Number & Comparable<T>> {", " abstract T blam();", "", " @AutoValue.Builder", " public interface Builder<T extends Number> {", " Builder<T> blam(T x);", " Baz build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining( "Type parameters of foo.bar.Baz.Builder must have same names and " + "bounds as type parameters of foo.bar.Baz") .inFile(javaFileObject) .onLineContaining("public interface Builder<T extends Number>"); } @Test public void autoValueBuilderToBuilderWrongTypeParameters() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "abstract class Baz<K extends Comparable<K>, V> {", " abstract K key();", " abstract V value();", " abstract Builder<V, K> toBuilder1();", "", " @AutoValue.Builder", " interface Builder<K extends Comparable<K>, V> {", " Builder<K, V> key(K key);", " Builder<K, V> value(V value);", " Baz<K, V> build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining("Builder converter method should return foo.bar.Baz.Builder<K, V>") .inFile(javaFileObject) .onLineContaining("abstract Builder<V, K> toBuilder1()"); } @Test public void autoValueBuilderToBuilderDuplicate() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "abstract class Baz<K extends Comparable<K>, V> {", " abstract K key();", " abstract V value();", " abstract Builder<K, V> toBuilder1();", " abstract Builder<K, V> toBuilder2();", "", " @AutoValue.Builder", " interface Builder<K extends Comparable<K>, V> {", " Builder<K, V> key(K key);", " Builder<K, V> value(V value);", " Baz<K, V> build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining("There can be at most one builder converter method") .inFile(javaFileObject) .onLineContaining("abstract Builder<K, V> toBuilder1()"); } @Test public void getFooIsFoo() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " abstract int getFoo();", " abstract boolean isFoo();", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new AutoValueBuilderProcessor()) .compile(javaFileObject); assertThat(compilation) .hadErrorContaining("More than one @AutoValue property called foo") .inFile(javaFileObject) .onLineContaining("getFoo"); assertThat(compilation) .hadErrorContaining("More than one @AutoValue property called foo") .inFile(javaFileObject) .onLineContaining("isFoo"); } @Retention(RetentionPolicy.SOURCE) public @interface Foo {} /* Processor that generates an empty class BarFoo every time it sees a class Bar annotated with * @Foo. */ public static class FooProcessor extends AbstractProcessor { @Override public Set<String> getSupportedAnnotationTypes() { return ImmutableSet.of(Foo.class.getCanonicalName()); } @Override public SourceVersion getSupportedSourceVersion() { return SourceVersion.latestSupported(); } @Override public boolean process(Set<? extends TypeElement> annotations, RoundEnvironment roundEnv) { Set<? extends Element> elements = roundEnv.getElementsAnnotatedWith(Foo.class); for (TypeElement type : ElementFilter.typesIn(elements)) { try { generateFoo(type); } catch (IOException e) { throw new AssertionError(e); } } return false; } private void generateFoo(TypeElement type) throws IOException { String pkg = TypeSimplifier.packageNameOf(type); String className = type.getSimpleName().toString(); String generatedClassName = className + "Foo"; JavaFileObject source = processingEnv.getFiler().createSourceFile(pkg + "." + generatedClassName, type); PrintWriter writer = new PrintWriter(source.openWriter()); writer.println("package " + pkg + ";"); writer.println("public class " + generatedClassName + " {}"); writer.close(); } } @Test public void referencingGeneratedClass() { // Test that ensures that a type that does not exist can be the type of an @AutoValue property // as long as it later does come into existence. The BarFoo type referenced here does not exist // when the AutoValueProcessor runs on the first round, but the FooProcessor then generates it. // That generation provokes a further round of annotation processing and AutoValueProcessor // should succeed then. JavaFileObject bazFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " public abstract BarFoo barFoo();", "", " public static Baz create(BarFoo barFoo) {", " return new AutoValue_Baz(barFoo);", " }", "}"); JavaFileObject barFileObject = JavaFileObjects.forSourceLines( "foo.bar.Bar", "package foo.bar;", "", "@" + Foo.class.getCanonicalName(), "public abstract class Bar {", " public abstract BarFoo barFoo();", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new FooProcessor()) .withOptions("-Xlint:-processing", "-implicit:none") .compile(bazFileObject, barFileObject); assertThat(compilation).succeededWithoutWarnings(); } @Test public void referencingGeneratedClassInAnnotation() { // Test that ensures that a type that does not exist can be referenced by a copied annotation // as long as it later does come into existence. The BarFoo type referenced here does not exist // when the AutoValueProcessor runs on the first round, but the FooProcessor then generates it. // That generation provokes a further round of annotation processing and AutoValueProcessor // should succeed then. // We test the three places that a class reference could appear: as the value of a Class // element, as the value of a Class[] element, in a nested annotation. JavaFileObject barFileObject = JavaFileObjects.forSourceLines( "foo.bar.Bar", "package foo.bar;", "", "@" + Foo.class.getCanonicalName(), "public abstract class Bar {", "}"); JavaFileObject referenceClassFileObject = JavaFileObjects.forSourceLines( "foo.bar.ReferenceClass", "package foo.bar;", "", "@interface ReferenceClass {", " Class<?> value() default Void.class;", " Class<?>[] values() default {};", " Nested nested() default @Nested;", " @interface Nested {", " Class<?>[] values() default {};", " }", "}"); ImmutableList<String> annotations = ImmutableList.of( "@ReferenceClass(BarFoo.class)", "@ReferenceClass(values = {Void.class, BarFoo.class})", "@ReferenceClass(nested = @ReferenceClass.Nested(values = {Void.class, BarFoo.class}))"); for (String annotation : annotations) { JavaFileObject bazFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "@AutoValue.CopyAnnotations", annotation, "public abstract class Baz {", " public abstract int foo();", "", " public static Baz create(int foo) {", " return new AutoValue_Baz(foo);", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor(), new FooProcessor()) .withOptions("-Xlint:-processing", "-implicit:none") .compile(bazFileObject, barFileObject, referenceClassFileObject); expect.about(compilations()).that(compilation).succeededWithoutWarnings(); if (compilation.status().equals(Compilation.Status.SUCCESS)) { expect.about(compilations()).that(compilation) .generatedSourceFile("foo.bar.AutoValue_Baz") .contentsAsUtf8String() .contains(annotation); } } } @Test public void annotationReferencesUndefined() { // Test that we don't throw an exception if asked to compile @SuppressWarnings(UNDEFINED) // where UNDEFINED is an undefined symbol. JavaFileObject bazFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " @SuppressWarnings(UNDEFINED)", " public abstract int[] buh();", "}"); Compilation compilation1 = javac() .withOptions("-Xlint:-processing") .withProcessors(new AutoValueProcessor()) .compile(bazFileObject); assertThat(compilation1).hadErrorCount(1); assertThat(compilation1) .hadErrorContaining("UNDEFINED") .inFile(bazFileObject) .onLineContaining("UNDEFINED"); assertThat(compilation1).hadWarningCount(1); assertThat(compilation1) .hadWarningContaining("mutable") .inFile(bazFileObject) .onLineContaining("public abstract int[] buh()"); // Same test, except we do successfully suppress the warning despite the UNDEFINED. bazFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz {", " @SuppressWarnings({UNDEFINED, \"mutable\"})", " public abstract int[] buh();", "}"); Compilation compilation2 = javac() .withOptions("-Xlint:-processing") .withProcessors(new AutoValueProcessor()) .compile(bazFileObject); assertThat(compilation2).hadErrorCount(1); assertThat(compilation2) .hadErrorContaining("UNDEFINED") .inFile(bazFileObject) .onLineContaining("UNDEFINED"); assertThat(compilation2).hadWarningCount(0); } @Test public void packagePrivateAnnotationFromOtherPackage() { JavaFileObject bazFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz extends otherpackage.Parent {", "}"); JavaFileObject parentFileObject = JavaFileObjects.forSourceLines( "otherpackage.Parent", "package otherpackage;", "", "public abstract class Parent {", " @PackageAnnotation", " public abstract String foo();", "", " @interface PackageAnnotation {}", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor()) .withOptions("-Xlint:-processing", "-implicit:none") .compile(bazFileObject, parentFileObject); assertThat(compilation).succeededWithoutWarnings(); assertThat(compilation).generatedSourceFile("foo.bar.AutoValue_Baz"); } @Test public void visibleProtectedAnnotationFromOtherPackage() { JavaFileObject bazFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz extends otherpackage.Parent {}"); JavaFileObject parentFileObject = JavaFileObjects.forSourceLines( "otherpackage.Parent", "package otherpackage;", "", "public abstract class Parent {", " @ProtectedAnnotation", " public abstract String foo();", "", " protected @interface ProtectedAnnotation {}", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor()) .withOptions("-Xlint:-processing", "-implicit:none") .compile(bazFileObject, parentFileObject); assertThat(compilation).succeededWithoutWarnings(); assertThat(compilation) .generatedSourceFile("foo.bar.AutoValue_Baz") .contentsAsUtf8String() .containsMatch("(?s:@Parent.ProtectedAnnotation\\s*@Override\\s*public String foo\\(\\))"); } @Test public void methodAnnotationsCopiedInLexicographicalOrder() { JavaFileObject bazFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import com.package1.Annotation1;", "import com.package2.Annotation0;", "", "@AutoValue", "public abstract class Baz extends Parent {", " @Annotation0", " @Annotation1", " @Override", " public abstract String foo();", "}"); JavaFileObject parentFileObject = JavaFileObjects.forSourceLines( "foo.bar.Parent", "package foo.bar;", "", "public abstract class Parent {", " public abstract String foo();", "}"); JavaFileObject annotation1FileObject = JavaFileObjects.forSourceLines( "com.package1.Annotation1", "package com.package1;", "", "import java.lang.annotation.ElementType;", "import java.lang.annotation.Target;", "", "@Target({ElementType.FIELD, ElementType.METHOD})", "public @interface Annotation1 {}"); JavaFileObject annotation0FileObject = JavaFileObjects.forSourceLines( "com.package2.Annotation0", "package com.package2;", "", "public @interface Annotation0 {}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor()) .withOptions("-Xlint:-processing", "-implicit:none") .compile(bazFileObject, parentFileObject, annotation1FileObject, annotation0FileObject); assertThat(compilation).succeededWithoutWarnings(); assertThat(compilation) .generatedSourceFile("foo.bar.AutoValue_Baz") .contentsAsUtf8String() .containsMatch( "(?s:@Annotation1\\s+@Annotation0\\s+@Override\\s+public String foo\\(\\))"); // @Annotation1 precedes @Annotation 0 because // @com.package2.Annotation1 precedes @com.package1.Annotation0 } @Test public void nonVisibleProtectedAnnotationFromOtherPackage() { JavaFileObject bazFileObject = JavaFileObjects.forSourceLines( "foo.bar.Baz", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Baz extends otherpackage.Parent {", "}"); JavaFileObject parentFileObject = JavaFileObjects.forSourceLines( "otherpackage.Parent", "package otherpackage;", "", "import otherpackage.Annotations.ProtectedAnnotation;", "", "public abstract class Parent {", " @ProtectedAnnotation", " public abstract String foo();", "}"); JavaFileObject annotationsFileObject = JavaFileObjects.forSourceLines( "otherpackage.Annotations", "package otherpackage;", "", "public class Annotations {", " protected @interface ProtectedAnnotation {}", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor()) .withOptions("-Xlint:-processing", "-implicit:none") .compile(bazFileObject, parentFileObject, annotationsFileObject); assertThat(compilation).succeededWithoutWarnings(); assertThat(compilation) .generatedSourceFile("foo.bar.AutoValue_Baz") .contentsAsUtf8String() .doesNotContain("ProtectedAnnotation"); } @Test public void nonVisibleProtectedClassAnnotationFromOtherPackage() { JavaFileObject bazFileObject = JavaFileObjects.forSourceLines( "foo.bar.Outer", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "class Outer extends otherpackage.Parent {", " @AutoValue", " @AutoValue.CopyAnnotations", " @ProtectedAnnotation", " abstract static class Inner {", " abstract String foo();", " }", "}"); JavaFileObject parentFileObject = JavaFileObjects.forSourceLines( "otherpackage.Parent", "package otherpackage;", "", "public abstract class Parent {", " protected @interface ProtectedAnnotation {}", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor()) .withOptions("-Xlint:-processing", "-implicit:none") .compile(bazFileObject, parentFileObject); assertThat(compilation).succeededWithoutWarnings(); assertThat(compilation) .generatedSourceFile("foo.bar.AutoValue_Outer_Inner") .contentsAsUtf8String() .doesNotContain("ProtectedAnnotation"); } @Test public void builderWithVarArgsDoesNotImportJavaUtilArrays() { // Repro from https://github.com/google/auto/issues/373. JavaFileObject testFileObject = JavaFileObjects.forSourceLines( "foo.bar.Test", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import com.google.common.collect.ImmutableList;", "", "@AutoValue", "public abstract class Test {", " abstract ImmutableList<String> foo();", "", " @AutoValue.Builder", " abstract static class Builder {", " abstract Builder foo(String... foos);", " abstract Test build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor()) .withOptions("-Xlint:-processing", "-implicit:none") .compile(testFileObject); assertThat(compilation).succeededWithoutWarnings(); assertThat(compilation) .generatedSourceFile("foo.bar.AutoValue_Test") .contentsAsUtf8String() .doesNotContain("java.util.Arrays"); } @Test public void staticBuilderMethodInBuilderClass() { JavaFileObject javaFileObject = JavaFileObjects.forSourceLines( "com.example.Foo", "package com.example;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Foo {", " public abstract String bar();", "", " @AutoValue.Builder", " public abstract static class Builder {", " public static Builder builder() {", " return new AutoValue_Foo.Builder();", " }", "", " public abstract Builder setBar(String s);", " public abstract Foo build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor()) .withOptions("-Xlint:-processing", "-implicit:none") .compile(javaFileObject); assertThat(compilation).succeeded(); assertThat(compilation) .hadWarningContaining("Static builder() method should be in the containing class") .inFile(javaFileObject) .onLineContaining("builder()"); } /** * Tests behaviour when the package containing an {@code @AutoValue} class also has classes with * the same name as classes in {@code java.lang}. If you call a class {@code Object} you are * asking for trouble, but you could innocently call a class {@code Compiler} without realizing * there is a {@code java.lang.Compiler}. * * <p>The case where the class in question is mentioned in the {@code @AutoValue} class is the * easy one, because then our logic can easily see that there is a clash and will use * fully-qualified names. This is the case of the {@code Compiler} class below. The case where the * class is <i>not</i> mentioned is harder. We have to realize that we can't elide the package * name in {@code java.lang.Object} because there is also a {@code foo.bar.Object} in scope, and * in fact it takes precedence. */ @Test public void javaLangClash() { JavaFileObject object = JavaFileObjects.forSourceLines( "foo.bar.Object", // "package foo.bar;", "", "public class Object {}"); JavaFileObject string = JavaFileObjects.forSourceLines( "foo.bar.String", // "package foo.bar;", "", "public class String {}"); JavaFileObject integer = JavaFileObjects.forSourceLines( "foo.bar.Integer", // "package foo.bar;", "", "public class Integer {}"); JavaFileObject thread = JavaFileObjects.forSourceLines( "foo.bar.Thread", // "package foo.bar;", "", "public class Thread {}"); JavaFileObject override = JavaFileObjects.forSourceLines( "foo.bar.Override", // "package foo.bar;", "", "public class Override {}"); JavaFileObject test = JavaFileObjects.forSourceLines( "foo.bar.Test", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "", "@AutoValue", "public abstract class Test {", " public abstract java.lang.Integer integer();", " public abstract java.lang.Thread.State state();", " public static Builder builder() {", " return new AutoValue_Test.Builder();", " }", "", " @AutoValue.Builder", " public abstract static class Builder {", " public abstract Builder setInteger(java.lang.Integer x);", " public abstract Builder setState(java.lang.Thread.State x);", " public abstract Test build();", " }", "}"); Compilation compilation = javac() .withProcessors(new AutoValueProcessor()) .withOptions("-Xlint:-processing", "-implicit:none") .compile(object, string, integer, thread, override, test); assertThat(compilation).succeededWithoutWarnings(); } // This is a regression test for the problem described in // https://github.com/google/auto/issues/847#issuecomment-629857642. @Test public void generatedParentWithGeneratedGetterButSetterInBuilder() { JavaFileObject test = JavaFileObjects.forSourceLines( "foo.bar.Test", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import foo.baz.GeneratedParent;", "import foo.baz.GeneratedPropertyType;", "import java.util.Optional;", "", "@AutoValue", "public abstract class Test extends GeneratedParent {", " public abstract String string();", "", " public static Builder builder() {", " return new AutoValue_Test.Builder();", " }", "", " @AutoValue.Builder", " public abstract static class Builder extends GeneratedParent.Builder<Builder> {", " public abstract Builder setString(String x);", " public abstract Builder setGenerated(GeneratedPropertyType x);", " public abstract Test build();", " }", "}"); AutoValueProcessor autoValueProcessor = new AutoValueProcessor(); GeneratedParentProcessor generatedParentProcessor = new GeneratedParentProcessor(autoValueProcessor, expect); Compilation compilation = javac() .withProcessors(autoValueProcessor, generatedParentProcessor) .withOptions("-Xlint:-processing", "-implicit:none") .compile(test); assertThat(compilation).succeededWithoutWarnings(); assertThat(compilation) .generatedSourceFile("foo.bar.AutoValue_Test") .contentsAsUtf8String() .contains(" public int integer() {"); } @SupportedAnnotationTypes("*") private static class GeneratedParentProcessor extends AbstractProcessor { private static final String GENERATED_PARENT = String.join( "\n", "package foo.baz;", "", "public abstract class GeneratedParent {", " public abstract int integer();", " public abstract GeneratedPropertyType generated();", "", " public abstract static class Builder<B extends Builder<B>> {", " public abstract B setInteger(int x);", " }", "}"); private static final String GENERATED_PROPERTY_TYPE = String.join( "\n", // "package foo.baz;", "", "public class GeneratedPropertyType {}"); private static final ImmutableMap<String, String> GENERATED_TYPES = ImmutableMap.of( "foo.baz.GeneratedParent", GENERATED_PARENT, "foo.baz.GeneratedPropertyType", GENERATED_PROPERTY_TYPE); private final AutoValueProcessor autoValueProcessor; private final Expect expect; GeneratedParentProcessor(AutoValueProcessor autoValueProcessor, Expect expect) { this.autoValueProcessor = autoValueProcessor; this.expect = expect; } private boolean generated; @Override public boolean process(Set<? extends TypeElement> annotations, RoundEnvironment roundEnv) { if (!generated) { generated = true; // Check that AutoValueProcessor has already run and deferred the foo.bar.Test type because // we haven't generated its parent yet. expect.that(autoValueProcessor.deferredTypeNames()).contains("foo.bar.Test"); GENERATED_TYPES.forEach( (typeName, source) -> { try { JavaFileObject generated = processingEnv.getFiler().createSourceFile(typeName); try (Writer writer = generated.openWriter()) { writer.write(source); } } catch (IOException e) { throw new UncheckedIOException(e); } }); } return false; } @Override public SourceVersion getSupportedSourceVersion() { return SourceVersion.latestSupported(); } } // This is a regression test for the problem described in // https://github.com/google/auto/issues/1087. @Test public void kotlinMetadataAnnotationsAreImplicitlyExcludedFromCopying() { JavaFileObject metadata = JavaFileObjects.forSourceLines( "kotlin.Metadata", "package kotlin;", "", "public @interface Metadata {", "}"); JavaFileObject test = JavaFileObjects.forSourceLines( "foo.bar.Test", "package foo.bar;", "", "import com.google.auto.value.AutoValue;", "import kotlin.Metadata;", "", "@AutoValue.CopyAnnotations", "@Metadata", "@AutoValue", "public abstract class Test {", " public abstract String string();", "}"); AutoValueProcessor autoValueProcessor = new AutoValueProcessor(); Compilation compilation = javac() .withProcessors(autoValueProcessor) .withOptions("-Xlint:-processing", "-implicit:none") .compile(test, metadata); assertThat(compilation).succeededWithoutWarnings(); assertThat(compilation) .generatedSourceFile("foo.bar.AutoValue_Test") .contentsAsUtf8String() .doesNotContain("kotlin.Metadata"); } private String sorted(String... imports) { return Arrays.stream(imports).sorted().collect(joining("\n")); } }
apache-2.0
curit/dynamitey
Dynamitey/Internal/Optimization/InvokeHelper.cs
42310
 // // Copyright 2011 Ekon Benefits // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. using System; using System.Collections.Generic; using System.Linq; using System.Runtime.CompilerServices; using System.Reflection; namespace Dynamitey.Internal.Optimization { internal static partial class InvokeHelper { internal static readonly Type[] FuncKinds; internal static readonly Type[] ActionKinds; internal static readonly Type[] TupleKinds; internal static readonly IDictionary<Type,int> FuncArgs; internal static readonly IDictionary<Type,int> ActionArgs; internal static readonly IDictionary<Type,int> TupleArgs; static InvokeHelper() { FuncKinds = new [] { typeof(Func<>), //0 typeof(Func<,>), //1 typeof(Func<,,>), //2 typeof(Func<,,,>), //3 typeof(Func<,,,,>), //4 typeof(Func<,,,,,>), //5 typeof(Func<,,,,,,>), //6 typeof(Func<,,,,,,,>), //7 typeof(Func<,,,,,,,,>), //8 typeof(Func<,,,,,,,,,>), //9 typeof(Func<,,,,,,,,,,>), //10 typeof(Func<,,,,,,,,,,,>), //11 typeof(Func<,,,,,,,,,,,,>), //12 typeof(Func<,,,,,,,,,,,,,>), //13 typeof(Func<,,,,,,,,,,,,,,>), //14 typeof(Func<,,,,,,,,,,,,,,,>), //15 typeof(Func<,,,,,,,,,,,,,,,,>), //16 }; ActionKinds = new [] { typeof(Action), //0 typeof(Action<>), //1 typeof(Action<,>), //2 typeof(Action<,,>), //3 typeof(Action<,,,>), //4 typeof(Action<,,,,>), //5 typeof(Action<,,,,,>), //6 typeof(Action<,,,,,,>), //7 typeof(Action<,,,,,,,>), //8 typeof(Action<,,,,,,,,>), //9 typeof(Action<,,,,,,,,,>), //10 typeof(Action<,,,,,,,,,,>), //11 typeof(Action<,,,,,,,,,,,>), //12 typeof(Action<,,,,,,,,,,,,>), //13 typeof(Action<,,,,,,,,,,,,,>), //14 typeof(Action<,,,,,,,,,,,,,,>), //15 typeof(Action<,,,,,,,,,,,,,,,>), //16 }; TupleKinds = new [] { typeof(Tuple<>), //1 typeof(Tuple<,>), //2 typeof(Tuple<,,>), //3 typeof(Tuple<,,,>), //4 typeof(Tuple<,,,,>), //5 typeof(Tuple<,,,,,>), //6 typeof(Tuple<,,,,,,>), //7 typeof(Tuple<,,,,,,,>), //8 }; FuncArgs = FuncKinds.Zip(Enumerable.Range(0, FuncKinds.Length), (key, value) => new { key, value }).ToDictionary(k => k.key, v => v.value); ActionArgs = ActionKinds.Zip(Enumerable.Range(0, ActionKinds.Length), (key, value) => new { key, value }).ToDictionary(k => k.key, v => v.value); TupleArgs = TupleKinds.Zip(Enumerable.Range(1, ActionKinds.Length), (key, value) => new { key, value }).ToDictionary(k => k.key, v => v.value); } internal static dynamic TupleItem(dynamic tuple, int index){ switch(index){ case 1: return tuple.Item1; case 2: return tuple.Item2; case 3: return tuple.Item3; case 4: return tuple.Item4; case 5: return tuple.Item5; case 6: return tuple.Item6; case 7: return tuple.Item7; default: return tuple.Rest; } } internal static void InvokeMemberAction(ref CallSite callsite, Type binderType, int knownType, LazyBinder binder, InvokeMemberName name, bool staticContext, Type context, string[] argNames, object target, params object [] args) { var tSwitch = args.Length; switch (tSwitch) { #region Optimizations case 0: { var tCallSite = (CallSite<Action<CallSite, object>>)callsite; if(tCallSite == null){ tCallSite = CreateCallSite<Action<CallSite, object>>(binderType,knownType, binder, name, context, argNames, staticContext); callsite=tCallSite; } tCallSite.Target(tCallSite, target); break; } case 1: { var tCallSite = (CallSite<Action<CallSite, object, object>>)callsite; if(tCallSite == null){ tCallSite = CreateCallSite<Action<CallSite, object, object>>(binderType,knownType, binder, name, context, argNames, staticContext); callsite=tCallSite; } tCallSite.Target(tCallSite, target, args[0]); break; } case 2: { var tCallSite = (CallSite<Action<CallSite, object, object, object>>)callsite; if(tCallSite == null){ tCallSite = CreateCallSite<Action<CallSite, object, object, object>>(binderType,knownType, binder, name, context, argNames, staticContext); callsite=tCallSite; } tCallSite.Target(tCallSite, target, args[0], args[1]); break; } case 3: { var tCallSite = (CallSite<Action<CallSite, object, object, object, object>>)callsite; if(tCallSite == null){ tCallSite = CreateCallSite<Action<CallSite, object, object, object, object>>(binderType,knownType, binder, name, context, argNames, staticContext); callsite=tCallSite; } tCallSite.Target(tCallSite, target, args[0], args[1], args[2]); break; } case 4: { var tCallSite = (CallSite<Action<CallSite, object, object, object, object, object>>)callsite; if(tCallSite == null){ tCallSite = CreateCallSite<Action<CallSite, object, object, object, object, object>>(binderType,knownType, binder, name, context, argNames, staticContext); callsite=tCallSite; } tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3]); break; } case 5: { var tCallSite = (CallSite<Action<CallSite, object, object, object, object, object, object>>)callsite; if(tCallSite == null){ tCallSite = CreateCallSite<Action<CallSite, object, object, object, object, object, object>>(binderType,knownType, binder, name, context, argNames, staticContext); callsite=tCallSite; } tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3], args[4]); break; } case 6: { var tCallSite = (CallSite<Action<CallSite, object, object, object, object, object, object, object>>)callsite; if(tCallSite == null){ tCallSite = CreateCallSite<Action<CallSite, object, object, object, object, object, object, object>>(binderType,knownType, binder, name, context, argNames, staticContext); callsite=tCallSite; } tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3], args[4], args[5]); break; } case 7: { var tCallSite = (CallSite<Action<CallSite, object, object, object, object, object, object, object, object>>)callsite; if(tCallSite == null){ tCallSite = CreateCallSite<Action<CallSite, object, object, object, object, object, object, object, object>>(binderType,knownType, binder, name, context, argNames, staticContext); callsite=tCallSite; } tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3], args[4], args[5], args[6]); break; } case 8: { var tCallSite = (CallSite<Action<CallSite, object, object, object, object, object, object, object, object, object>>)callsite; if(tCallSite == null){ tCallSite = CreateCallSite<Action<CallSite, object, object, object, object, object, object, object, object, object>>(binderType,knownType, binder, name, context, argNames, staticContext); callsite=tCallSite; } tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7]); break; } case 9: { var tCallSite = (CallSite<Action<CallSite, object, object, object, object, object, object, object, object, object, object>>)callsite; if(tCallSite == null){ tCallSite = CreateCallSite<Action<CallSite, object, object, object, object, object, object, object, object, object, object>>(binderType,knownType, binder, name, context, argNames, staticContext); callsite=tCallSite; } tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], args[8]); break; } case 10: { var tCallSite = (CallSite<Action<CallSite, object, object, object, object, object, object, object, object, object, object, object>>)callsite; if(tCallSite == null){ tCallSite = CreateCallSite<Action<CallSite, object, object, object, object, object, object, object, object, object, object, object>>(binderType,knownType, binder, name, context, argNames, staticContext); callsite=tCallSite; } tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], args[8], args[9]); break; } case 11: { var tCallSite = (CallSite<Action<CallSite, object, object, object, object, object, object, object, object, object, object, object, object>>)callsite; if(tCallSite == null){ tCallSite = CreateCallSite<Action<CallSite, object, object, object, object, object, object, object, object, object, object, object, object>>(binderType,knownType, binder, name, context, argNames, staticContext); callsite=tCallSite; } tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], args[8], args[9], args[10]); break; } case 12: { var tCallSite = (CallSite<Action<CallSite, object, object, object, object, object, object, object, object, object, object, object, object, object>>)callsite; if(tCallSite == null){ tCallSite = CreateCallSite<Action<CallSite, object, object, object, object, object, object, object, object, object, object, object, object, object>>(binderType,knownType, binder, name, context, argNames, staticContext); callsite=tCallSite; } tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], args[8], args[9], args[10], args[11]); break; } case 13: { var tCallSite = (CallSite<Action<CallSite, object, object, object, object, object, object, object, object, object, object, object, object, object, object>>)callsite; if(tCallSite == null){ tCallSite = CreateCallSite<Action<CallSite, object, object, object, object, object, object, object, object, object, object, object, object, object, object>>(binderType,knownType, binder, name, context, argNames, staticContext); callsite=tCallSite; } tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], args[8], args[9], args[10], args[11], args[12]); break; } case 14: { var tCallSite = (CallSite<Action<CallSite, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object>>)callsite; if(tCallSite == null){ tCallSite = CreateCallSite<Action<CallSite, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object>>(binderType,knownType, binder, name, context, argNames, staticContext); callsite=tCallSite; } tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], args[8], args[9], args[10], args[11], args[12], args[13]); break; } #endregion default: var tArgTypes = Enumerable.Repeat(typeof(object), tSwitch); var tDelagateType = EmitCallSiteFuncType(tArgTypes, typeof(void)); Dynamic.InvokeCallSite(CreateCallSite(tDelagateType, binderType,knownType, binder, name, context, argNames), target, args); break; } } internal static TReturn InvokeMemberTargetType<TTarget,TReturn>( ref CallSite callsite, Type binderType, int knownType, LazyBinder binder, InvokeMemberName name, bool staticContext, Type context, string[] argNames, TTarget target, params object [] args) { var tSwitch = args.Length; switch (tSwitch) { #region Optimizations case 0: { var tCallSite = (CallSite<Func<CallSite, TTarget, TReturn>>)callsite; if(tCallSite==null){ tCallSite = CreateCallSite<Func<CallSite, TTarget, TReturn>>(binderType,knownType,binder, name, context, argNames, staticContext); callsite =tCallSite; } return tCallSite.Target(tCallSite, target); } case 1: { var tCallSite = (CallSite<Func<CallSite, TTarget, object,TReturn>>)callsite; if(tCallSite==null){ tCallSite = CreateCallSite<Func<CallSite, TTarget, object,TReturn>>(binderType,knownType,binder, name, context, argNames, staticContext); callsite =tCallSite; } return tCallSite.Target(tCallSite, target, args[0]); } case 2: { var tCallSite = (CallSite<Func<CallSite, TTarget, object, object,TReturn>>)callsite; if(tCallSite==null){ tCallSite = CreateCallSite<Func<CallSite, TTarget, object, object,TReturn>>(binderType,knownType,binder, name, context, argNames, staticContext); callsite =tCallSite; } return tCallSite.Target(tCallSite, target, args[0], args[1]); } case 3: { var tCallSite = (CallSite<Func<CallSite, TTarget, object, object, object,TReturn>>)callsite; if(tCallSite==null){ tCallSite = CreateCallSite<Func<CallSite, TTarget, object, object, object,TReturn>>(binderType,knownType,binder, name, context, argNames, staticContext); callsite =tCallSite; } return tCallSite.Target(tCallSite, target, args[0], args[1], args[2]); } case 4: { var tCallSite = (CallSite<Func<CallSite, TTarget, object, object, object, object,TReturn>>)callsite; if(tCallSite==null){ tCallSite = CreateCallSite<Func<CallSite, TTarget, object, object, object, object,TReturn>>(binderType,knownType,binder, name, context, argNames, staticContext); callsite =tCallSite; } return tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3]); } case 5: { var tCallSite = (CallSite<Func<CallSite, TTarget, object, object, object, object, object,TReturn>>)callsite; if(tCallSite==null){ tCallSite = CreateCallSite<Func<CallSite, TTarget, object, object, object, object, object,TReturn>>(binderType,knownType,binder, name, context, argNames, staticContext); callsite =tCallSite; } return tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3], args[4]); } case 6: { var tCallSite = (CallSite<Func<CallSite, TTarget, object, object, object, object, object, object,TReturn>>)callsite; if(tCallSite==null){ tCallSite = CreateCallSite<Func<CallSite, TTarget, object, object, object, object, object, object,TReturn>>(binderType,knownType,binder, name, context, argNames, staticContext); callsite =tCallSite; } return tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3], args[4], args[5]); } case 7: { var tCallSite = (CallSite<Func<CallSite, TTarget, object, object, object, object, object, object, object,TReturn>>)callsite; if(tCallSite==null){ tCallSite = CreateCallSite<Func<CallSite, TTarget, object, object, object, object, object, object, object,TReturn>>(binderType,knownType,binder, name, context, argNames, staticContext); callsite =tCallSite; } return tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3], args[4], args[5], args[6]); } case 8: { var tCallSite = (CallSite<Func<CallSite, TTarget, object, object, object, object, object, object, object, object,TReturn>>)callsite; if(tCallSite==null){ tCallSite = CreateCallSite<Func<CallSite, TTarget, object, object, object, object, object, object, object, object,TReturn>>(binderType,knownType,binder, name, context, argNames, staticContext); callsite =tCallSite; } return tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7]); } case 9: { var tCallSite = (CallSite<Func<CallSite, TTarget, object, object, object, object, object, object, object, object, object,TReturn>>)callsite; if(tCallSite==null){ tCallSite = CreateCallSite<Func<CallSite, TTarget, object, object, object, object, object, object, object, object, object,TReturn>>(binderType,knownType,binder, name, context, argNames, staticContext); callsite =tCallSite; } return tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], args[8]); } case 10: { var tCallSite = (CallSite<Func<CallSite, TTarget, object, object, object, object, object, object, object, object, object, object,TReturn>>)callsite; if(tCallSite==null){ tCallSite = CreateCallSite<Func<CallSite, TTarget, object, object, object, object, object, object, object, object, object, object,TReturn>>(binderType,knownType,binder, name, context, argNames, staticContext); callsite =tCallSite; } return tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], args[8], args[9]); } case 11: { var tCallSite = (CallSite<Func<CallSite, TTarget, object, object, object, object, object, object, object, object, object, object, object,TReturn>>)callsite; if(tCallSite==null){ tCallSite = CreateCallSite<Func<CallSite, TTarget, object, object, object, object, object, object, object, object, object, object, object,TReturn>>(binderType,knownType,binder, name, context, argNames, staticContext); callsite =tCallSite; } return tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], args[8], args[9], args[10]); } case 12: { var tCallSite = (CallSite<Func<CallSite, TTarget, object, object, object, object, object, object, object, object, object, object, object, object,TReturn>>)callsite; if(tCallSite==null){ tCallSite = CreateCallSite<Func<CallSite, TTarget, object, object, object, object, object, object, object, object, object, object, object, object,TReturn>>(binderType,knownType,binder, name, context, argNames, staticContext); callsite =tCallSite; } return tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], args[8], args[9], args[10], args[11]); } case 13: { var tCallSite = (CallSite<Func<CallSite, TTarget, object, object, object, object, object, object, object, object, object, object, object, object, object,TReturn>>)callsite; if(tCallSite==null){ tCallSite = CreateCallSite<Func<CallSite, TTarget, object, object, object, object, object, object, object, object, object, object, object, object, object,TReturn>>(binderType,knownType,binder, name, context, argNames, staticContext); callsite =tCallSite; } return tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], args[8], args[9], args[10], args[11], args[12]); } case 14: { var tCallSite = (CallSite<Func<CallSite, TTarget, object, object, object, object, object, object, object, object, object, object, object, object, object, object,TReturn>>)callsite; if(tCallSite==null){ tCallSite = CreateCallSite<Func<CallSite, TTarget, object, object, object, object, object, object, object, object, object, object, object, object, object, object,TReturn>>(binderType,knownType,binder, name, context, argNames, staticContext); callsite =tCallSite; } return tCallSite.Target(tCallSite, target, args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], args[8], args[9], args[10], args[11], args[12], args[13]); } #endregion default: var tArgTypes = Enumerable.Repeat(typeof(object), tSwitch); var tDelagateType = EmitCallSiteFuncType(tArgTypes, typeof(TTarget)); return Dynamic.InvokeCallSite(CreateCallSite(tDelagateType, binderType,knownType, binder, name, context, argNames), target, args); } } #if !__MonoCS__ internal static Delegate WrapFuncHelper<TReturn>(dynamic invokable, int length) { switch(length){ #region Optimizations case 0: return new Func< TReturn>(()=> invokable()); case 1: return new Func< object, TReturn>((a1)=> invokable(a1)); case 2: return new Func< object, object, TReturn>((a1,a2)=> invokable(a1,a2)); case 3: return new Func< object, object, object, TReturn>((a1,a2,a3)=> invokable(a1,a2,a3)); case 4: return new Func< object, object, object, object, TReturn>((a1,a2,a3,a4)=> invokable(a1,a2,a3,a4)); case 5: return new Func< object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5)=> invokable(a1,a2,a3,a4,a5)); case 6: return new Func< object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6)=> invokable(a1,a2,a3,a4,a5,a6)); case 7: return new Func< object, object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6,a7)=> invokable(a1,a2,a3,a4,a5,a6,a7)); case 8: return new Func< object, object, object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6,a7,a8)=> invokable(a1,a2,a3,a4,a5,a6,a7,a8)); case 9: return new Func< object, object, object, object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6,a7,a8,a9)=> invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9)); case 10: return new Func< object, object, object, object, object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10)=> invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10)); case 11: return new Func< object, object, object, object, object, object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11)=> invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11)); case 12: return new Func< object, object, object, object, object, object, object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12)=> invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12)); case 13: return new Func< object, object, object, object, object, object, object, object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13)=> invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13)); case 14: return new Func< object, object, object, object, object, object, object, object, object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14)=> invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14)); case 15: return new Func< object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15)=> invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15)); case 16: return new Func< object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15,a16)=> invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15,a16)); #endregion default: return new DynamicFunc<TReturn>(args=>(TReturn)Dynamic.Invoke((object)invokable,args)); } } #endif internal static class MonoConvertCallSite<T>{ internal static CallSite CallSite; } internal static Delegate WrapFuncHelperMono<TReturn>(dynamic invokable, int length) { switch(length){ #region Optimizations case 0: return new Func< TReturn>(()=>{ object tResult= invokable(); return (TReturn) InvokeConvertCallSite(tResult, true, typeof(TReturn), typeof(object), ref MonoConvertCallSite<TReturn>.CallSite); }); case 1: return new Func< object, TReturn>((a1)=>{ object tResult= invokable(a1); return (TReturn) InvokeConvertCallSite(tResult, true, typeof(TReturn), typeof(object), ref MonoConvertCallSite<TReturn>.CallSite); }); case 2: return new Func< object, object, TReturn>((a1,a2)=>{ object tResult= invokable(a1,a2); return (TReturn) InvokeConvertCallSite(tResult, true, typeof(TReturn), typeof(object), ref MonoConvertCallSite<TReturn>.CallSite); }); case 3: return new Func< object, object, object, TReturn>((a1,a2,a3)=>{ object tResult= invokable(a1,a2,a3); return (TReturn) InvokeConvertCallSite(tResult, true, typeof(TReturn), typeof(object), ref MonoConvertCallSite<TReturn>.CallSite); }); case 4: return new Func< object, object, object, object, TReturn>((a1,a2,a3,a4)=>{ object tResult= invokable(a1,a2,a3,a4); return (TReturn) InvokeConvertCallSite(tResult, true, typeof(TReturn), typeof(object), ref MonoConvertCallSite<TReturn>.CallSite); }); case 5: return new Func< object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5)=>{ object tResult= invokable(a1,a2,a3,a4,a5); return (TReturn) InvokeConvertCallSite(tResult, true, typeof(TReturn), typeof(object), ref MonoConvertCallSite<TReturn>.CallSite); }); case 6: return new Func< object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6)=>{ object tResult= invokable(a1,a2,a3,a4,a5,a6); return (TReturn) InvokeConvertCallSite(tResult, true, typeof(TReturn), typeof(object), ref MonoConvertCallSite<TReturn>.CallSite); }); case 7: return new Func< object, object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6,a7)=>{ object tResult= invokable(a1,a2,a3,a4,a5,a6,a7); return (TReturn) InvokeConvertCallSite(tResult, true, typeof(TReturn), typeof(object), ref MonoConvertCallSite<TReturn>.CallSite); }); case 8: return new Func< object, object, object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6,a7,a8)=>{ object tResult= invokable(a1,a2,a3,a4,a5,a6,a7,a8); return (TReturn) InvokeConvertCallSite(tResult, true, typeof(TReturn), typeof(object), ref MonoConvertCallSite<TReturn>.CallSite); }); case 9: return new Func< object, object, object, object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6,a7,a8,a9)=>{ object tResult= invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9); return (TReturn) InvokeConvertCallSite(tResult, true, typeof(TReturn), typeof(object), ref MonoConvertCallSite<TReturn>.CallSite); }); case 10: return new Func< object, object, object, object, object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10)=>{ object tResult= invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10); return (TReturn) InvokeConvertCallSite(tResult, true, typeof(TReturn), typeof(object), ref MonoConvertCallSite<TReturn>.CallSite); }); case 11: return new Func< object, object, object, object, object, object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11)=>{ object tResult= invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11); return (TReturn) InvokeConvertCallSite(tResult, true, typeof(TReturn), typeof(object), ref MonoConvertCallSite<TReturn>.CallSite); }); case 12: return new Func< object, object, object, object, object, object, object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12)=>{ object tResult= invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12); return (TReturn) InvokeConvertCallSite(tResult, true, typeof(TReturn), typeof(object), ref MonoConvertCallSite<TReturn>.CallSite); }); case 13: return new Func< object, object, object, object, object, object, object, object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13)=>{ object tResult= invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13); return (TReturn) InvokeConvertCallSite(tResult, true, typeof(TReturn), typeof(object), ref MonoConvertCallSite<TReturn>.CallSite); }); case 14: return new Func< object, object, object, object, object, object, object, object, object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14)=>{ object tResult= invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14); return (TReturn) InvokeConvertCallSite(tResult, true, typeof(TReturn), typeof(object), ref MonoConvertCallSite<TReturn>.CallSite); }); case 15: return new Func< object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15)=>{ object tResult= invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15); return (TReturn) InvokeConvertCallSite(tResult, true, typeof(TReturn), typeof(object), ref MonoConvertCallSite<TReturn>.CallSite); }); case 16: return new Func< object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, TReturn>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15,a16)=>{ object tResult= invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15,a16); return (TReturn) InvokeConvertCallSite(tResult, true, typeof(TReturn), typeof(object), ref MonoConvertCallSite<TReturn>.CallSite); }); #endregion default: return new DynamicFunc<TReturn>(args=>{ object tResult= Dynamic.Invoke((object)invokable,args); return (TReturn) InvokeConvertCallSite(tResult, true, typeof(TReturn), typeof(object), ref MonoConvertCallSite<TReturn>.CallSite); }); } } internal static Delegate WrapAction(dynamic invokable, int length) { switch(length){ #region Optimizations case 0: return new Action(()=>invokable()); case 1: return new Action< object>((a1)=> invokable(a1)); case 2: return new Action< object, object>((a1,a2)=> invokable(a1,a2)); case 3: return new Action< object, object, object>((a1,a2,a3)=> invokable(a1,a2,a3)); case 4: return new Action< object, object, object, object>((a1,a2,a3,a4)=> invokable(a1,a2,a3,a4)); case 5: return new Action< object, object, object, object, object>((a1,a2,a3,a4,a5)=> invokable(a1,a2,a3,a4,a5)); case 6: return new Action< object, object, object, object, object, object>((a1,a2,a3,a4,a5,a6)=> invokable(a1,a2,a3,a4,a5,a6)); case 7: return new Action< object, object, object, object, object, object, object>((a1,a2,a3,a4,a5,a6,a7)=> invokable(a1,a2,a3,a4,a5,a6,a7)); case 8: return new Action< object, object, object, object, object, object, object, object>((a1,a2,a3,a4,a5,a6,a7,a8)=> invokable(a1,a2,a3,a4,a5,a6,a7,a8)); case 9: return new Action< object, object, object, object, object, object, object, object, object>((a1,a2,a3,a4,a5,a6,a7,a8,a9)=> invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9)); case 10: return new Action< object, object, object, object, object, object, object, object, object, object>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10)=> invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10)); case 11: return new Action< object, object, object, object, object, object, object, object, object, object, object>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11)=> invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11)); case 12: return new Action< object, object, object, object, object, object, object, object, object, object, object, object>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12)=> invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12)); case 13: return new Action< object, object, object, object, object, object, object, object, object, object, object, object, object>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13)=> invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13)); case 14: return new Action< object, object, object, object, object, object, object, object, object, object, object, object, object, object>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14)=> invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14)); case 15: return new Action< object, object, object, object, object, object, object, object, object, object, object, object, object, object, object>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15)=> invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15)); case 16: return new Action< object, object, object, object, object, object, object, object, object, object, object, object, object, object, object, object>((a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15,a16)=> invokable(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15,a16)); #endregion default: return new DynamicAction(args=>Dynamic.InvokeAction((object)invokable,args)); } } internal static object FastDynamicInvokeReturn(Delegate del, dynamic [] args) { dynamic tDel =del; switch(args.Length){ default: try { return del.DynamicInvoke(args); } catch (TargetInvocationException ex) { throw ex.InnerException; } #region Optimization case 1: return tDel(args[0]); case 2: return tDel(args[0],args[1]); case 3: return tDel(args[0],args[1],args[2]); case 4: return tDel(args[0],args[1],args[2],args[3]); case 5: return tDel(args[0],args[1],args[2],args[3],args[4]); case 6: return tDel(args[0],args[1],args[2],args[3],args[4],args[5]); case 7: return tDel(args[0],args[1],args[2],args[3],args[4],args[5],args[6]); case 8: return tDel(args[0],args[1],args[2],args[3],args[4],args[5],args[6],args[7]); case 9: return tDel(args[0],args[1],args[2],args[3],args[4],args[5],args[6],args[7],args[8]); case 10: return tDel(args[0],args[1],args[2],args[3],args[4],args[5],args[6],args[7],args[8],args[9]); case 11: return tDel(args[0],args[1],args[2],args[3],args[4],args[5],args[6],args[7],args[8],args[9],args[10]); case 12: return tDel(args[0],args[1],args[2],args[3],args[4],args[5],args[6],args[7],args[8],args[9],args[10],args[11]); case 13: return tDel(args[0],args[1],args[2],args[3],args[4],args[5],args[6],args[7],args[8],args[9],args[10],args[11],args[12]); case 14: return tDel(args[0],args[1],args[2],args[3],args[4],args[5],args[6],args[7],args[8],args[9],args[10],args[11],args[12],args[13]); case 15: return tDel(args[0],args[1],args[2],args[3],args[4],args[5],args[6],args[7],args[8],args[9],args[10],args[11],args[12],args[13],args[14]); case 16: return tDel(args[0],args[1],args[2],args[3],args[4],args[5],args[6],args[7],args[8],args[9],args[10],args[11],args[12],args[13],args[14],args[15]); #endregion } } internal static void FastDynamicInvokeAction(Delegate del, params dynamic [] args) { dynamic tDel =del; switch(args.Length){ default: try { del.DynamicInvoke(args); } catch (TargetInvocationException ex) { throw ex.InnerException; } return; #region Optimization case 1: tDel(args[0]); return; case 2: tDel(args[0],args[1]); return; case 3: tDel(args[0],args[1],args[2]); return; case 4: tDel(args[0],args[1],args[2],args[3]); return; case 5: tDel(args[0],args[1],args[2],args[3],args[4]); return; case 6: tDel(args[0],args[1],args[2],args[3],args[4],args[5]); return; case 7: tDel(args[0],args[1],args[2],args[3],args[4],args[5],args[6]); return; case 8: tDel(args[0],args[1],args[2],args[3],args[4],args[5],args[6],args[7]); return; case 9: tDel(args[0],args[1],args[2],args[3],args[4],args[5],args[6],args[7],args[8]); return; case 10: tDel(args[0],args[1],args[2],args[3],args[4],args[5],args[6],args[7],args[8],args[9]); return; case 11: tDel(args[0],args[1],args[2],args[3],args[4],args[5],args[6],args[7],args[8],args[9],args[10]); return; case 12: tDel(args[0],args[1],args[2],args[3],args[4],args[5],args[6],args[7],args[8],args[9],args[10],args[11]); return; case 13: tDel(args[0],args[1],args[2],args[3],args[4],args[5],args[6],args[7],args[8],args[9],args[10],args[11],args[12]); return; case 14: tDel(args[0],args[1],args[2],args[3],args[4],args[5],args[6],args[7],args[8],args[9],args[10],args[11],args[12],args[13]); return; case 15: tDel(args[0],args[1],args[2],args[3],args[4],args[5],args[6],args[7],args[8],args[9],args[10],args[11],args[12],args[13],args[14]); return; case 16: tDel(args[0],args[1],args[2],args[3],args[4],args[5],args[6],args[7],args[8],args[9],args[10],args[11],args[12],args[13],args[14],args[15]); return; #endregion } } } }
apache-2.0
rrenomeron/cas
support/cas-server-support-throttle-redis/src/main/java/org/apereo/cas/web/support/RedisThrottledSubmissionHandlerInterceptorAdapter.java
2579
package org.apereo.cas.web.support; import org.apereo.cas.audit.RedisAuditTrailManager; import lombok.extern.slf4j.Slf4j; import lombok.val; import org.apereo.inspektr.audit.AuditActionContext; import org.apereo.inspektr.common.web.ClientInfoHolder; import org.springframework.data.redis.core.RedisTemplate; import javax.servlet.http.HttpServletRequest; import java.util.Comparator; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; /** * Works in conjunction with a redis database to * block attempts to dictionary attack users. * * @author Misagh Moayyed * @since 6.1.0 */ @Slf4j public class RedisThrottledSubmissionHandlerInterceptorAdapter extends AbstractInspektrAuditHandlerInterceptorAdapter { private final transient RedisTemplate redisTemplate; public RedisThrottledSubmissionHandlerInterceptorAdapter(final ThrottledSubmissionHandlerConfigurationContext configurationContext, final RedisTemplate redisTemplate) { super(configurationContext); this.redisTemplate = redisTemplate; } @Override public boolean exceedsThreshold(final HttpServletRequest request) { val clientInfo = ClientInfoHolder.getClientInfo(); val remoteAddress = clientInfo.getClientIpAddress(); val keys = (Set<String>) this.redisTemplate.keys(RedisAuditTrailManager.CAS_AUDIT_CONTEXT_PREFIX + '*'); val failures = Objects.requireNonNull(keys) .stream() .map(redisKey -> this.redisTemplate.boundValueOps(redisKey)) .map(AuditActionContext.class::cast) .filter(audit -> audit.getPrincipal().equalsIgnoreCase(getUsernameParameterFromRequest(request)) && audit.getClientIpAddress().equalsIgnoreCase(remoteAddress) && audit.getActionPerformed().equalsIgnoreCase(getConfigurationContext().getAuthenticationFailureCode()) && audit.getApplicationCode().equalsIgnoreCase(getConfigurationContext().getApplicationCode()) && audit.getWhenActionWasPerformed().compareTo(getFailureInRangeCutOffDate()) >= 0) .sorted(Comparator.comparing(AuditActionContext::getWhenActionWasPerformed).reversed()) .limit(2) .map(AuditActionContext::getWhenActionWasPerformed) .collect(Collectors.toList()); return calculateFailureThresholdRateAndCompare(failures); } @Override public String getName() { return "RedisThrottle"; } }
apache-2.0
elventear/dropwizard-common
dropwizard-success-meter/src/main/java/smartthings/dw/successmeter/SuccessMeterModule.java
548
package smartthings.dw.successmeter; import smartthings.dw.guice.AbstractDwModule; import static com.google.inject.matcher.Matchers.annotatedWith; import static com.google.inject.matcher.Matchers.any; public class SuccessMeterModule extends AbstractDwModule { @Override protected void configure() { MethodInvocationSuccessMeterInterceptor interceptor = new MethodInvocationSuccessMeterInterceptor(); requestInjection(interceptor); bindInterceptor(any(), annotatedWith(SuccessMeter.class), interceptor); } }
apache-2.0
flofreud/aws-sdk-java
aws-java-sdk-kms/src/main/java/com/amazonaws/services/kms/model/ReEncryptResult.java
8596
/* * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights * Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.services.kms.model; import java.io.Serializable; /** * */ public class ReEncryptResult implements Serializable, Cloneable { /** * <p> * The re-encrypted data. If you are using the CLI, the value is Base64 * encoded. Otherwise, it is not encoded. * </p> */ private java.nio.ByteBuffer ciphertextBlob; /** * <p> * Unique identifier of the key used to originally encrypt the data. * </p> */ private String sourceKeyId; /** * <p> * Unique identifier of the key used to re-encrypt the data. * </p> */ private String keyId; /** * <p> * The re-encrypted data. If you are using the CLI, the value is Base64 * encoded. Otherwise, it is not encoded. * </p> * <p> * AWS SDK for Java performs a Base64 encoding on this field before sending * this request to AWS service by default. Users of the SDK should not * perform Base64 encoding on this field. * </p> * <p> * Warning: ByteBuffers returned by the SDK are mutable. Changes to the * content or position of the byte buffer will be seen by all objects that * have a reference to this object. It is recommended to call * ByteBuffer.duplicate() or ByteBuffer.asReadOnlyBuffer() before using or * reading from the buffer. This behavior will be changed in a future major * version of the SDK. * </p> * * @param ciphertextBlob * The re-encrypted data. If you are using the CLI, the value is * Base64 encoded. Otherwise, it is not encoded. */ public void setCiphertextBlob(java.nio.ByteBuffer ciphertextBlob) { this.ciphertextBlob = ciphertextBlob; } /** * <p> * The re-encrypted data. If you are using the CLI, the value is Base64 * encoded. Otherwise, it is not encoded. * </p> * <p> * {@code ByteBuffer}s are stateful. Calling their {@code get} methods * changes their {@code position}. We recommend using * {@link java.nio.ByteBuffer#asReadOnlyBuffer()} to create a read-only view * of the buffer with an independent {@code position}, and calling * {@code get} methods on this rather than directly on the returned * {@code ByteBuffer}. Doing so will ensure that anyone else using the * {@code ByteBuffer} will not be affected by changes to the {@code position} * . * </p> * * @return The re-encrypted data. If you are using the CLI, the value is * Base64 encoded. Otherwise, it is not encoded. */ public java.nio.ByteBuffer getCiphertextBlob() { return this.ciphertextBlob; } /** * <p> * The re-encrypted data. If you are using the CLI, the value is Base64 * encoded. Otherwise, it is not encoded. * </p> * * @param ciphertextBlob * The re-encrypted data. If you are using the CLI, the value is * Base64 encoded. Otherwise, it is not encoded. * @return Returns a reference to this object so that method calls can be * chained together. */ public ReEncryptResult withCiphertextBlob(java.nio.ByteBuffer ciphertextBlob) { setCiphertextBlob(ciphertextBlob); return this; } /** * <p> * Unique identifier of the key used to originally encrypt the data. * </p> * * @param sourceKeyId * Unique identifier of the key used to originally encrypt the data. */ public void setSourceKeyId(String sourceKeyId) { this.sourceKeyId = sourceKeyId; } /** * <p> * Unique identifier of the key used to originally encrypt the data. * </p> * * @return Unique identifier of the key used to originally encrypt the data. */ public String getSourceKeyId() { return this.sourceKeyId; } /** * <p> * Unique identifier of the key used to originally encrypt the data. * </p> * * @param sourceKeyId * Unique identifier of the key used to originally encrypt the data. * @return Returns a reference to this object so that method calls can be * chained together. */ public ReEncryptResult withSourceKeyId(String sourceKeyId) { setSourceKeyId(sourceKeyId); return this; } /** * <p> * Unique identifier of the key used to re-encrypt the data. * </p> * * @param keyId * Unique identifier of the key used to re-encrypt the data. */ public void setKeyId(String keyId) { this.keyId = keyId; } /** * <p> * Unique identifier of the key used to re-encrypt the data. * </p> * * @return Unique identifier of the key used to re-encrypt the data. */ public String getKeyId() { return this.keyId; } /** * <p> * Unique identifier of the key used to re-encrypt the data. * </p> * * @param keyId * Unique identifier of the key used to re-encrypt the data. * @return Returns a reference to this object so that method calls can be * chained together. */ public ReEncryptResult withKeyId(String keyId) { setKeyId(keyId); return this; } /** * Returns a string representation of this object; useful for testing and * debugging. * * @return A string representation of this object. * * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{"); if (getCiphertextBlob() != null) sb.append("CiphertextBlob: " + getCiphertextBlob() + ","); if (getSourceKeyId() != null) sb.append("SourceKeyId: " + getSourceKeyId() + ","); if (getKeyId() != null) sb.append("KeyId: " + getKeyId()); sb.append("}"); return sb.toString(); } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (obj instanceof ReEncryptResult == false) return false; ReEncryptResult other = (ReEncryptResult) obj; if (other.getCiphertextBlob() == null ^ this.getCiphertextBlob() == null) return false; if (other.getCiphertextBlob() != null && other.getCiphertextBlob().equals(this.getCiphertextBlob()) == false) return false; if (other.getSourceKeyId() == null ^ this.getSourceKeyId() == null) return false; if (other.getSourceKeyId() != null && other.getSourceKeyId().equals(this.getSourceKeyId()) == false) return false; if (other.getKeyId() == null ^ this.getKeyId() == null) return false; if (other.getKeyId() != null && other.getKeyId().equals(this.getKeyId()) == false) return false; return true; } @Override public int hashCode() { final int prime = 31; int hashCode = 1; hashCode = prime * hashCode + ((getCiphertextBlob() == null) ? 0 : getCiphertextBlob() .hashCode()); hashCode = prime * hashCode + ((getSourceKeyId() == null) ? 0 : getSourceKeyId().hashCode()); hashCode = prime * hashCode + ((getKeyId() == null) ? 0 : getKeyId().hashCode()); return hashCode; } @Override public ReEncryptResult clone() { try { return (ReEncryptResult) super.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException( "Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e); } } }
apache-2.0
jcnelson/syndicate
old/md-service/SMDS/web2py/applications/SMDS/models/0.py
635
from gluon.storage import Storage settings = Storage() settings.migrate = True settings.title = 'Syndicate Metadata Service' settings.subtitle = 'Princeton University' settings.author = 'Jude Nelson' settings.author_email = '[email protected]' settings.keywords = '' settings.description = 'Web front-end for the Syndicate Metadata Service' settings.layout_theme = 'Default' settings.database_uri = '' settings.security_key = '96b4dcd8-c27d-4b0e-8fbf-8a2ccc0e4db4' settings.email_server = '' settings.email_sender = '' settings.email_login = '' settings.login_method = 'local' settings.login_config = '' settings.plugins = []
apache-2.0
OHDSI/Olympus
src/main/webapp/Heracles/src/js/charts/observations.js
25745
define(["jquery", "bootstrap", "d3","jnj_chart", "ohdsi_common", "datatables", "datatables-colvis", "colorbrewer", "tabletools"], function ($, bootstrap, d3, jnj_chart, common, DataTables, DataTablesColvis, colorbrewer, TableTools) { function ObservationsRenderer() {} ObservationsRenderer.prototype = {}; ObservationsRenderer.prototype.constructor = ObservationsRenderer; ObservationsRenderer.render = function(cohort) { d3.selectAll("svg").remove(); var id = cohort.id; this.baseUrl = getSourceSpecificWebApiUrl() + 'cohortresults/' + id; var threshold; var datatable; // bind to all matching elements upon creation $(document).on('click', '#observation_table tbody tr', function () { $('#observation_table tbody tr.selected').removeClass('selected'); $(this).addClass('selected'); var data = datatable.data()[datatable.row(this)[0]]; if (data) { var did = data.concept_id; var concept_name = data.snomed; ObservationsRenderer.drilldown(did, concept_name); } }); $(document).on( 'shown.bs.tab', 'a[data-toggle="tab"]', function (e) { $(window).trigger("resize"); // Version 1. $('table:visible').each(function() { var oTableTools = TableTools.fnGetInstance(this); if (oTableTools && oTableTools.fnResizeRequired()) { oTableTools.fnResizeButtons(); } }); }); ObservationsRenderer.drilldown = function (concept_id, concept_name) { $('#loading-text').text("Querying Database..."); $('#spinner-modal').modal('show'); $('.drilldown svg').remove(); $('#observationDrilldownTitle').text(concept_name); $('#reportObservationDrilldown').removeClass('hidden'); $.ajax({ type: "GET", url: ObservationsRenderer.baseUrl + '/observation/' + concept_id, success: function (data) { $('#loading-text').text("Rendering Visualizations..."); if (data) { // age at first diagnosis visualization var firstDiagnosis = common.normalizeArray(data.ageAtFirstOccurrence); if (!firstDiagnosis.empty) { var ageAtFirstOccurrence = new jnj_chart.boxplot(); var bpseries = []; var bpdata = common.normalizeDataframe(firstDiagnosis); for (var i = 0; i < bpdata.category.length; i++) { bpseries.push({ Category: bpdata.category[i], min: bpdata.minValue[i], max: bpdata.maxValue[i], median: bpdata.medianValue[i], LIF: bpdata.p10Value[i], q1: bpdata.p25Value[i], q3: bpdata.p75Value[i], UIF: bpdata.p90Value[i] }); } ageAtFirstOccurrence.render(bpseries, "#ageAtFirstOccurrence", 500, 300, { xLabel: 'Gender', yLabel: 'Age at First Occurrence' }); } common.generateCSVDownload($("#ageAtFirstOccurrence"), data.ageAtFirstOccurrence, "ageAtFirstOccurrence"); // prevalence by month var prevData = common.normalizeArray(data.prevalenceByMonth); if (!prevData.empty) { var byMonthSeries = common.mapMonthYearDataToSeries(prevData, { dateField: 'xCalendarMonth', yValue: 'yPrevalence1000Pp', yPercent: 'yPrevalence1000Pp' }); var prevalenceByMonth = new jnj_chart.line(); prevalenceByMonth.render(byMonthSeries, "#observationPrevalenceByMonth", 1000, 300, { xScale: d3.time.scale().domain(d3.extent(byMonthSeries[0].values, function (d) { return d.xValue; })), xFormat: d3.time.format("%m/%Y"), tickFormat: function (d) { var monthFormat = d3.time.format("%m/%Y"); var yearFormat = d3.time.format("%Y"); return (d.getMonth() === 0) ? yearFormat(d) : monthFormat(d); }, xLabel: "Date", yLabel: "Prevalence per 1000 People" }); } common.generateCSVDownload($("#observationPrevalenceByMonth"), data.prevalenceByMonth, "observationPrevalenceByMonth"); // observation type visualization if (data.observationsByType && data.observationsByType.length > 0) { var observationsByType = new jnj_chart.donut(); observationsByType.render(common.mapConceptData(data.observationsByType), "#observationsByType", 500, 300, { margin: { top: 5, left: 5, right: 220, bottom: 5 } }); } common.generateCSVDownload($("#observationsByType"), data.observationsByType, "observationsByType"); // render trellis var trellisData = common.normalizeArray(data.prevalenceByGenderAgeYear, true); if (!trellisData.empty) { var allDeciles = ["0-9", "10-19", "20-29", "30-39", "40-49", "50-59", "60-69", "70-79", "80-89", "90-99"]; var allSeries = ["MALE", "FEMALE"]; var minYear = d3.min(trellisData.xCalendarYear), maxYear = d3.max(trellisData.xCalendarYear); var seriesInitializer = function (tName, sName, x, y) { return { trellisName: tName, seriesName: sName, xCalendarYear: x, yPrevalence1000Pp: y }; }; var nestByDecile = d3.nest() .key(function (d) { return d.trellisName; }) .key(function (d) { return d.seriesName; }) .sortValues(function (a, b) { return a.xCalendarYear - b.xCalendarYear; }); // map data into chartable form var normalizedSeries = trellisData.trellisName.map(function (d, i) { var item = {}; var container = this; d3.keys(container).forEach(function (p) { item[p] = container[p][i]; }); return item; }, trellisData); var dataByDecile = nestByDecile.entries(normalizedSeries); // fill in gaps var yearRange = d3.range(minYear, maxYear, 1); dataByDecile.forEach(function (trellis) { trellis.values.forEach(function (series) { series.values = yearRange.map(function (year) { yearData = series.values.filter(function (f) { return f.xCalendarYear === year; })[0] || seriesInitializer(trellis.key, series.key, year, 0); yearData.date = new Date(year, 0, 1); return yearData; }); }); }); // create svg with range bands based on the trellis names var chart = new jnj_chart.trellisline(); chart.render(dataByDecile, "#trellisLinePlot", 1000, 300, { trellisSet: allDeciles, trellisLabel: "Age Decile", seriesLabel: "Year of Observation", yLabel: "Prevalence Per 1000 People", xFormat: d3.time.format("%Y"), yFormat: d3.format("0.2f"), tickPadding: 20, colors: d3.scale.ordinal() .domain(["MALE", "FEMALE", "UNKNOWN",]) .range(["#1F78B4", "#FB9A99", "#33A02C"]) }); } common.generateCSVDownload($("#trellisLinePlot"), data.prevalenceByGenderAgeYear, "prevalenceByGenderAgeYear"); // Records by Unit var recordsByUnit = new jnj_chart.donut(); var datdaRecordsByUnit = []; var recordsByUnitData = common.normalizeArray(data.recordsByUnit); if (!recordsByUnitData.empty) { if (recordsByUnitData.conceptName instanceof Array) { datdaRecordsByUnit = recordsByUnitData.conceptName.map(function (d, i) { var item = { id: this.conceptName[i], label: this.conceptName[i], value: this.countValue[i] }; return item; }, recordsByUnitData); } else { datdaRecordsByUnit.push( { id: recordsByUnitData.conceptName, label: recordsByUnitData.conceptName, value: recordsByUnitData.countValue }); } datdaRecordsByUnit.sort(function (a, b) { var nameA = a.label.toLowerCase(), nameB = b.label.toLowerCase(); if (nameA < nameB) { //sort string ascending return -1; } if (nameA > nameB) { return 1; } return 0; //default return value (no sorting) }); recordsByUnit.render(datdaRecordsByUnit, "#recordsByUnit", 500, 300, { margin: { top: 5, left: 5, right: 200, bottom: 5 } }); } common.generateCSVDownload($("#recordsByUnit"), data.recordsByUnit, "recordsByUnit"); // Observation Value Distribution var obsValueDist = common.normalizeArray(data.observationValueDistribution); if (!obsValueDist.empty) { var observationValues = new jnj_chart.boxplot(); var obpseries = []; obpdata = common.normalizeDataframe(obsValueDist); obpseries = obpdata.category.map(function (d, i) { var item = { Category: obpdata.category[i], min: obpdata.minValue[i], max: obpdata.maxValue[i], median: obpdata.medianValue[i], LIF: obpdata.p10Value[i], q1: obpdata.p25Value[i], q3: obpdata.p75Value[i], UIF: obpdata.p90Value[i] }; return item; }, obpdata); observationValues.render(obpseries, "#observationValues", 500, 300, { yMax: d3.max(obpdata.p90Value) || obpdata.p90Value, // handle when dataframe is not array of values xLabel: 'Unit', yLabel: 'Observation Value' }); } common.generateCSVDownload($("#observationValues"), data.observationValueDistribution, "observationValues"); } $('#spinner-modal').modal('hide'); }, error : function() { $('#spinner-modal').modal('hide'); } }); }; function getColors(data) { /* console.log(data); if (data.length <= 3) { var colors = []; $.each(data, function() { var lbl = this.label.toLowerCase(); if (lbl.indexOf("above") >= 0 || lbl.indexOf("high") >= 0) { colors.push("#e31a1c"); } else if (lbl.indexOf("below") >= 0 || lbl.indexOf("low") >= 0) { colors.push("#1f78b4"); } else if (lbl.indexOf("normal") >= 0 || lbl.indexOf("within") >= 0) { colors.push("#33a02c"); } else { colors.push("#6a3d9a"); } }); console.log(colors); return colors; } */ return colorbrewer.Dark2[3]; } function buildHierarchyFromJSON(data, threshold) { var total = 0; var root = { "name": "root", "children": [] }; for (i = 0; i < data.percentPersons.length; i++) { total += data.percentPersons[i]; } for (var i = 0; i < data.conceptPath.length; i++) { var parts = data.conceptPath[i].split("||"); var currentNode = root; for (var j = 0; j < parts.length; j++) { var children = currentNode.children; var nodeName = parts[j]; var childNode; if (j + 1 < parts.length) { // Not yet at the end of the path; move down the tree. var foundChild = false; for (var k = 0; k < children.length; k++) { if (children[k].name === nodeName) { childNode = children[k]; foundChild = true; break; } } // If we don't already have a child node for this branch, create it. if (!foundChild) { childNode = { "name": nodeName, "children": [] }; children.push(childNode); } currentNode = childNode; } else { // Reached the end of the path; create a leaf node. childNode = { "name": nodeName, "num_persons": data.numPersons[i], "id": data.conceptId[i], "path": data.conceptPath[i], "pct_persons": data.percentPersons[i], "records_per_person": data.recordsPerPerson[i] }; // we only include nodes with sufficient size in the treemap display // sufficient size is configurable in the calculation of threshold // which is a function of the number of pixels in the treemap display if ((data.percentPersons[i] / total) > threshold) { children.push(childNode); } } } } return root; } // show the treemap $('#loading-text').text("Querying Database..."); $('#spinner-modal').modal('show'); var format_pct = d3.format('.2%'); var format_fixed = d3.format('.2f'); var format_comma = d3.format(','); $('#reportObservationOccurrences svg').remove(); var width = 1000; var height = 250; var minimum_area = 50; threshold = minimum_area / (width * height); $.ajax({ type: "GET", url: ObservationsRenderer.baseUrl + '/observation', contentType: "application/json; charset=utf-8", success: function (data) { $('#loading-text').text("Rendering Visualizations..."); var normalizedData = common.normalizeDataframe(common.normalizeArray(data, true)); data = normalizedData; if (!data.empty) { var table_data = normalizedData.conceptPath.map(function (d, i) { conceptDetails = this.conceptPath[i].split('||'); return { concept_id: this.conceptId[i], level_4: conceptDetails[0], level_3: conceptDetails[1], level_2: conceptDetails[2], observation_name: conceptDetails[3], num_persons: format_comma(this.numPersons[i]), percent_persons: format_pct(this.percentPersons[i]), records_per_person: format_fixed(this.recordsPerPerson[i]) }; }, data); datatable = $('#observation_table').DataTable({ order: [6, 'desc'], dom: 'T<"clear">lfrtip', data: table_data, columns: [ { data: 'concept_id' }, { data: 'level_4' }, { data: 'level_3', visible: false }, { data: 'level_2' }, { data: 'observation_name' }, { data: 'num_persons', className: 'numeric' }, { data: 'percent_persons', className: 'numeric' }, { data: 'records_per_person', className: 'numeric' } ], pageLength: 5, lengthChange: false, deferRender: true, destroy: true }); $('#reportObservationOccurrences').show(); tree = buildHierarchyFromJSON(data, threshold); var treemap = new jnj_chart.treemap(); treemap.render(tree, '#treemap_container', width, height, { onclick: function (node) { ObservationsRenderer.drilldown(node.id, node.name); }, getsizevalue: function (node) { return node.num_persons; }, getcolorvalue: function (node) { return node.records_per_person; }, getcolorrange: function() { return colorbrewer.Paired[3]; }, getcontent: function (node) { var result = '', steps = node.path.split('||'), i = steps.length - 1; result += '<div class="pathleaf">' + steps[i] + '</div>'; result += '<div class="pathleafstat">Prevalence: ' + format_pct(node.pct_persons) + '</div>'; result += '<div class="pathleafstat">Number of People: ' + format_comma(node.num_persons) + '</div>'; result += '<div class="pathleafstat">Records per Person: ' + format_fixed(node.records_per_person) + '</div>'; return result; }, gettitle: function (node) { var title = '', steps = node.path.split('||'); for (i = 0; i < steps.length - 1; i++) { title += ' <div class="pathstep">' + Array(i + 1).join('&nbsp;&nbsp') + steps[i] + ' </div>'; } return title; } }); $('[data-toggle="popover"]').popover(); } $('#spinner-modal').modal('hide'); }, error : function(data) { $('#spinner-modal').modal('hide'); } }); return ObservationsRenderer; }; return ObservationsRenderer; });
apache-2.0
taobataoma/saivi
tpl/Wap/default/Business_plist.html
4214
<html> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <title>{Saivi:$info.title}</title> <meta name="viewport" content="width=device-width,height=device-height,inital-scale=1.0,maximum-scale=1.0,user-scalable=no;"> <meta name="apple-mobile-web-app-capable" content="yes"> <meta name="apple-mobile-web-app-status-bar-style" content="black"> <meta name="format-detection" content="telephone=no"> <meta charset="utf-8"> <link href="{Saivi::RES}/css/Photo/css/photo.css" rel="stylesheet" type="text/css" /> <link href="{Saivi::RES}/css/Photo/css/photoswipe.css" type="text/css" rel="stylesheet" /> <script src="{Saivi::RES}/css/Photo/js/klass.min.js" type="text/javascript"></script> <script src="{Saivi::RES}/css/Photo/js/code.photoswipe-3.0.5.min.js" type="text/javascript"></script> <script type="text/javascript"> (function(window, PhotoSwipe){ document.addEventListener('DOMContentLoaded', function(){ var options = {}, instance = PhotoSwipe.attach( window.document.querySelectorAll('#Gallery a'), options ); }, false); }(window, window.Code.PhotoSwipe)); </script> <script type="text/javascript"> window.onload = function () { var oWin = document.getElementById("win"); var oLay = document.getElementById("overlay"); var oBtn = document.getElementById("popmenu"); var oClose = document.getElementById("close"); oBtn.onclick = function () { oLay.style.display = "block"; oWin.style.display = "block"; //oWin.style.zIndex = 99999; }; oLay.onclick = function () { oLay.style.display = "none"; oWin.style.display = "none"; } }; $('#plug-wrap').removeClass(); </script> <link rel="stylesheet" href="{Saivi::STATICS}/schools/css/plugmenu.css"> <link href="{Saivi::STATICS}/schools/css/news3_3.css" rel="stylesheet" type="text/css" /> </head> <body id="photo"> <include file="Business:public_menu"/> <br><br> <if condition="$headpic neq ''"> <div class="qiandaobanner"> <a ><img src="{Saivi:$headpic}" ></a> </div> </if> <div id="main" role="main"> <ul id="Gallery" class="gallery"> <volist name="photo" id="photo"> <li><a href="{Saivi:$photo.picurl}"><img src="{Saivi:$photo.picurl}" alt=" "></a> </li> </volist> </ul> </div> <!--jquery.min--> <script src="{Saivi::RES}/css/Photo/js/jquery.min.js" type="text/javascript"></script> <!--下面是瀑布流js--> <script src="{Saivi::RES}/css/Photo/js/jquery.imagesloaded.js" type="text/javascript"></script> <script src="{Saivi::RES}/css/Photo/js/jquery.wookmark.min.js" type="text/javascript"></script> <script type="text/javascript"> (function ($){ $('#Gallery').imagesLoaded(function() { // Prepare layout options. var options = { autoResize: true, // This will auto-update the layout when the browser window is resized. container: $('#main'), // Optional, used for some extra CSS styling offset: 4, // Optional, the distance between grid items itemWidth: 150 // Optional, the width of a grid item }; // Get a reference to your grid items. var handler = $('#Gallery li'); // Call the layout function. handler.wookmark(options); }); })(jQuery); </script> <div class="footerWrapper" style="padding-left: 30%;"> <span class="copyright"> <if condition="$iscopyright eq 1"> {Saivi:$homeInfo.copyright} <else/> {Saivi:$siteCopyright} </if> </span> </div> <include file="Index:styleInclude"/><include file="$cateMenuFileName"/></p> <script type="text/javascript"> window.shareData = { "moduleName":"Business", "moduleID":"{Saivi:$bid}", "imgUrl": "{Saivi:$info['picurl']}", "sendFriendLink": "{Saivi::C('site_url')}{Saivi::U('Business/plist',array('token'=>$token,'bid'=>$bid,'type'=>$type))}", "tTitle": "{Saivi:$info.title}", "tContent": "{Saivi:$info.info|strip_tags}" }; </script> {Saivi:$shareScript} </body> </html>
apache-2.0
nioinnovation/nio-cli
tests/test_cli.py
27669
import responses import unittest from unittest import skipIf from unittest.mock import mock_open, patch, ANY, call from docopt import docopt, DocoptExit from io import StringIO from collections import OrderedDict import sys import nio_cli.cli as cli from nio_cli.commands.base import Base from nio.block.terminals import input try: import niocore niocore_installed = True except: niocore_installed = False class TestCLI(unittest.TestCase): def parse_args(self, command): return docopt(cli.__doc__, command.split(' ')) def test_new_arguments(self): """'new' requires a project-name""" args = self.parse_args('new project') self.assertEqual(args['<project-name>'], 'project') with self.assertRaises(DocoptExit): self.parse_args('new') def test_buildpsec_arguments(self): """'buildspec' requires a repo-name""" args = self.parse_args('buildspec repo') self.assertEqual(args['<repo-name>'], 'repo') with self.assertRaises(DocoptExit): self.parse_args('buildspec') def test_buildreadme_arguments(self): """'buildreadme' take no args""" args = self.parse_args('buildreadme') with self.assertRaises(DocoptExit): self.parse_args('buildreadme some-args') def test_new_command(self): """Clone the project template from GitHub""" with patch('nio_cli.commands.new.os.path.isdir', return_value=True), \ patch('nio_cli.commands.new.subprocess.call') as call, \ patch('nio_cli.commands.new.config_project') as config: self._patched_new_command(call, config) def _patched_new_command(self, call, config): self._main('new', **{ '<project-name>': 'project', }) config.assert_called_once_with(name='project', niohost='127.0.0.1', nioport='8181', pubkeeper_hostname=None, pubkeeper_token=None, ssl=True, instance_id=None) self.assertEqual(call.call_args_list[0][0][0], ( 'git clone ' 'git://github.com/niolabs/project_template.git project' )) self.assertEqual(call.call_args_list[1][0][0], ( 'cd ./project ' '&& git submodule update --init --recursive' )) self.assertEqual(call.call_args_list[2][0][0], ( 'cd ./project ' '&& git remote remove origin ' '&& git commit --amend --reset-author --quiet -m "Initial commit"' )) def test_new_command_set_user(self): """Clone the project template from GitHub""" with patch('nio_cli.commands.new.os.path.isdir', return_value=True), \ patch('nio_cli.commands.new.subprocess.call') as call, \ patch('nio_cli.commands.new.set_user') as user, \ patch('nio_cli.commands.new.config_project') as config: self._patched_new_command_set_user(call, user, config) def _patched_new_command_set_user(self, call, user, config): self._main('new', **{ '<project-name>': 'project', '--username': 'new_user', '--password': 'new_password', }) user.assert_called_once_with('project', 'new_user', 'new_password', True) config.assert_called_once_with(name='project', niohost='127.0.0.1', nioport='8181', pubkeeper_hostname=None, pubkeeper_token=None, ssl=True, instance_id=None) self.assertEqual(call.call_args_list[0][0][0], ( 'git clone ' 'git://github.com/niolabs/project_template.git project' )) self.assertEqual(call.call_args_list[1][0][0], ( 'cd ./project ' '&& git submodule update --init --recursive' )) self.assertEqual(call.call_args_list[2][0][0], ( 'cd ./project ' '&& git remote remove origin ' '&& git commit --amend --reset-author --quiet -m "Initial commit"' )) def test_new_command_template(self): """Clone the project template from GitHub""" with patch('nio_cli.commands.new.os.path.isdir', return_value=True), \ patch('nio_cli.commands.new.subprocess.call') as call, \ patch('nio_cli.commands.new.config_project') as config: self._patched_new_command_template(call, config) def _patched_new_command_template(self, call, config): with patch('nio_cli.commands.new.os.walk') as patched_os_walk: join_module = 'nio_cli.commands.new.os.path.join' with patch(join_module, return_value='join'): patched_os_walk.return_value = [ ('root', ('dirs'), ['requirements.txt'])] self._main('new', **{ '<project-name>': 'project', '<template>': 'my_template', '--pubkeeper-hostname': 'pkhost', '--pubkeeper-token': 'pktoken', '--instance-id': 'abc-123', }) config.assert_called_once_with(name='project', niohost='127.0.0.1', nioport='8181', pubkeeper_hostname='pkhost', pubkeeper_token='pktoken', ssl=True, instance_id='abc-123') self.assertEqual(call.call_args_list[0][0][0], ( 'git clone ' 'git://github.com/niolabs/my_template.git project' )) self.assertEqual(call.call_args_list[1][0][0], ( 'cd ./project ' '&& git submodule update --init --recursive' )) self.assertEqual(call.call_args_list[2][0][0], ( [sys.executable, '-m', 'pip', 'install', '-r', 'join'] )) self.assertEqual(call.call_args_list[3][0][0], ( 'cd ./project ' '&& git remote remove origin ' '&& git commit --amend --reset-author --quiet ' '-m "Initial commit"' )) def test_new_command_with_failed_clone(self): """Cleanly handle new command when 'git clone' fails""" isdir_path = 'nio_cli.commands.new.os.path.isdir' with patch(isdir_path, return_value=False) as isdir, \ patch('nio_cli.commands.new.subprocess.call') as call: self._main('new', **{ '<project-name>': 'project', '--username': 'user', '--password': 'pwd' }) self.assertEqual(call.call_count, 1) isdir.assert_called_once_with('project') @responses.activate def test_add_command(self): """Clone specified blocks as submodules""" responses.add(responses.POST, 'http://127.0.0.1:8181/project/blocks') self._main('add', **{ '<block-repo>': ['block1'], '--project': '.' }) self.assertEqual(len(responses.calls), 1) self._main('add', **{ '<block-repo>': ['block1'], '--project': '.', '--upgrade': True }) self.assertEqual(len(responses.calls), 3) @responses.activate def test_list_command(self): """List blocks or services from the rest api""" service_response = [{'api': 'response'}, {'another': 'service'}] responses.add(responses.GET, 'http://127.0.0.1:8181/services', json=service_response) with patch('builtins.print') as print: self._main('list', **{ "services": True, '--username': 'user', '--password': 'pwd' }) self.assertEqual(len(responses.calls), 1) self.assertEqual(print.call_count, len(service_response)) for index, service in enumerate(service_response): self.assertDictEqual( print.call_args_list[index][0][0], service) @responses.activate def test_list_command_with_id(self): """List blocks or services from the rest api""" blk_response = {'id1': {'name': 'name1', 'id': 'id1'}, 'id2': {'name': 'name2', 'id': 'id2'}} responses.add(responses.GET, 'http://127.0.0.1:8181/blocks', json=blk_response) with patch('builtins.print') as mock_print: self._main('list', **{ "services": False, '--username': 'user', '--password': 'pwd' }) self.assertEqual(len(responses.calls), 1) self.assertEqual(mock_print.call_count, 2) call_args = [arg[0] for arg in mock_print.call_args_list] for blk in blk_response: # the order of responses is not guaranteed self.assertTrue( (blk_response[blk]['id'], blk_response[blk]['name']) in call_args) @responses.activate def test_shutdown_command(self): """Shutdown nio through the rest api""" responses.add(responses.GET, 'http://127.0.0.1:8181/shutdown') self._main('shutdown', **{ '--username': 'user', '--password': 'pwd' }) self.assertEqual(len(responses.calls), 1) @responses.activate def test_command_command(self): """Command a nio block through the rest api""" responses.add(responses.POST, 'http://127.0.0.1:8181/services/service/block/command') self._main('command', **{ '<command-name>': 'command', '<service-name>': 'service', '<block-name>': 'block', '--username': 'user', '--password': 'pwd' }) self.assertEqual(len(responses.calls), 1) def test_publishblock_command(self): """Create spec.json file from block class""" from nio.block.base import Block from nio.properties import StringProperty, VersionProperty from nio.command import command @command('commandit') @command('commander') @input("testInput") @input("testInput2") class SampleBlock1(Block): version = VersionProperty('1.2.3') str_prop = StringProperty( title='String Prop', default='default string', ) another = StringProperty( title='Another Prop', ) get_block_class_path = 'nio_cli.utils.spec._get_block_class' requests_path = 'nio_cli.commands.publishblock.requests' sample_spec = """ { "nio/SampleBlock1": { "description": "This is the description", "outputs": "The original output", "from_python": "myfile.SampleBlock1" } } """ sample_release = """ { "nio/SampleBlock1": { "language": "Python", "from_python": "myfile.SampleBlock1", "url": "git://myblock" } } """ with patch('builtins.open', new_callable=mock_open) as open_calls, \ patch(get_block_class_path) as mock_get_class, \ patch(requests_path) as mock_requests: open_calls.side_effect = [ mock_open(read_data=sample_spec).return_value, mock_open(read_data=sample_release).return_value ] # mocks to load existing spec.json and to discover blocks mock_get_class.return_value = SampleBlock1 # Exectute on repo 'myblocks' self._main('publishblock', **{ '--api-url': 'http://fake', '--api-token': 'token'}) mock_get_class.assert_called_with('myfile.SampleBlock1') self.maxDiff = None # One POST for spec and one for release self.assertEqual(mock_requests.post.call_count, 2) spec_call_args = mock_requests.post.call_args_list[0][1]['json'] release_call_args = mock_requests.post.call_args_list[1][1]['json'] self.assertDictEqual(spec_call_args, { 'nio/SampleBlock1': { 'description': 'This is the description', 'commands': { 'commander': {'params': {}}, 'commandit': {'params': {}} }, 'inputs': { 'testInput': {'description': ''}, 'testInput2': {'description': ''} }, 'outputs': 'The original output', # orig output preserved 'properties': { 'another': { 'default': None, 'title': 'Another Prop', 'type': 'StringType' }, 'str_prop': { 'default': 'default string', 'title': 'String Prop', 'type': 'StringType' } }, 'version': '1.2.0' # Make sure only major.minor } }) self.assertDictEqual(release_call_args, { 'nio/SampleBlock1': { "language": "Python", "version": "1.2.3", "url": "git://myblock" } }) @skipIf(not niocore_installed, 'niocore required for buildrelease') def test_buildrelease_command(self): """create release.json from block class""" from nio.block.base import Block from nio.properties import StringProperty, VersionProperty from nio.command import command @command('commandit') @command('commander') class SampleBlock1(Block): version = VersionProperty('0.1.0') str_prop = StringProperty( title='String Prop', default='default string', ) another = StringProperty( title='Another Prop', ) class SampleBlock2(Block): # if a block has no configured version prop, the version is 0.0.0 # by default pass discover_path = \ 'nio_cli.commands.buildrelease.Discover.discover_classes' json_dump_path = 'nio_cli.commands.buildrelease.json.dump' file_exists_path = 'nio_cli.commands.buildrelease.os.path.exists' subprocess_call_path = \ 'nio_cli.commands.buildrelease.subprocess.check_output' with patch(discover_path) as discover_classes, \ patch('builtins.open', mock_open()) as mock_file, \ patch(file_exists_path) as mock_file_exists, \ patch(json_dump_path) as mock_json_dump, \ patch(subprocess_call_path) as check_output: # mocks to load existing spec.json and to discover blocks mock_file_exists.return_value = True discover_classes.return_value = [SampleBlock1, SampleBlock2] check_output.return_value = \ b'origin [email protected]:niolabs/myblocks.git (fetch)' # Exectute on repo 'myblocks' self._main('buildrelease', **{'<repo-name>': 'myblocks'}) discover_classes.assert_called_once_with( 'blocks.myblocks', ANY, ANY) # json dump to file with formatting mock_json_dump.assert_called_once_with( { 'nio/SampleBlock2': { 'version': '0.0.0', 'language': 'Python', 'url': 'git://github.com/niolabs/myblocks.git' }, 'nio/SampleBlock1': { 'version': '0.1.0', 'language': 'Python', 'url': 'git://github.com/niolabs/myblocks.git'} }, mock_file(), indent=2, sort_keys=True) def test_newblock_command(self): """Clone the block template from GitHub""" with patch('nio_cli.commands.new.subprocess.call') as call, \ patch('builtins.open', mock_open( read_data='Example ..example_block TestExample') ) as mock_file, \ patch("nio_cli.commands.newblock.os") as os_mock, \ patch("nio_cli.commands.newblock.move") as move_mock: self._main('newblock', **{'<block-name>': 'yaba_daba'}) self.assertEqual(call.call_args_list[0][0][0], ( 'git clone ' 'git://github.com/nio-blocks/block_template.git yaba_daba' )) self.assertEqual(mock_file.call_args_list[0][0], ('./yaba_daba/yaba_daba_block.py',)) self.assertEqual( mock_file.return_value.write.call_args_list[0][0][0], 'YabaDaba ..example_block TestYabaDaba') # assert calls to rename block files self.assertEqual(os_mock.remove.call_count, 1) self.assertEqual(move_mock.call_count, 3) def test_blockcheck_command(self): self.maxDiff = None file_exists_path = 'nio_cli.commands.blockcheck.os.path.exists' getcwd_path = 'nio_cli.commands.blockcheck.os.getcwd' listdir_path = 'nio_cli.commands.blockcheck.os.listdir' subprocess_path = 'nio_cli.commands.blockcheck.subprocess.call' sys_exit_path = 'nio_cli.commands.blockcheck.sys.exit' print_path = 'nio_cli.commands.blockcheck.sys.stdout' json_load_path = 'nio_cli.commands.blockcheck.json.load' with patch('builtins.open', mock_open()) as mock_file, \ patch(file_exists_path) as mock_file_exists, \ patch(getcwd_path) as mock_getcwd, \ patch(listdir_path) as mock_listdir, \ patch(subprocess_path) as mock_subprocess_call, \ patch(sys_exit_path) as mock_sys_exit, \ patch(print_path, new_callable=StringIO) as mock_print, \ patch(json_load_path) as mock_json_load: mock_file_exists.return_value = True mock_getcwd.return_value = 'nio_lmnopio_block' mock_listdir.return_value = ['nio_lmnopio_block.py'] mock_json_load.side_effect = [ # json.load() for spec.json (prop1 missing description) { 'nio/nioLmnopio': { 'version': '0.1.0', 'description': 'spec description', 'properties': { 'prop1': { 'description': '' } }, 'inputs': {}, 'outputs': {}, 'commands': {}, } }, # json.load() for release.json (versions do not match) { 'nio/nioLmnopio': { 'language': 'Python', 'url': 'release url', 'version': '0.2.0', } } ] mock_file.return_value.readlines.side_effect = [ # .readlines() for nio_lmnopio_block.py [ 'class nioLmnopio(Block):', "version = VersionProperty('0.1.0')" ], # .readlines() for README.md (missing 'Outputs') [ 'nioLmnopio', 'Properties', 'Inputs', 'Commands', 'Dependencies' ] ] self._main('blockcheck') self.assertEqual( 'pycodestyle .', mock_subprocess_call.call_args_list[0][0][0]) # Check that print statements are run what_was_printed = mock_print.getvalue() self.assertIn('Checking PEP8 formatting ...', what_was_printed) self.assertIn('Checking spec.json formatting ...', what_was_printed) self.assertIn('Fill in the description for the "prop1" property ', what_was_printed) self.assertIn('in the nioLmnopio block', what_was_printed) self.assertIn('Checking README.md formatting ...', what_was_printed) self.assertIn('Add "Outputs" to the nioLmnopio block', what_was_printed) self.assertIn('Checking release.json formatting ...', what_was_printed) self.assertIn('Checking version formatting ...', what_was_printed) self.assertIn('The nioLmnopio version in the release file does not match ', what_was_printed) self.assertIn('the version in its block file', what_was_printed) self.assertIn('Spec.json and release.json versions do not match ', what_was_printed) self.assertIn('for nioLmnopio block', what_was_printed) self.assertIn('Checking class and file name formatting ...', what_was_printed) def test_add_user_command(self): """ Adds a user through the rest api""" with patch("nio_cli.commands.add_user.set_user") as set_user_patch: self._main('add_user', **{ '--project': 'testing_project', '<username>': 'user', '<password>': 'pwd' }) self.assertEqual(set_user_patch.call_count, 1) self.assertEqual(set_user_patch.call_args_list[0], call('testing_project', 'user', 'pwd')) from nio_cli.utils.users import set_user, _hash_password, \ _set_permissions with patch(set_user.__module__ + '.os') as mock_os, \ patch(set_user.__module__ + '.json') as mock_json, \ patch('builtins.open') as mock_open, \ patch('nio_cli.utils.users._hash_password') as mock_hash, \ patch('nio_cli.utils.users._set_permissions'): mock_os.path.isfile.return_value = True mock_hash.return_value = "AdminPwdHash" mock_json.load.return_value = {"Admin": "AdminPwd"} username = "user1" password = "pwd1" self._main('add_user', **{ '--project': 'testing_project', '<username>': username, '<password>': password }) # one call to read users.json and one to save users.json self.assertEqual(mock_open.call_count, 2) print(mock_json.dump.call_args_list) users, _ = mock_json.dump.call_args_list[0][0] self.assertIn(username, users) self.assertDictEqual(users[username], {"password": "AdminPwdHash"}) _set_permissions('testing_project', username, False) # make sure we open permissions.json two times # to read and write new permissions self.assertEqual(mock_open.call_count, 4) print(mock_json.dump.call_args_list) permissions, _ = mock_json.dump.call_args_list[0][0] self.assertIn(username, permissions) self.assertDictEqual(permissions[username], {".*": "rwx"}) def test_remove_user_command(self): """ Adds a user through the rest api""" with patch("nio_cli.commands.remove_user.remove_user") as \ remove_user_patch: self._main('remove_user', **{ '--project': 'testing_project', '<username>': 'user' }) self.assertEqual(remove_user_patch.call_count, 1) self.assertEqual(remove_user_patch.call_args_list[0], call('testing_project', 'user')) from nio_cli.commands.remove_user import RemoveUser, _remove_permission with patch(RemoveUser.__module__ + '.os') as mock_os, \ patch(RemoveUser.__module__ + '.json') as mock_json, \ patch('builtins.open') as mock_open, \ patch('nio_cli.commands.remove_user._remove_permission'): mock_os.path.isfile.return_value = True mock_json.load.return_value = {"Admin": "AdminPwd"} username = "Admin" self._main('remove_user', **{ '--project': 'testing_project', '<username>': username }) # one call to read users.json and one to save users.json self.assertEqual(mock_open.call_count, 2) users, _ = mock_json.dump.call_args_list[0][0] self.assertNotIn(username, users) self.assertEqual(len(users), 0) # make sure we open permissions.json two times # to read and write new permissions mock_json.load.return_value = {"Admin": {".*": "rwx"}} _remove_permission('testing_project', username) self.assertEqual(mock_open.call_count, 4) permissions, _ = mock_json.dump.call_args_list[0][0] self.assertNotIn(username, permissions) self.assertEqual(len(permissions), 0) def test_cleanup_host(self): cli_command = Base({}) self.assertEqual( cli_command._cleanup_host('localhost'), 'https://localhost') self.assertEqual( cli_command._cleanup_host('http://localhost'), 'http://localhost') self.assertEqual( cli_command._cleanup_host('https://localhost'), 'https://localhost') self.assertEqual( cli_command._cleanup_host('https://localhost:8181'), 'https://localhost:8181') self.assertEqual( cli_command._cleanup_host('https://localhost:8181/'), 'https://localhost:8181') def _main(self, command, **kwargs): args = { '--daemon': False, '--upgrade': False, '-u': False, '--template': False, '-t': False, } if command in ('new', 'config'): args['--ip'] = '127.0.0.1' args['--port'] = '8181' else: args['--instance-host'] = 'http://127.0.0.1:8181' args[command] = True for k, v in kwargs.items(): args[k] = v with patch('nio_cli.cli.docopt') as docopt: docopt.return_value = args cli.main()
apache-2.0
GabrielGanne/vpp-flowtable
src/vpp-api/java/jvpp-ioamexport/jvpp_ioam_export.h
1388
/* * Copyright (c) 2016 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __included_jvpp_ioam_export_h__ #define __included_jvpp_ioam_export_h__ #include <vnet/vnet.h> #include <vnet/ip/ip.h> #include <vnet/api_errno.h> #include <vlibapi/api.h> #include <vlibmemory/api.h> #include <jni.h> /* Global state for JVPP-IOAM-EXPORT */ typedef struct { /* Base message index for the export plugin */ u16 msg_id_base; /* Pointer to shared memory queue */ unix_shared_memory_queue_t * vl_input_queue; /* VPP api client index */ u32 my_client_index; /* Callback object and class references enabling asynchronous Java calls */ jobject callbackObject; jclass callbackClass; } ioamexport_main_t; ioamexport_main_t ioamexport_main __attribute__((aligned (64))); #endif /* __included_jvpp_ioam_export_h__ */
apache-2.0
mdavid/nuget
src/VisualStudio/VsFileSystemProvider.cs
3640
using System; using System.ComponentModel.Composition; using System.Diagnostics.CodeAnalysis; using System.Linq; using EnvDTE; using EnvDTE80; using Microsoft.VisualStudio.ComponentModelHost; namespace NuGet.VisualStudio { [Export(typeof(IFileSystemProvider))] public class VsFileSystemProvider : IFileSystemProvider { private readonly DTE _dte; private readonly IComponentModel _componentModel; private readonly ISettings _settings; public VsFileSystemProvider() : this(ServiceLocator.GetInstance<DTE>(), ServiceLocator.GetGlobalService<SComponentModel, IComponentModel>(), ServiceLocator.GetInstance<ISettings>()) { } public VsFileSystemProvider(DTE dte, IComponentModel componentModel, ISettings settings) { if (dte == null) { throw new ArgumentNullException("dte"); } if (componentModel == null) { throw new ArgumentNullException("componentModel"); } if (settings == null) { throw new ArgumentNullException("settings"); } _componentModel = componentModel; _dte = dte; _settings = settings; } public IFileSystem GetFileSystem(string path) { // Get the source control providers var physicalFileSystem = new PhysicalFileSystem(path); if (_settings.IsSourceControlDisabled()) { return physicalFileSystem; } var providers = _componentModel.GetExtensions<ISourceControlFileSystemProvider>(); // Get the repository path IFileSystem fileSystem = null; var sourceControl = (SourceControl2)_dte.SourceControl; if (providers.Any() && sourceControl != null) { SourceControlBindings binding = null; try { // Get the binding for this solution binding = sourceControl.GetBindings(_dte.Solution.FullName); } catch (NotImplementedException) { // Some source control providers don't bother to implement this. // TFS might be the only one using it } if (binding != null) { fileSystem = providers.Select(provider => GetFileSystemFromProvider(provider, path, binding)) .FirstOrDefault(fs => fs != null); } } return fileSystem ?? physicalFileSystem; } [SuppressMessage("Microsoft.Design", "CA1031:DoNotCatchGeneralExceptionTypes", Justification = "We should never fail")] private static IFileSystem GetFileSystemFromProvider(ISourceControlFileSystemProvider provider, string path, SourceControlBindings binding) { try { return provider.GetFileSystem(path, binding); } catch (Exception exception) { ExceptionHelper.WriteToActivityLog(exception); // Ignore exceptions that can happen when some binaries are missing. e.g. TfsSourceControlFileSystemProvider // would throw a jitting error if TFS is not installed } return null; } } }
apache-2.0
zhangg/trove
trove/tests/unittests/guestagent/test_mongodb_manager.py
15121
# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import pymongo import trove.common.db.mongodb.models as models import trove.common.utils as utils import trove.guestagent.backup as backup from trove.guestagent.common.configuration import ImportOverrideStrategy import trove.guestagent.datastore.experimental.mongodb.manager as manager import trove.guestagent.datastore.experimental.mongodb.service as service import trove.guestagent.volume as volume from trove.tests.unittests.guestagent.test_datastore_manager import \ DatastoreManagerTest class GuestAgentMongoDBManagerTest(DatastoreManagerTest): @mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory') def setUp(self, _): super(GuestAgentMongoDBManagerTest, self).setUp('mongodb') self.manager = manager.Manager() self.execute_with_timeout_patch = mock.patch.object( utils, 'execute_with_timeout', return_value=('0', '') ) self.addCleanup(self.execute_with_timeout_patch.stop) self.execute_with_timeout_patch.start() self.pymongo_patch = mock.patch.object( pymongo, 'MongoClient' ) self.addCleanup(self.pymongo_patch.stop) self.pymongo_patch.start() self.mount_point = '/var/lib/mongodb' self.host_wildcard = '%' # This is used in the test_*_user tests below self.serialized_user = { '_name': 'testdb.testuser', '_password': None, '_roles': [{'db': 'testdb', 'role': 'testrole'}], '_username': 'testuser', '_databases': [], '_host': self.host_wildcard, '_database': {'_name': 'testdb', '_character_set': None, '_collate': None}, '_is_root': False } def tearDown(self): super(GuestAgentMongoDBManagerTest, self).tearDown() def test_update_status(self): self.manager.app.status = mock.MagicMock() self.manager.update_status(self.context) self.manager.app.status.update.assert_any_call() def _prepare_method(self, packages=['packages'], databases=None, memory_mb='2048', users=None, device_path=None, mount_point=None, backup_info=None, config_contents=None, root_password=None, overrides=None, cluster_config=None,): """self.manager.app must be correctly mocked before calling.""" self.manager.app.status = mock.Mock() self.manager.prepare(self.context, packages, databases, memory_mb, users, device_path=device_path, mount_point=mount_point, backup_info=backup_info, config_contents=config_contents, root_password=root_password, overrides=overrides, cluster_config=cluster_config) self.manager.app.status.begin_install.assert_any_call() self.manager.app.install_if_needed.assert_called_with(packages) self.manager.app.stop_db.assert_any_call() self.manager.app.clear_storage.assert_any_call() (self.manager.app.apply_initial_guestagent_configuration. assert_called_once_with(cluster_config, self.mount_point)) @mock.patch.object(volume, 'VolumeDevice') @mock.patch('os.path.exists') def test_prepare_for_volume(self, exists, mocked_volume): device_path = '/dev/vdb' self.manager.app = mock.Mock() self._prepare_method(device_path=device_path) mocked_volume().unmount_device.assert_called_with(device_path) mocked_volume().format.assert_any_call() mocked_volume().migrate_data.assert_called_with(self.mount_point) mocked_volume().mount.assert_called_with(self.mount_point) def test_secure(self): self.manager.app = mock.Mock() mock_secure = mock.Mock() self.manager.app.secure = mock_secure self._prepare_method() mock_secure.assert_called_with() @mock.patch.object(backup, 'restore') @mock.patch.object(service.MongoDBAdmin, 'is_root_enabled') def test_prepare_from_backup(self, mocked_root_check, mocked_restore): self.manager.app = mock.Mock() backup_info = {'id': 'backup_id_123abc', 'location': 'fake-location', 'type': 'MongoDBDump', 'checksum': 'fake-checksum'} self._prepare_method(backup_info=backup_info) mocked_restore.assert_called_with(self.context, backup_info, '/var/lib/mongodb') mocked_root_check.assert_any_call() def test_prepare_with_databases(self): self.manager.app = mock.Mock() database = mock.Mock() mock_create_databases = mock.Mock() self.manager.create_database = mock_create_databases self._prepare_method(databases=[database]) mock_create_databases.assert_called_with(self.context, [database]) def test_prepare_with_users(self): self.manager.app = mock.Mock() user = mock.Mock() mock_create_users = mock.Mock() self.manager.create_user = mock_create_users self._prepare_method(users=[user]) mock_create_users.assert_called_with(self.context, [user]) @mock.patch.object(service.MongoDBAdmin, 'enable_root') def test_provide_root_password(self, mocked_enable_root): self.manager.app = mock.Mock() self._prepare_method(root_password='test_password') mocked_enable_root.assert_called_with('test_password') @mock.patch.object(service, 'MongoDBClient') @mock.patch.object(service.MongoDBAdmin, '_admin_user') @mock.patch.object(service.MongoDBAdmin, '_get_user_record') def test_create_user(self, mocked_get_user, mocked_admin_user, mocked_client): user = self.serialized_user.copy() user['_password'] = 'testpassword' users = [user] client = mocked_client().__enter__()['testdb'] mocked_get_user.return_value = None self.manager.create_user(self.context, users) client.add_user.assert_called_with('testuser', password='testpassword', roles=[{'db': 'testdb', 'role': 'testrole'}]) @mock.patch.object(service, 'MongoDBClient') @mock.patch.object(service.MongoDBAdmin, '_admin_user') def test_delete_user(self, mocked_admin_user, mocked_client): client = mocked_client().__enter__()['testdb'] self.manager.delete_user(self.context, self.serialized_user) client.remove_user.assert_called_with('testuser') @mock.patch.object(service, 'MongoDBClient') @mock.patch.object(service.MongoDBAdmin, '_admin_user') def test_get_user(self, mocked_admin_user, mocked_client): mocked_find = mock.MagicMock(return_value={ '_id': 'testdb.testuser', 'user': 'testuser', 'db': 'testdb', 'roles': [{'db': 'testdb', 'role': 'testrole'}] }) client = mocked_client().__enter__().admin client.system.users.find_one = mocked_find result = self.manager.get_user(self.context, 'testdb.testuser', None) mocked_find.assert_called_with({'user': 'testuser', 'db': 'testdb'}) self.assertEqual(self.serialized_user, result) @mock.patch.object(service, 'MongoDBClient') @mock.patch.object(service.MongoDBAdmin, '_admin_user') def test_list_users(self, mocked_admin_user, mocked_client): # roles are NOT returned by list_users user1 = self.serialized_user.copy() user2 = self.serialized_user.copy() user2['_name'] = 'testdb.otheruser' user2['_username'] = 'otheruser' user2['_roles'] = [{'db': 'testdb2', 'role': 'readWrite'}] user2['_databases'] = [{'_name': 'testdb2', '_character_set': None, '_collate': None}] mocked_find = mock.MagicMock(return_value=[ { '_id': 'admin.os_admin', 'user': 'os_admin', 'db': 'admin', 'roles': [{'db': 'admin', 'role': 'root'}] }, { '_id': 'testdb.testuser', 'user': 'testuser', 'db': 'testdb', 'roles': [{'db': 'testdb', 'role': 'testrole'}] }, { '_id': 'testdb.otheruser', 'user': 'otheruser', 'db': 'testdb', 'roles': [{'db': 'testdb2', 'role': 'readWrite'}] } ]) client = mocked_client().__enter__().admin client.system.users.find = mocked_find users, next_marker = self.manager.list_users(self.context) self.assertIsNone(next_marker) self.assertEqual(sorted([user1, user2], key=lambda x: x['_name']), users) @mock.patch.object(service.MongoDBAdmin, 'create_validated_user') @mock.patch.object(utils, 'generate_random_password', return_value='password') def test_enable_root(self, mock_gen_rand_pwd, mock_create_user): root_user = {'_name': 'admin.root', '_username': 'root', '_database': {'_name': 'admin', '_character_set': None, '_collate': None}, '_password': 'password', '_roles': [{'db': 'admin', 'role': 'root'}], '_databases': [], '_host': self.host_wildcard, '_is_root': True} result = self.manager.enable_root(self.context) self.assertTrue(mock_create_user.called) self.assertEqual(root_user, result) @mock.patch.object(service, 'MongoDBClient') @mock.patch.object(service.MongoDBAdmin, '_admin_user') @mock.patch.object(service.MongoDBAdmin, '_get_user_record', return_value=models.MongoDBUser('testdb.testuser')) def test_grant_access(self, mocked_get_user, mocked_admin_user, mocked_client): client = mocked_client().__enter__()['testdb'] self.manager.grant_access(self.context, 'testdb.testuser', None, ['db1', 'db2', 'db3']) client.add_user.assert_called_with('testuser', roles=[ {'db': 'db1', 'role': 'readWrite'}, {'db': 'db2', 'role': 'readWrite'}, {'db': 'db3', 'role': 'readWrite'} ]) @mock.patch.object(service, 'MongoDBClient') @mock.patch.object(service.MongoDBAdmin, '_admin_user') @mock.patch.object(service.MongoDBAdmin, '_get_user_record', return_value=models.MongoDBUser('testdb.testuser')) def test_revoke_access(self, mocked_get_user, mocked_admin_user, mocked_client): client = mocked_client().__enter__()['testdb'] mocked_get_user.return_value.roles = [ {'db': 'db1', 'role': 'readWrite'}, {'db': 'db2', 'role': 'readWrite'}, {'db': 'db3', 'role': 'readWrite'} ] self.manager.revoke_access(self.context, 'testdb.testuser', None, 'db2') client.add_user.assert_called_with('testuser', roles=[ {'db': 'db1', 'role': 'readWrite'}, {'db': 'db3', 'role': 'readWrite'} ]) @mock.patch.object(service, 'MongoDBClient') @mock.patch.object(service.MongoDBAdmin, '_admin_user') @mock.patch.object(service.MongoDBAdmin, '_get_user_record', return_value=models.MongoDBUser('testdb.testuser')) def test_list_access(self, mocked_get_user, mocked_admin_user, mocked_client): mocked_get_user.return_value.roles = [ {'db': 'db1', 'role': 'readWrite'}, {'db': 'db2', 'role': 'readWrite'}, {'db': 'db3', 'role': 'readWrite'} ] accessible_databases = self.manager.list_access( self.context, 'testdb.testuser', None ) self.assertEqual(['db1', 'db2', 'db3'], [db['_name'] for db in accessible_databases]) @mock.patch.object(service, 'MongoDBClient') @mock.patch.object(service.MongoDBAdmin, '_admin_user') def test_create_databases(self, mocked_admin_user, mocked_client): schema = models.MongoDBSchema('testdb').serialize() db_client = mocked_client().__enter__()['testdb'] self.manager.create_database(self.context, [schema]) db_client['dummy'].insert.assert_called_with({'dummy': True}) db_client.drop_collection.assert_called_with('dummy') @mock.patch.object(service, 'MongoDBClient') @mock.patch.object(service.MongoDBAdmin, '_admin_user') def test_list_databases(self, # mocked_ignored_dbs, mocked_admin_user, mocked_client): # This list contains the special 'admin', 'local' and 'config' dbs; # the special dbs should be skipped in the output. # Pagination is tested by starting at 'db1', so 'db0' should not # be in the output. The limit is set to 2, meaning the result # should be 'db1' and 'db2'. The next_marker should be 'db3'. mocked_list = mock.MagicMock( return_value=['admin', 'local', 'config', 'db0', 'db1', 'db2', 'db3']) mocked_client().__enter__().database_names = mocked_list dbs, next_marker = self.manager.list_databases( self.context, limit=2, marker='db1', include_marker=True) mocked_list.assert_any_call() self.assertEqual([models.MongoDBSchema('db1').serialize(), models.MongoDBSchema('db2').serialize()], dbs) self.assertEqual('db2', next_marker) @mock.patch.object(service, 'MongoDBClient') @mock.patch.object(service.MongoDBAdmin, '_admin_user') def test_delete_database(self, mocked_admin_user, mocked_client): schema = models.MongoDBSchema('testdb').serialize() self.manager.delete_database(self.context, schema) mocked_client().__enter__().drop_database.assert_called_with('testdb')
apache-2.0
adamcrews/puppet-duplicity
spec/defines/profile_spec.rb
10421
require 'spec_helper' describe 'duplicity::profile' do let(:title) { 'default' } let(:facts) { {:concat_basedir => '/path/to/dir'} } let(:default_config_file) { '/etc/duply/default/conf' } let(:default_filelist) { '/etc/duply/default/exclude' } describe 'by default' do let(:params) { {} } it { should contain_file('/etc/duply/default').with( 'ensure' => 'directory', 'owner' => 'root', 'group' => 'root', 'mode' => '0700' ) } it { should contain_file('/etc/duply/default/conf').with( 'ensure' => 'file', 'owner' => 'root', 'group' => 'root', 'mode' => '0400' ) } it { should contain_concat('/etc/duply/default/exclude').with( 'ensure' => 'present', 'owner' => 'root', 'group' => 'root', 'mode' => '0400' ) } it { should contain_concat('/etc/duply/default/pre').with( 'ensure' => 'present', 'owner' => 'root', 'group' => 'root', 'mode' => '0700' ) } it { should contain_concat('/etc/duply/default/post').with( 'ensure' => 'present', 'owner' => 'root', 'group' => 'root', 'mode' => '0700' ) } it { should contain_file(default_config_file).with_content(/^# GPG_KEY='disabled'/) } it { should contain_file(default_config_file).with_content(/^GPG_KEYS_ENC=''$/) } it { should contain_file(default_config_file).with_content(/^GPG_KEY_SIGN='disabled'$/) } it { should contain_file(default_config_file).with_content(/^GPG_PW=''$/) } it { should contain_file(default_config_file).with_content(/^GPG_OPTS=''$/) } it { should contain_file(default_config_file).with_content(/^TARGET_USER=''$/) } it { should contain_file(default_config_file).with_content(/^TARGET_PASS=''$/) } it { should contain_file(default_config_file).without_content(/^MAX_FULLBKP_AGE=.*$/) } it { should contain_file(default_config_file).with_content(/^VOLSIZE=50$/) } it { should contain_concat__fragment("#{default_filelist}/exclude-by-default").with_content(/^\n\- \*\*$/) } it { should_not contain_concat__fragment("#{default_filelist}/include") } it { should_not contain_concat__fragment("#{default_filelist}/exclude") } specify { should contain_cron("backup-default").with_ensure('absent') } specify { should contain_file(default_config_file).with_content(/^SOURCE='\/'$/) } specify { should contain_file(default_config_file).with_content(/^TARGET='\/default'$/) } end describe 'with ensure absent' do let(:params) { {:ensure => 'absent'} } it { should contain_file('/etc/duply/default').with_ensure('absent') } it { should contain_file('/etc/duply/default/conf').with_ensure('absent') } it { should contain_file('/etc/duply/default/exclude').with_ensure('absent') } it { should contain_file('/etc/duply/default/pre').with_ensure('absent') } it { should contain_file('/etc/duply/default/post').with_ensure('absent') } end describe 'with invalid ensure' do let(:params) { {:ensure => 'foobar'} } it do expect { should contain_file(default_config_file) }.to raise_error(Puppet::Error, /ensure/) end end describe 'with gpg_encryption => false' do let(:params) { {:gpg_encryption => false} } it { should contain_file(default_config_file).with_content(/^GPG_KEY='disabled'$/) } end describe 'with gpg_encryption => true' do let(:params) { {:gpg_encryption => true} } it { should contain_file(default_config_file).with_content(/^# GPG_KEY='disabled'/) } end describe 'with empty gpg_encryption_keys' do let(:params) { {:gpg_encryption_keys => ''} } it { should contain_file(default_config_file).with_content(/^GPG_KEYS_ENC=''$/) } end describe 'with gpg_encryption_keys => key1' do let(:params) { {:gpg_encryption_keys => 'key1'} } it { should contain_file(default_config_file).with_content(/^GPG_KEYS_ENC='key1'$/) } it { should contain_duplicity__public_key_link('default/key1') } end describe 'with gpg_encryption_keys => [key1]' do let(:params) { {:gpg_encryption_keys => ['key1']} } it { should contain_file(default_config_file).with_content(/^GPG_KEYS_ENC='key1'$/) } it { should contain_duplicity__public_key_link('default/key1') } end describe 'with gpg_encryption_keys => [key1,key2]' do let(:params) { {:gpg_encryption_keys => ['key1', 'key2']} } it { should contain_file(default_config_file).with_content(/^GPG_KEYS_ENC='key1,key2'$/) } it { should contain_duplicity__public_key_link('default/key1') } it { should contain_duplicity__public_key_link('default/key2') } end describe 'with invalid gpg_signing_key' do let(:params) { {:gpg_signing_key => 'invalid-key-id'} } it do expect { should contain_file(default_config_file) }.to raise_error(Puppet::Error, /signing_key/) end end describe 'with gpg_signing_key => key1' do let(:params) { {:gpg_signing_key => 'key1'} } it { should contain_file(default_config_file).with_content(/^GPG_KEY_SIGN='key1'$/) } it { should contain_duplicity__private_key_link('default/key1') } end describe 'with gpg_passphrase => secret' do let(:params) { {:gpg_passphrase => 'secret'} } it { should contain_file(default_config_file).with_content(/^GPG_PW='secret'$/) } end describe 'with empty gpg_options' do let(:params) { {:gpg_options => ''} } specify { should contain_file(default_config_file).with_content(/^GPG_OPTS=''$/) } end describe 'with gpg_options => --switch' do let(:params) { {:gpg_options => '--switch'} } specify { should contain_file(default_config_file).with_content(/^GPG_OPTS='--switch'$/) } end describe 'with gpg_options => [--switch]' do let(:params) { {:gpg_options => ['--switch']} } it { should contain_file(default_config_file).with_content(/^GPG_OPTS='--switch'$/) } end describe 'with gpg_options => [--switch, --key=value]' do let(:params) { {:gpg_options => ['--switch', '--key=value']} } it { should contain_file(default_config_file).with_content(/^GPG_OPTS='--switch --key=value'$/) } end describe 'with empty source' do let(:params) { {:source => '' } } it do expect { should contain_file(default_config_file) }.to raise_error(Puppet::Error, /source/) end end describe 'with source => /path/of/source' do let(:params) { {:source => '/path/of/source', } } it { should contain_file(default_config_file).with_content(/^SOURCE='\/path\/of\/source'$/) } end describe 'with empty target' do let(:params) { {:target => '', } } it do expect { should contain_file(default_config_file) }.to raise_error(Puppet::Error, /target/) end end describe 'with target => http://example.com' do let(:params) { {:target => 'http://example.com', } } it { should contain_file(default_config_file).with_content(/^TARGET='http:\/\/example.com'$/) } end describe 'with target_username => johndoe' do let(:params) { {:target_username => 'johndoe'} } it { should contain_file(default_config_file).with_content(/^TARGET_USER='johndoe'$/) } end describe 'with target_password => secret' do let(:params) { {:target_password => 'secret'} } it { should contain_file(default_config_file).with_content(/^TARGET_PASS='secret'$/) } end describe 'should accept max_full_backups as integer' do let(:params) { {:max_full_backups => 5} } it { should contain_file(default_config_file).with_content(/^MAX_FULL_BACKUPS=5$/) } end describe 'should accept max_full_backups as string' do let(:params) { {:max_full_backups => '5'} } it { should contain_file(default_config_file).with_content(/^MAX_FULL_BACKUPS=5$/) } end describe 'should not accept any string as max_full_backups' do let(:params) { {:max_full_backups => 'invalid'} } specify { expect { should contain_file(default_config_file) }.to raise_error(Puppet::Error, /max_full_backups/) } end describe 'with full_if_older_than => 1M' do let(:params) { {:full_if_older_than => '1M'} } it { should contain_file(default_config_file).with_content(/^MAX_FULLBKP_AGE=1M$/) } it { should contain_file(default_config_file).with_content(/^DUPL_PARAMS="\$DUPL_PARAMS --full-if-older-than \$MAX_FULLBKP_AGE "$/) } end describe 'with invalid volsize' do let(:params) { {:volsize => 'invalid'} } specify { expect { should contain_file(default_config_file) }.to raise_error(Puppet::Error, /volsize/) } end describe 'with volsize => 25' do let(:params) { {:volsize => 25} } it { should contain_file(default_config_file).with_content(/^VOLSIZE=25$/) } it { should contain_file(default_config_file).with_content(/^DUPL_PARAMS="\$DUPL_PARAMS --volsize \$VOLSIZE "$/) } end describe 'with include_files => "/a/b"' do let(:params) { {:include_filelist => ['/a/b']} } it { should contain_concat__fragment("#{default_filelist}/include").with_content(/^\+ \/a\/b$/) } end describe 'with invalid include_filelist' do let(:params) { {:include_filelist => 'invalid'} } specify { expect { should contain_concat__fragment("#{default_filelist}/include") }.to raise_error(Puppet::Error, /include_filelist/) } end describe 'with exclude_files => "/a/b"' do let(:params) { {:exclude_filelist => ['/a/b']} } it { should contain_concat__fragment("#{default_filelist}/exclude").with_content(/^\- \/a\/b$/) } end describe 'with invalid exclude_filelist' do let(:params) { {:exclude_filelist => 'invalid'} } specify { expect { should contain_concat__fragment("#{default_filelist}/exclude") }.to raise_error(Puppet::Error, /exclude_filelist/) } end describe 'with exclude_by_default => false' do let(:params) { {:exclude_by_default => false} } it { should contain_concat__fragment("#{default_filelist}/exclude-by-default").with_ensure('absent') } end describe 'with cron_enabled and cron_hour and cron_minute set' do let(:params) { {:cron_enabled => true, :cron_hour => '1', :cron_minute => '2'} } specify do should contain_cron("backup-default").with( 'ensure' => 'present', 'hour' => '1', 'minute' => '2' ) end end end
apache-2.0
vlsi/calcite
core/src/main/java/org/apache/calcite/prepare/RelOptTableImpl.java
17920
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to you under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.calcite.prepare; import org.apache.calcite.jdbc.CalciteSchema; import org.apache.calcite.linq4j.tree.Expression; import org.apache.calcite.materialize.Lattice; import org.apache.calcite.plan.RelOptSchema; import org.apache.calcite.plan.RelOptTable; import org.apache.calcite.rel.RelCollation; import org.apache.calcite.rel.RelDistribution; import org.apache.calcite.rel.RelDistributionTraitDef; import org.apache.calcite.rel.RelFieldCollation; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.RelReferentialConstraint; import org.apache.calcite.rel.logical.LogicalTableScan; import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeFactory; import org.apache.calcite.rel.type.RelDataTypeField; import org.apache.calcite.rel.type.RelProtoDataType; import org.apache.calcite.rel.type.RelRecordType; import org.apache.calcite.schema.ColumnStrategy; import org.apache.calcite.schema.FilterableTable; import org.apache.calcite.schema.ModifiableTable; import org.apache.calcite.schema.Path; import org.apache.calcite.schema.ProjectableFilterableTable; import org.apache.calcite.schema.QueryableTable; import org.apache.calcite.schema.ScannableTable; import org.apache.calcite.schema.Schema; import org.apache.calcite.schema.SchemaPlus; import org.apache.calcite.schema.SchemaVersion; import org.apache.calcite.schema.Schemas; import org.apache.calcite.schema.StreamableTable; import org.apache.calcite.schema.Table; import org.apache.calcite.schema.TemporalTable; import org.apache.calcite.schema.TranslatableTable; import org.apache.calcite.schema.Wrapper; import org.apache.calcite.sql.SqlAccessType; import org.apache.calcite.sql.validate.SqlModality; import org.apache.calcite.sql.validate.SqlMonotonicity; import org.apache.calcite.sql2rel.InitializerExpressionFactory; import org.apache.calcite.sql2rel.NullInitializerExpressionFactory; import org.apache.calcite.util.ImmutableBitSet; import org.apache.calcite.util.Pair; import org.apache.calcite.util.Util; import com.google.common.collect.ImmutableList; import java.util.AbstractList; import java.util.Collection; import java.util.List; import java.util.Objects; import java.util.Set; import java.util.function.Function; /** * Implementation of {@link org.apache.calcite.plan.RelOptTable}. */ public class RelOptTableImpl extends Prepare.AbstractPreparingTable { private final RelOptSchema schema; private final RelDataType rowType; private final Table table; private final Function<Class, Expression> expressionFunction; private final ImmutableList<String> names; /** Estimate for the row count, or null. * * <p>If not null, overrides the estimate from the actual table. * * <p>Useful when a table that contains a materialized query result is being * used to replace a query expression that wildly underestimates the row * count. Now the materialized table can tell the same lie. */ private final Double rowCount; private RelOptTableImpl( RelOptSchema schema, RelDataType rowType, List<String> names, Table table, Function<Class, Expression> expressionFunction, Double rowCount) { this.schema = schema; this.rowType = Objects.requireNonNull(rowType); this.names = ImmutableList.copyOf(names); this.table = table; // may be null this.expressionFunction = expressionFunction; // may be null this.rowCount = rowCount; // may be null } public static RelOptTableImpl create( RelOptSchema schema, RelDataType rowType, List<String> names, Expression expression) { return new RelOptTableImpl(schema, rowType, names, null, c -> expression, null); } public static RelOptTableImpl create( RelOptSchema schema, RelDataType rowType, List<String> names, Table table, Expression expression) { return new RelOptTableImpl(schema, rowType, names, table, c -> expression, table.getStatistic().getRowCount()); } public static RelOptTableImpl create(RelOptSchema schema, RelDataType rowType, Table table, Path path) { final SchemaPlus schemaPlus = MySchemaPlus.create(path); return new RelOptTableImpl(schema, rowType, Pair.left(path), table, getClassExpressionFunction(schemaPlus, Util.last(path).left, table), table.getStatistic().getRowCount()); } public static RelOptTableImpl create(RelOptSchema schema, RelDataType rowType, final CalciteSchema.TableEntry tableEntry, Double rowCount) { final Table table = tableEntry.getTable(); return new RelOptTableImpl(schema, rowType, tableEntry.path(), table, getClassExpressionFunction(tableEntry, table), rowCount); } /** * Creates a copy of this RelOptTable. The new RelOptTable will have newRowType. */ public RelOptTableImpl copy(RelDataType newRowType) { return new RelOptTableImpl(this.schema, newRowType, this.names, this.table, this.expressionFunction, this.rowCount); } @Override public String toString() { return "RelOptTableImpl{" + "schema=" + schema + ", names= " + names + ", table=" + table + ", rowType=" + rowType + '}'; } private static Function<Class, Expression> getClassExpressionFunction( CalciteSchema.TableEntry tableEntry, Table table) { return getClassExpressionFunction(tableEntry.schema.plus(), tableEntry.name, table); } private static Function<Class, Expression> getClassExpressionFunction( final SchemaPlus schema, final String tableName, final Table table) { if (table instanceof QueryableTable) { final QueryableTable queryableTable = (QueryableTable) table; return clazz -> queryableTable.getExpression(schema, tableName, clazz); } else if (table instanceof ScannableTable || table instanceof FilterableTable || table instanceof ProjectableFilterableTable) { return clazz -> Schemas.tableExpression(schema, Object[].class, tableName, table.getClass()); } else if (table instanceof StreamableTable) { return getClassExpressionFunction(schema, tableName, ((StreamableTable) table).stream()); } else { return input -> { throw new UnsupportedOperationException(); }; } } public static RelOptTableImpl create(RelOptSchema schema, RelDataType rowType, Table table, ImmutableList<String> names) { assert table instanceof TranslatableTable || table instanceof ScannableTable || table instanceof ModifiableTable; return new RelOptTableImpl(schema, rowType, names, table, null, null); } public <T> T unwrap(Class<T> clazz) { if (clazz.isInstance(this)) { return clazz.cast(this); } if (clazz.isInstance(table)) { return clazz.cast(table); } if (table instanceof Wrapper) { final T t = ((Wrapper) table).unwrap(clazz); if (t != null) { return t; } } if (clazz == CalciteSchema.class) { return clazz.cast( Schemas.subSchema(((CalciteCatalogReader) schema).rootSchema, Util.skipLast(getQualifiedName()))); } return null; } public Expression getExpression(Class clazz) { if (expressionFunction == null) { return null; } return expressionFunction.apply(clazz); } @Override protected RelOptTable extend(Table extendedTable) { final RelDataType extendedRowType = extendedTable.getRowType(getRelOptSchema().getTypeFactory()); return new RelOptTableImpl(getRelOptSchema(), extendedRowType, getQualifiedName(), extendedTable, expressionFunction, getRowCount()); } @Override public boolean equals(Object obj) { return obj instanceof RelOptTableImpl && this.rowType.equals(((RelOptTableImpl) obj).getRowType()) && this.table == ((RelOptTableImpl) obj).table; } @Override public int hashCode() { return (this.table == null) ? super.hashCode() : this.table.hashCode(); } public double getRowCount() { if (rowCount != null) { return rowCount; } if (table != null) { final Double rowCount = table.getStatistic().getRowCount(); if (rowCount != null) { return rowCount; } } return 100d; } public RelOptSchema getRelOptSchema() { return schema; } public RelNode toRel(ToRelContext context) { // Make sure rowType's list is immutable. If rowType is DynamicRecordType, creates a new // RelOptTable by replacing with immutable RelRecordType using the same field list. if (this.getRowType().isDynamicStruct()) { final RelDataType staticRowType = new RelRecordType(getRowType().getFieldList()); final RelOptTable relOptTable = this.copy(staticRowType); return relOptTable.toRel(context); } // If there are any virtual columns, create a copy of this table without // those virtual columns. final List<ColumnStrategy> strategies = getColumnStrategies(); if (strategies.contains(ColumnStrategy.VIRTUAL)) { final RelDataTypeFactory.Builder b = context.getCluster().getTypeFactory().builder(); for (RelDataTypeField field : rowType.getFieldList()) { if (strategies.get(field.getIndex()) != ColumnStrategy.VIRTUAL) { b.add(field.getName(), field.getType()); } } final RelOptTable relOptTable = new RelOptTableImpl(this.schema, b.build(), this.names, this.table, this.expressionFunction, this.rowCount) { @Override public <T> T unwrap(Class<T> clazz) { if (clazz.isAssignableFrom(InitializerExpressionFactory.class)) { return clazz.cast(NullInitializerExpressionFactory.INSTANCE); } return super.unwrap(clazz); } }; return relOptTable.toRel(context); } if (table instanceof TranslatableTable) { return ((TranslatableTable) table).toRel(context, this); } return LogicalTableScan.create(context.getCluster(), this, context.getTableHints()); } public List<RelCollation> getCollationList() { if (table != null) { return table.getStatistic().getCollations(); } return ImmutableList.of(); } public RelDistribution getDistribution() { if (table != null) { return table.getStatistic().getDistribution(); } return RelDistributionTraitDef.INSTANCE.getDefault(); } public boolean isKey(ImmutableBitSet columns) { if (table != null) { return table.getStatistic().isKey(columns); } return false; } public List<ImmutableBitSet> getKeys() { return table.getStatistic().getKeys(); } public List<RelReferentialConstraint> getReferentialConstraints() { if (table != null) { return table.getStatistic().getReferentialConstraints(); } return ImmutableList.of(); } public RelDataType getRowType() { return rowType; } public boolean supportsModality(SqlModality modality) { switch (modality) { case STREAM: return table instanceof StreamableTable; default: return !(table instanceof StreamableTable); } } @Override public boolean isTemporal() { return table instanceof TemporalTable; } public List<String> getQualifiedName() { return names; } public SqlMonotonicity getMonotonicity(String columnName) { for (RelCollation collation : table.getStatistic().getCollations()) { final RelFieldCollation fieldCollation = collation.getFieldCollations().get(0); final int fieldIndex = fieldCollation.getFieldIndex(); if (fieldIndex < rowType.getFieldCount() && rowType.getFieldNames().get(fieldIndex).equals(columnName)) { return fieldCollation.direction.monotonicity(); } } return SqlMonotonicity.NOT_MONOTONIC; } public SqlAccessType getAllowedAccess() { return SqlAccessType.ALL; } /** Helper for {@link #getColumnStrategies()}. */ public static List<ColumnStrategy> columnStrategies(final RelOptTable table) { final int fieldCount = table.getRowType().getFieldCount(); final InitializerExpressionFactory ief = Util.first(table.unwrap(InitializerExpressionFactory.class), NullInitializerExpressionFactory.INSTANCE); return new AbstractList<ColumnStrategy>() { public int size() { return fieldCount; } public ColumnStrategy get(int index) { return ief.generationStrategy(table, index); } }; } /** Converts the ordinal of a field into the ordinal of a stored field. * That is, it subtracts the number of virtual fields that come before it. */ public static int realOrdinal(final RelOptTable table, int i) { List<ColumnStrategy> strategies = table.getColumnStrategies(); int n = 0; for (int j = 0; j < i; j++) { switch (strategies.get(j)) { case VIRTUAL: ++n; } } return i - n; } /** Returns the row type of a table after any {@link ColumnStrategy#VIRTUAL} * columns have been removed. This is the type of the records that are * actually stored. */ public static RelDataType realRowType(RelOptTable table) { final RelDataType rowType = table.getRowType(); final List<ColumnStrategy> strategies = columnStrategies(table); if (!strategies.contains(ColumnStrategy.VIRTUAL)) { return rowType; } final RelDataTypeFactory.Builder builder = table.getRelOptSchema().getTypeFactory().builder(); for (RelDataTypeField field : rowType.getFieldList()) { if (strategies.get(field.getIndex()) != ColumnStrategy.VIRTUAL) { builder.add(field); } } return builder.build(); } /** Implementation of {@link SchemaPlus} that wraps a regular schema and knows * its name and parent. * * <p>It is read-only, and functionality is limited in other ways, it but * allows table expressions to be generated. */ private static class MySchemaPlus implements SchemaPlus { private final SchemaPlus parent; private final String name; private final Schema schema; MySchemaPlus(SchemaPlus parent, String name, Schema schema) { this.parent = parent; this.name = name; this.schema = schema; } public static MySchemaPlus create(Path path) { final Pair<String, Schema> pair = Util.last(path); final SchemaPlus parent; if (path.size() == 1) { parent = null; } else { parent = create(path.parent()); } return new MySchemaPlus(parent, pair.left, pair.right); } @Override public SchemaPlus getParentSchema() { return parent; } @Override public String getName() { return name; } @Override public SchemaPlus getSubSchema(String name) { final Schema subSchema = schema.getSubSchema(name); return subSchema == null ? null : new MySchemaPlus(this, name, subSchema); } @Override public SchemaPlus add(String name, Schema schema) { throw new UnsupportedOperationException(); } @Override public void add(String name, Table table) { throw new UnsupportedOperationException(); } @Override public void add(String name, org.apache.calcite.schema.Function function) { throw new UnsupportedOperationException(); } @Override public void add(String name, RelProtoDataType type) { throw new UnsupportedOperationException(); } @Override public void add(String name, Lattice lattice) { throw new UnsupportedOperationException(); } @Override public boolean isMutable() { return schema.isMutable(); } @Override public <T> T unwrap(Class<T> clazz) { return null; } @Override public void setPath(ImmutableList<ImmutableList<String>> path) { throw new UnsupportedOperationException(); } @Override public void setCacheEnabled(boolean cache) { throw new UnsupportedOperationException(); } @Override public boolean isCacheEnabled() { return false; } @Override public Table getTable(String name) { return schema.getTable(name); } @Override public Set<String> getTableNames() { return schema.getTableNames(); } @Override public RelProtoDataType getType(String name) { return schema.getType(name); } @Override public Set<String> getTypeNames() { return schema.getTypeNames(); } @Override public Collection<org.apache.calcite.schema.Function> getFunctions(String name) { return schema.getFunctions(name); } @Override public Set<String> getFunctionNames() { return schema.getFunctionNames(); } @Override public Set<String> getSubSchemaNames() { return schema.getSubSchemaNames(); } @Override public Expression getExpression(SchemaPlus parentSchema, String name) { return schema.getExpression(parentSchema, name); } @Override public Schema snapshot(SchemaVersion version) { throw new UnsupportedOperationException(); } } }
apache-2.0
britter/commons-lang
src/main/java/org/apache/commons/lang3/function/FailableIntPredicate.java
3391
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.commons.lang3.function; import java.util.Objects; import java.util.function.IntPredicate; /** * A functional interface like {@link IntPredicate} that declares a {@code Throwable}. * * @param <E> Thrown exception. * @since 3.11 */ @FunctionalInterface public interface FailableIntPredicate<E extends Throwable> { /** FALSE singleton */ @SuppressWarnings("rawtypes") FailableIntPredicate FALSE = t -> false; /** TRUE singleton */ @SuppressWarnings("rawtypes") FailableIntPredicate TRUE = t -> true; /** * Returns The FALSE singleton. * * @param <E> Thrown exception. * @return The NOP singleton. */ static <E extends Throwable> FailableIntPredicate<E> falsePredicate() { return FALSE; } /** * Returns The FALSE TRUE. * * @param <E> Thrown exception. * @return The NOP singleton. */ static <E extends Throwable> FailableIntPredicate<E> truePredicate() { return TRUE; } /** * Returns a composed {@code FailableIntPredicate} like {@link IntPredicate#and(IntPredicate)}. * * @param other a predicate that will be logically-ANDed with this predicate. * @return a composed {@code FailableIntPredicate} like {@link IntPredicate#and(IntPredicate)}. * @throws NullPointerException if other is null */ default FailableIntPredicate<E> and(final FailableIntPredicate<E> other) { Objects.requireNonNull(other); return t -> test(t) && other.test(t); } /** * Returns a predicate that negates this predicate. * * @return a predicate that negates this predicate. */ default FailableIntPredicate<E> negate() { return t -> !test(t); } /** * Returns a composed {@code FailableIntPredicate} like {@link IntPredicate#and(IntPredicate)}. * * @param other a predicate that will be logically-ORed with this predicate. * @return a composed {@code FailableIntPredicate} like {@link IntPredicate#and(IntPredicate)}. * @throws NullPointerException if other is null */ default FailableIntPredicate<E> or(final FailableIntPredicate<E> other) { Objects.requireNonNull(other); return t -> test(t) || other.test(t); } /** * Tests the predicate. * * @param value the parameter for the predicate to accept. * @return {@code true} if the input argument matches the predicate, {@code false} otherwise. * @throws E Thrown when the consumer fails. */ boolean test(int value) throws E; }
apache-2.0
migue/fabric8
components/fabric8-arquillian/src/main/java/io/fabric8/arquillian/kubernetes/await/SessionServicesAreReady.java
6012
/* * Copyright 2005-2015 Red Hat, Inc. * * Red Hat licenses this file to you under the Apache License, version * 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. */ package io.fabric8.arquillian.kubernetes.await; import io.fabric8.arquillian.kubernetes.Configuration; import io.fabric8.arquillian.kubernetes.Session; import io.fabric8.kubernetes.api.KubernetesClient; import io.fabric8.kubernetes.api.model.EndpointAddress; import io.fabric8.kubernetes.api.model.EndpointSubset; import io.fabric8.kubernetes.api.model.Endpoints; import io.fabric8.kubernetes.api.model.Service; import io.fabric8.kubernetes.api.model.ServicePort; import io.fabric8.kubernetes.api.model.ServiceSpec; import java.net.InetSocketAddress; import java.net.Socket; import java.util.ArrayList; import java.util.List; import java.util.concurrent.Callable; import static io.fabric8.kubernetes.api.KubernetesHelper.getName; public class SessionServicesAreReady implements Callable<Boolean> { private final Session session; private final KubernetesClient kubernetesClient; private final Configuration configuration; public SessionServicesAreReady(KubernetesClient kubernetesClient, Session session, Configuration configuration) { this.session = session; this.kubernetesClient = kubernetesClient; this.configuration = configuration; } @Override public Boolean call() throws Exception { boolean result = true; List<Service> services = kubernetesClient.getServices(session.getNamespace()).getItems(); if (services.isEmpty()) { result = false; session.getLogger().warn("No services are available yet, waiting..."); } else if (configuration.isWaitForServiceConnection()) { for (Service s : filterServices(services, configuration.getWaitForServices())) { if (!isEndpointAvailable(s)) { result = false; break; } } } return result; } /** * Checks if there is an endpoint for the service available. * @param s The target service. * @return Returns true if a connection to at least one of the endpoints is possible. */ private boolean isEndpointAvailable(Service s) { String serviceStatus = null; boolean result = false; String sid = getName(s); //String namespace = s.getMetadata().getNamespace(); String namespace = kubernetesClient.getNamespace(); Endpoints endpoints = kubernetesClient.endpointsForService(sid, namespace); ServiceSpec spec = s.getSpec(); if (endpoints != null && spec != null) { List<EndpointSubset> subsets = endpoints.getSubsets(); if (subsets != null) { for (EndpointSubset subset : subsets) { List<EndpointAddress> addresses = subset.getAddresses(); if (addresses != null) { for (EndpointAddress address : addresses) { String ip = address.getIp(); String addr = ip; /* TODO v1beta2... String addr = endpoit.substring(0, endpoit.indexOf(":")); Integer port = Integer.parseInt(endpoit.substring(endpoit.indexOf(":") + 1)); */ List<ServicePort> ports = spec.getPorts(); for (ServicePort port : ports) { Integer portNumber = port.getPort(); if (portNumber != null && portNumber > 0) { if (configuration.isConnectToServices()) { try (Socket socket = new Socket()) { socket.connect(new InetSocketAddress(ip, portNumber), configuration.getServiceConnectionTimeout()); serviceStatus = "Service: " + sid + " is ready. Provider:" + addr + "."; return true; } catch (Exception e) { serviceStatus = "Service: " + sid + " is not ready! in namespace " + namespace + ". Error: " + e.getMessage(); } finally { session.getLogger().warn(serviceStatus); } } else { serviceStatus = "Service: " + sid + " is ready. Not testing connecting to it!. Provider:" + addr + "."; session.getLogger().warn(serviceStatus); return true; } } } } } } } } return result; } private List<Service> filterServices(List<Service> services, List<String> selectedIds) { if (selectedIds != null && !selectedIds.isEmpty()) { List<Service> result = new ArrayList<>(); for (Service s : services) { String sid = getName(s); if (selectedIds.contains(sid)) { result.add(s); } } return result; } else { return services; } } }
apache-2.0
OpenXIP/xip-libraries
src/extern/inventor/lib/nodekits/include/Inventor/misc/upgraders/SoV1AppearanceKit.h
4929
/* * * Copyright (C) 2000 Silicon Graphics, Inc. All Rights Reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * Further, this software is distributed without any warranty that it is * free of the rightful claim of any third person regarding infringement * or the like. Any license provided herein, whether implied or * otherwise, applies only to this software file. Patent licenses, if * any, provided herein do not apply to combinations of this program with * other software, or any other product whatsoever. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, * Mountain View, CA 94043, or: * * http://www.sgi.com * * For further information regarding this notice, see: * * http://oss.sgi.com/projects/GenInfo/NoticeExplan/ * */ // -*- C++ -*- /* * Copyright (C) 1990,91 Silicon Graphics, Inc. * _______________________________________________________________________ ______________ S I L I C O N G R A P H I C S I N C . ____________ | | $Revision: 1.1.1.1 $ | | Description: | Defines the SoV1AppearanceKit class. A parent node that manages | a collection of child nodes for | complete description of the graphical appearance. | | Author(s) : Paul Isaacs, Thad Beier | ______________ S I L I C O N G R A P H I C S I N C . ____________ _______________________________________________________________________ */ #ifndef _SO_V1_APPEARANCE_KIT_ #define _SO_V1_APPEARANCE_KIT_ #include <Inventor/misc/upgraders/SoV1BaseKit.h> #include <Inventor/SoLists.h> //////////////////////////////////////////////////////////////////// // Class: SoV1AppearanceKit // // A parent node that manages a collection of child nodes // for complete description of the graphical appearance. // // New nodes in this subclass are: // lightModel, drawStyle, material, complexity, texture2, and font // // The structure of the catalog for this class is: // // this // | // ----------------------------------------------------------------------- // | | | | | | | | // "label" | "environment" |"material" "complexity" | "font" // | | "texture2list" // "lightModel" "drawStyle" // // //////////////////////////////////////////////////////////////////// SoEXTENDER class SoV1AppearanceKit : public SoV1BaseKit { // Define typeId and name stuff SO_NODE_HEADER(SoV1AppearanceKit); // Define catalog for children SO_V1_SUBKIT_CATALOG_HEADER(SoV1AppearanceKit); public: // constructor SoV1AppearanceKit(); virtual SoNode *createNewNode(); // If tryToSetPartInNewNode fails, then this routine is called. // It will fail if the part read from file was called "texture2List" // This part has been changed to a single noded part, "texture2" // We will use just the first child of the list and set it as the "texture2" // It will also print a warning. virtual SbBool dealWithUpgradedPart( SoBaseKit *newNode, SoNode *newPart, const SbName &newPartName ); SoINTERNAL public: static void initClass(); static SoNodeList *getKitsWithUnusedTextureXfs(); static SoNodeList *getUnusedTextureXfs(); protected: // In the case where an SoV1Texture2 node has the translation rotation // scaleFactor or center field set, it will return a group containing // an SoTexture2Transform and an SoTexture2 node. // The SoV1AppearanceKit can not have a group part, so it pulls out the // SoTexture2 node and sets it as "transform." // Instead of just throwing away the SoTexture2Transform, it puts it in // this static variable so that other nodes can get at it. // For example, the SoV1GroupKit overloads the virtual function // setUpNewNode to get this texture2Transform and copy its // values into its "texture2Transform" part. static SoNodeList *kitsWithUnusedTextureXfs; static SoNodeList *unusedTextureXfs; virtual ~SoV1AppearanceKit(); }; #endif /* _SO_V1_APPEARANCE_KIT_ */
apache-2.0
locationtech/geowave
core/store/src/main/java/org/locationtech/geowave/core/store/memory/MemoryAdapterStore.java
3177
/** * Copyright (c) 2013-2020 Contributors to the Eclipse Foundation * * <p> See the NOTICE file distributed with this work for additional information regarding copyright * ownership. All rights reserved. This program and the accompanying materials are made available * under the terms of the Apache License, Version 2.0 which accompanies this distribution and is * available at http://www.apache.org/licenses/LICENSE-2.0.txt */ package org.locationtech.geowave.core.store.memory; import java.io.IOException; import java.io.Serializable; import java.util.Collections; import java.util.HashMap; import java.util.Map; import org.locationtech.geowave.core.index.persist.PersistenceUtils; import org.locationtech.geowave.core.store.adapter.TransientAdapterStore; import org.locationtech.geowave.core.store.api.DataTypeAdapter; /** * This is a simple HashMap based in-memory implementation of the AdapterStore and can be useful if * it is undesirable to persist and query objects within another storage mechanism such as an * Accumulo table. */ public class MemoryAdapterStore implements TransientAdapterStore, Serializable { /** */ private static final long serialVersionUID = 1L; private Map<String, DataTypeAdapter<?>> adapterMap; public MemoryAdapterStore() { adapterMap = Collections.synchronizedMap(new HashMap<String, DataTypeAdapter<?>>()); } public MemoryAdapterStore(final DataTypeAdapter<?>[] adapters) { adapterMap = Collections.synchronizedMap(new HashMap<String, DataTypeAdapter<?>>()); for (final DataTypeAdapter<?> adapter : adapters) { adapterMap.put(adapter.getTypeName(), adapter); } } @Override public void addAdapter(final DataTypeAdapter<?> adapter) { adapterMap.put(adapter.getTypeName(), adapter); } @Override public DataTypeAdapter<?> getAdapter(final String typeName) { return adapterMap.get(typeName); } @Override public boolean adapterExists(final String typeName) { return adapterMap.containsKey(typeName); } @Override public DataTypeAdapter<?>[] getAdapters() { return adapterMap.values().toArray(new DataTypeAdapter[adapterMap.size()]); } @Override public void removeAll() { adapterMap.clear(); } private void writeObject(final java.io.ObjectOutputStream out) throws IOException { final int count = adapterMap.size(); out.writeInt(count); for (final Map.Entry<String, DataTypeAdapter<?>> entry : adapterMap.entrySet()) { out.writeUTF(entry.getKey()); final byte[] val = PersistenceUtils.toBinary(entry.getValue()); out.writeObject(val); } } private void readObject(final java.io.ObjectInputStream in) throws IOException, ClassNotFoundException { final int count = in.readInt(); adapterMap = Collections.synchronizedMap(new HashMap<String, DataTypeAdapter<?>>()); for (int i = 0; i < count; i++) { final String id = in.readUTF(); final byte[] data = (byte[]) in.readObject(); adapterMap.put(id, (DataTypeAdapter<?>) PersistenceUtils.fromBinary(data)); } } @Override public void removeAdapter(final String typeName) { adapterMap.remove(typeName); } }
apache-2.0
lihongqiang/kettle-4.4.0-stable
src/org/pentaho/di/trans/steps/metainject/MetaInjectMeta.java
16453
/******************************************************************************* * * Pentaho Data Integration * * Copyright (C) 2002-2012 by Pentaho : http://www.pentaho.com * ******************************************************************************* * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ package org.pentaho.di.trans.steps.metainject; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import org.pentaho.di.core.CheckResultInterface; import org.pentaho.di.core.Const; import org.pentaho.di.core.Counter; import org.pentaho.di.core.ObjectLocationSpecificationMethod; import org.pentaho.di.core.database.DatabaseMeta; import org.pentaho.di.core.exception.KettleException; import org.pentaho.di.core.exception.KettleStepException; import org.pentaho.di.core.exception.KettleXMLException; import org.pentaho.di.core.row.RowMetaInterface; import org.pentaho.di.core.variables.VariableSpace; import org.pentaho.di.core.xml.XMLHandler; import org.pentaho.di.i18n.BaseMessages; import org.pentaho.di.repository.ObjectId; import org.pentaho.di.repository.Repository; import org.pentaho.di.repository.RepositoryDirectoryInterface; import org.pentaho.di.repository.StringObjectId; import org.pentaho.di.trans.Trans; import org.pentaho.di.trans.TransMeta; import org.pentaho.di.trans.step.BaseStepMeta; import org.pentaho.di.trans.step.StepDataInterface; import org.pentaho.di.trans.step.StepInterface; import org.pentaho.di.trans.step.StepMeta; import org.pentaho.di.trans.step.StepMetaInterface; import org.w3c.dom.Node; /** * @since 2007-07-05 * @author matt * @version 3.0 */ public class MetaInjectMeta extends BaseStepMeta implements StepMetaInterface { private static Class<?> PKG = MetaInjectMeta.class; // for i18n purposes, needed by Translator2!! $NON-NLS-1$ // description of the transformation to execute... // private String transName; private String fileName; private String directoryPath; private ObjectId transObjectId; private ObjectLocationSpecificationMethod specificationMethod; private String sourceStepName; private Map<TargetStepAttribute, SourceStepField> targetSourceMapping; public MetaInjectMeta() { super(); // allocate BaseStepMeta specificationMethod=ObjectLocationSpecificationMethod.FILENAME; targetSourceMapping = new HashMap<TargetStepAttribute, SourceStepField>(); } public Object clone() { Object retval = super.clone(); return retval; } public void setDefault() { } public String getXML() { StringBuffer retval = new StringBuffer(500); retval.append(" ").append(XMLHandler.addTagValue("specification_method", specificationMethod == null ? null : specificationMethod.getCode())); retval.append(" ").append(XMLHandler.addTagValue("trans_object_id", transObjectId == null ? null : transObjectId.toString())); retval.append(" ").append(XMLHandler.addTagValue("trans_name", transName)); //$NON-NLS-1$ retval.append(" ").append(XMLHandler.addTagValue("filename", fileName)); //$NON-NLS-1$ retval.append(" ").append(XMLHandler.addTagValue("directory_path", directoryPath)); //$NON-NLS-1$ retval.append(" ").append(XMLHandler.addTagValue("source_step", sourceStepName)); //$NON-NLS-1$ retval.append(" ").append(XMLHandler.openTag("mappings")); for (TargetStepAttribute target : targetSourceMapping.keySet()) { retval.append(" ").append(XMLHandler.openTag("mapping")); SourceStepField source = targetSourceMapping.get(target); retval.append(" ").append(XMLHandler.addTagValue("target_step_name", target.getStepname())); retval.append(" ").append(XMLHandler.addTagValue("target_attribute_key", target.getAttributeKey())); retval.append(" ").append(XMLHandler.addTagValue("target_detail", target.isDetail())); retval.append(" ").append(XMLHandler.addTagValue("source_step", source.getStepname())); retval.append(" ").append(XMLHandler.addTagValue("source_field", source.getField())); retval.append(" ").append(XMLHandler.closeTag("mapping")); } retval.append(" ").append(XMLHandler.closeTag("mappings")); return retval.toString(); } public void loadXML(Node stepnode, List<DatabaseMeta> databases, Map<String, Counter> counters) throws KettleXMLException { try { String method = XMLHandler.getTagValue(stepnode, "specification_method"); specificationMethod = ObjectLocationSpecificationMethod.getSpecificationMethodByCode(method); String transId = XMLHandler.getTagValue(stepnode, "trans_object_id"); transObjectId = Const.isEmpty(transId) ? null : new StringObjectId(transId); transName = XMLHandler.getTagValue(stepnode, "trans_name"); //$NON-NLS-1$ fileName = XMLHandler.getTagValue(stepnode, "filename"); //$NON-NLS-1$ directoryPath = XMLHandler.getTagValue(stepnode, "directory_path"); //$NON-NLS-1$ sourceStepName = XMLHandler.getTagValue(stepnode, "source_step"); //$NON-NLS-1$ Node mappingsNode = XMLHandler.getSubNode(stepnode, "mappings"); int nrMappings = XMLHandler.countNodes(mappingsNode, "mapping"); for (int i=0;i<nrMappings;i++) { Node mappingNode = XMLHandler.getSubNodeByNr(mappingsNode, "mapping", i); String targetStepname = XMLHandler.getTagValue(mappingNode, "target_step_name"); String targetAttributeKey = XMLHandler.getTagValue(mappingNode, "target_attribute_key"); boolean targetDetail = "Y".equalsIgnoreCase(XMLHandler.getTagValue(mappingNode, "target_detail")); String sourceStepname = XMLHandler.getTagValue(mappingNode, "source_step"); String sourceField = XMLHandler.getTagValue(mappingNode, "source_field"); TargetStepAttribute target = new TargetStepAttribute(targetStepname, targetAttributeKey, targetDetail); SourceStepField source = new SourceStepField(sourceStepname, sourceField); targetSourceMapping.put(target, source); } } catch (Exception e) { throw new KettleXMLException("Unable to load step info from XML", e); } } public void readRep(Repository rep, ObjectId id_step, List<DatabaseMeta> databases, Map<String, Counter> counters) throws KettleException { try { String method = rep.getStepAttributeString(id_step, "specification_method"); specificationMethod = ObjectLocationSpecificationMethod.getSpecificationMethodByCode(method); String transId = rep.getStepAttributeString(id_step, "trans_object_id"); transObjectId = Const.isEmpty(transId) ? null : new StringObjectId(transId); transName = rep.getStepAttributeString(id_step, "trans_name"); //$NON-NLS-1$ fileName = rep.getStepAttributeString(id_step, "filename"); //$NON-NLS-1$ directoryPath = rep.getStepAttributeString(id_step, "directory_path"); //$NON-NLS-1$ sourceStepName = rep.getStepAttributeString(id_step, "source_step"); //$NON-NLS-1$ int nrMappings = rep.countNrStepAttributes(id_step, "mapping_target_step_name"); for (int i=0;i<nrMappings;i++) { String targetStepname = rep.getStepAttributeString(id_step, i, "mapping_target_step_name"); String targetAttributeKey = rep.getStepAttributeString(id_step, i, "mapping_target_attribute_key"); boolean targetDetail = rep.getStepAttributeBoolean(id_step, i, "mapping_target_detail"); String sourceStepname = rep.getStepAttributeString(id_step, i, "mapping_source_step"); String sourceField = rep.getStepAttributeString(id_step, i, "mapping_source_field"); TargetStepAttribute target = new TargetStepAttribute(targetStepname, targetAttributeKey, targetDetail); SourceStepField source = new SourceStepField(sourceStepname, sourceField); targetSourceMapping.put(target, source); } } catch (Exception e) { throw new KettleException("Unexpected error reading step information from the repository", e); } } public void saveRep(Repository rep, ObjectId id_transformation, ObjectId id_step) throws KettleException { try { rep.saveStepAttribute(id_transformation, id_step, "specification_method", specificationMethod==null ? null : specificationMethod.getCode()); rep.saveStepAttribute(id_transformation, id_step, "trans_object_id", transObjectId==null ? null : transObjectId.toString()); rep.saveStepAttribute(id_transformation, id_step, "filename", fileName); //$NON-NLS-1$ rep.saveStepAttribute(id_transformation, id_step, "trans_name", transName); //$NON-NLS-1$ rep.saveStepAttribute(id_transformation, id_step, "directory_path", directoryPath); //$NON-NLS-1$ rep.saveStepAttribute(id_transformation, id_step, "source_step", sourceStepName); //$NON-NLS-1$ List<TargetStepAttribute> keySet = new ArrayList<TargetStepAttribute>(targetSourceMapping.keySet()); for (int i=0;i<keySet.size();i++) { TargetStepAttribute target = keySet.get(i); SourceStepField source = targetSourceMapping.get(target); rep.saveStepAttribute(id_transformation, id_step, i, "mapping_target_step_name", target.getStepname()); rep.saveStepAttribute(id_transformation, id_step, i, "mapping_target_attribute_key", target.getAttributeKey()); rep.saveStepAttribute(id_transformation, id_step, i, "mapping_target_detail", target.isDetail()); rep.saveStepAttribute(id_transformation, id_step, i, "mapping_source_step", source.getStepname()); rep.saveStepAttribute(id_transformation, id_step, i, "mapping_source_field", source.getField()); } } catch (Exception e) { throw new KettleException("Unable to save step information to the repository for id_step=" + id_step, e); } } public void getFields(RowMetaInterface rowMeta, String origin, RowMetaInterface[] info, StepMeta nextStep, VariableSpace space) throws KettleStepException { rowMeta.clear(); // No defined output is expected from this step. } public StepInterface getStep(StepMeta stepMeta, StepDataInterface stepDataInterface, int cnr, TransMeta tr, Trans trans) { return new MetaInject(stepMeta, stepDataInterface, cnr, tr, trans); } public StepDataInterface getStepData() { return new MetaInjectData(); } public Map<TargetStepAttribute, SourceStepField> getTargetSourceMapping() { return targetSourceMapping; } public void setTargetSourceMapping(Map<TargetStepAttribute, SourceStepField> targetSourceMapping) { this.targetSourceMapping = targetSourceMapping; } @Override public void check(List<CheckResultInterface> remarks, TransMeta transMeta, StepMeta stepMeta, RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info) { // TODO Auto-generated method stub } /** * @return the transName */ public String getTransName() { return transName; } /** * @param transName the transName to set */ public void setTransName(String transName) { this.transName = transName; } /** * @return the fileName */ public String getFileName() { return fileName; } /** * @param fileName the fileName to set */ public void setFileName(String fileName) { this.fileName = fileName; } /** * @return the directoryPath */ public String getDirectoryPath() { return directoryPath; } /** * @param directoryPath the directoryPath to set */ public void setDirectoryPath(String directoryPath) { this.directoryPath = directoryPath; } /** * @return the transObjectId */ public ObjectId getTransObjectId() { return transObjectId; } /** * @param transObjectId the transObjectId to set */ public void setTransObjectId(ObjectId transObjectId) { this.transObjectId = transObjectId; } /** * @return the specificationMethod */ public ObjectLocationSpecificationMethod getSpecificationMethod() { return specificationMethod; } /** * @param specificationMethod the specificationMethod to set */ public void setSpecificationMethod(ObjectLocationSpecificationMethod specificationMethod) { this.specificationMethod = specificationMethod; } public synchronized static final TransMeta loadTransformationMeta(MetaInjectMeta mappingMeta, Repository rep, VariableSpace space) throws KettleException { TransMeta mappingTransMeta = null; switch(mappingMeta.getSpecificationMethod()) { case FILENAME: String realFilename = space.environmentSubstitute(mappingMeta.getFileName()); try { // OK, load the meta-data from file... // // Don't set internal variables: they belong to the parent thread! // mappingTransMeta = new TransMeta(realFilename, false); mappingTransMeta.getLogChannel().logDetailed("Loading Mapping from repository", "Mapping transformation was loaded from XML file [" + realFilename + "]"); } catch (Exception e) { throw new KettleException(BaseMessages.getString(PKG, "MetaInjectMeta.Exception.UnableToLoadTransformationFromFile", realFilename), e); } break; case REPOSITORY_BY_NAME: String realTransname = space.environmentSubstitute(mappingMeta.getTransName()); String realDirectory = space.environmentSubstitute(mappingMeta.getDirectoryPath()); if (!Const.isEmpty(realTransname) && !Const.isEmpty(realDirectory) && rep != null) { RepositoryDirectoryInterface repdir = rep.findDirectory(realDirectory); if (repdir != null) { try { // reads the last revision in the repository... // mappingTransMeta = rep.loadTransformation(realTransname, repdir, null, true, null); mappingTransMeta.getLogChannel().logDetailed("Loading Mapping from repository", "Mapping transformation [" + realTransname + "] was loaded from the repository"); } catch (Exception e) { throw new KettleException("Unable to load transformation [" + realTransname + "]", e); } } else { throw new KettleException(BaseMessages.getString(PKG, "MetaInjectMeta.Exception.UnableToLoadTransformationFromRepository", realTransname, realDirectory)); //$NON-NLS-1$ //$NON-NLS-2$ } } break; case REPOSITORY_BY_REFERENCE: // Read the last revision by reference... mappingTransMeta = rep.loadTransformation(mappingMeta.getTransObjectId(), null); break; } // Pass some important information to the mapping transformation metadata: // mappingTransMeta.copyVariablesFrom(space); mappingTransMeta.setRepository(rep); mappingTransMeta.setFilename(mappingTransMeta.getFilename()); return mappingTransMeta; } @Override public boolean excludeFromCopyDistributeVerification() { return true; } @Override public boolean excludeFromRowLayoutVerification() { return true; } /** * @return the sourceStepName */ public String getSourceStepName() { return sourceStepName; } /** * @param sourceStepName the sourceStepName to set */ public void setSourceStepName(String sourceStepName) { this.sourceStepName = sourceStepName; } }
apache-2.0
android-art-intel/marshmallow
art-extension/runtime/arch/instruction_set.cc
4097
/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "instruction_set.h" // Explicitly include our own elf.h to avoid Linux and other dependencies. #include "../elf.h" #include "globals.h" namespace art { const char* GetInstructionSetString(const InstructionSet isa) { switch (isa) { case kArm: case kThumb2: return "arm"; case kArm64: return "arm64"; case kX86: return "x86"; case kX86_64: return "x86_64"; case kMips: return "mips"; case kMips64: return "mips64"; case kNone: return "none"; default: LOG(FATAL) << "Unknown ISA " << isa; UNREACHABLE(); } } InstructionSet GetInstructionSetFromString(const char* isa_str) { CHECK(isa_str != nullptr); if (strcmp("arm", isa_str) == 0) { return kArm; } else if (strcmp("arm64", isa_str) == 0) { return kArm64; } else if (strcmp("x86", isa_str) == 0) { return kX86; } else if (strcmp("x86_64", isa_str) == 0) { return kX86_64; } else if (strcmp("mips", isa_str) == 0) { return kMips; } else if (strcmp("mips64", isa_str) == 0) { return kMips64; } return kNone; } InstructionSet GetInstructionSetFromELF(uint16_t e_machine, uint32_t e_flags) { switch (e_machine) { case EM_ARM: return kArm; case EM_AARCH64: return kArm64; case EM_386: return kX86; case EM_X86_64: return kX86_64; case EM_MIPS: { if ((e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R2 || (e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R6) { return kMips; } else if ((e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_64R6) { return kMips64; } break; } } return kNone; } size_t GetInstructionSetAlignment(InstructionSet isa) { switch (isa) { case kArm: // Fall-through. case kThumb2: return kArmAlignment; case kArm64: return kArm64Alignment; case kX86: // Fall-through. case kX86_64: return kX86Alignment; case kMips: // Fall-through. case kMips64: return kMipsAlignment; case kNone: LOG(FATAL) << "ISA kNone does not have alignment."; UNREACHABLE(); default: LOG(FATAL) << "Unknown ISA " << isa; UNREACHABLE(); } } static constexpr size_t kDefaultStackOverflowReservedBytes = 16 * KB; static constexpr size_t kMipsStackOverflowReservedBytes = kDefaultStackOverflowReservedBytes; static constexpr size_t kMips64StackOverflowReservedBytes = kDefaultStackOverflowReservedBytes; static constexpr size_t kArmStackOverflowReservedBytes = 8 * KB; static constexpr size_t kArm64StackOverflowReservedBytes = 8 * KB; static constexpr size_t kX86StackOverflowReservedBytes = 8 * KB; static constexpr size_t kX86_64StackOverflowReservedBytes = 8 * KB; size_t GetStackOverflowReservedBytes(InstructionSet isa) { switch (isa) { case kArm: // Intentional fall-through. case kThumb2: return kArmStackOverflowReservedBytes; case kArm64: return kArm64StackOverflowReservedBytes; case kMips: return kMipsStackOverflowReservedBytes; case kMips64: return kMips64StackOverflowReservedBytes; case kX86: return kX86StackOverflowReservedBytes; case kX86_64: return kX86_64StackOverflowReservedBytes; case kNone: LOG(FATAL) << "kNone has no stack overflow size"; UNREACHABLE(); default: LOG(FATAL) << "Unknown instruction set" << isa; UNREACHABLE(); } } } // namespace art
apache-2.0
weathersource/grib_api
src/encode_double_array.c
11692
/* * Copyright 2005-2017 ECMWF. * * This software is licensed under the terms of the Apache Licence Version 2.0 * which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. * * In applying this licence, ECMWF does not waive the privileges and immunities granted to it by * virtue of its status as an intergovernmental organisation nor does it submit to any jurisdiction. */ #ifdef _GET_IBM_COUNTER #include <libhpc.h> #endif static void encode_double_array_common(int numBits, long packStart, long datasize, GRIBPACK *lGrib, const double *data, double zref, double factor, long *gz) { long i, z = *gz; unsigned int ival; int cbits, jbits; unsigned int c; static unsigned int mask[] = {0,1,3,7,15,31,63,127,255}; /* code from gribw routine flist2bitstream */ cbits = 8; c = 0; for ( i = packStart; i < datasize; i++ ) { /* note float -> unsigned int .. truncate */ ival = (unsigned int) ((data[i] - zref) * factor + 0.5); /* if ( ival > max_nbpv_pow2 ) ival = max_nbpv_pow2; if ( ival < 0 ) ival = 0; */ jbits = numBits; while ( cbits <= jbits ) { if ( cbits == 8 ) { jbits -= 8; lGrib[z++] = (ival >> jbits) & 0xFF; } else { jbits -= cbits; lGrib[z++] = (c << cbits) + ((ival >> jbits) & mask[cbits]); cbits = 8; c = 0; } } /* now jbits < cbits */ if ( jbits ) { c = (c << jbits) + (ival & mask[jbits]); cbits -= jbits; } } if ( cbits != 8 ) lGrib[z++] = c << cbits; *gz = z; } static void encode_double_array_byte(int numBits, long packStart, long datasize, GRIBPACK *restrict lGrib, const double *restrict data, double zref, double factor, long *restrict gz) { long i, z = *gz; unsigned long ival; double tmp; data += packStart; datasize -= packStart; if ( numBits == 8 ) { #ifdef _GET_IBM_COUNTER hpmStart(2, "pack 8 bit base"); #endif #if defined (CRAY) #pragma _CRI ivdep #elif defined (SX) #pragma vdir nodep #elif defined (__uxp__) #pragma loop novrec #endif for ( i = 0; i < datasize; i++ ) { tmp = ((data[i] - zref) * factor + 0.5); ival = (unsigned long) tmp; lGrib[z ] = ival; z++; } #ifdef _GET_IBM_COUNTER hpmStop(2); #endif } else if ( numBits == 16 ) { #ifdef _GET_IBM_COUNTER hpmStart(3, "pack 16 bit base"); #endif #if defined (CRAY) #pragma _CRI ivdep #elif defined (SX) #pragma vdir nodep #elif defined (__uxp__) #pragma loop novrec #endif for ( i = 0; i < datasize; i++ ) { tmp = ((data[i] - zref) * factor + 0.5); ival = (unsigned long) tmp; lGrib[z ] = ival >> 8; lGrib[z+1] = ival; z += 2; } #ifdef _GET_IBM_COUNTER hpmStop(3); #endif } else if ( numBits == 24 ) { #ifdef _GET_IBM_COUNTER hpmStart(4, "pack 24 bit base"); #endif #if defined (CRAY) #pragma _CRI ivdep #elif defined (SX) #pragma vdir nodep #elif defined (__uxp__) #pragma loop novrec #endif for ( i = 0; i < datasize; i++ ) { tmp = ((data[i] - zref) * factor + 0.5); ival = (unsigned long) tmp; lGrib[z ] = ival >> 16; lGrib[z+1] = ival >> 8; lGrib[z+2] = ival; z += 3; } #ifdef _GET_IBM_COUNTER hpmStop(4); #endif } else if ( numBits == 32 ) { #ifdef _GET_IBM_COUNTER hpmStart(5, "pack 32 bit base"); #endif #if defined (CRAY) #pragma _CRI ivdep #elif defined (SX) #pragma vdir nodep #elif defined (__uxp__) #pragma loop novrec #endif for ( i = 0; i < datasize; i++ ) { tmp = ((data[i] - zref) * factor + 0.5); ival = (unsigned long) tmp; lGrib[z ] = ival >> 24; lGrib[z+1] = ival >> 16; lGrib[z+2] = ival >> 8; lGrib[z+3] = ival; z += 4; } #ifdef _GET_IBM_COUNTER hpmStop(5); #endif } else if ( numBits > 0 && numBits <= 32 ) { encode_double_array_common(numBits, 0, datasize, lGrib, data, zref, factor, &z); } else if ( numBits == 0 ) { } else { Error("Unimplemented packing factor %d!", numBits); } *gz = z; } static void encode_double_array_unrolled(int numBits, long packStart, long datasize, GRIBPACK *restrict lGrib, const double *restrict data, double zref, double factor, long *restrict gz) { U_BYTEORDER; long i, j, z = *gz; double tmp; #ifdef _ARCH_PWR6 #define __UNROLL_DEPTH_2 8 #else #define __UNROLL_DEPTH_2 8 #endif data += packStart; datasize -= packStart; { long residual = datasize % __UNROLL_DEPTH_2; long ofs = datasize - residual; double dval[__UNROLL_DEPTH_2]; unsigned long ival; /* reducing FP operations to single FMA is slowing down on pwr6 ... */ if ( numBits == 8 ) { unsigned char *cgrib = (unsigned char *) (lGrib + z); #ifdef _GET_IBM_COUNTER hpmStart(2, "pack 8 bit unrolled"); #endif for ( i = 0; i < datasize - residual; i += __UNROLL_DEPTH_2 ) { for (j = 0; j < __UNROLL_DEPTH_2; j++) { dval[j] = ((data[i+j] - zref) * factor + 0.5); } for (j = 0; j < __UNROLL_DEPTH_2; j++) { *cgrib = (unsigned long) dval[j]; cgrib++; z++; } } for (j = 0; j < residual; j++) { dval[j] = ((data[ofs+j] - zref) * factor + 0.5); } for (j = 0; j < residual; j++) { *cgrib = (unsigned long) dval[j]; cgrib++; z++; } #ifdef _GET_IBM_COUNTER hpmStop(2); #endif } else if ( numBits == 16 ) { unsigned short *sgrib = (unsigned short *) (lGrib + z); #ifdef _GET_IBM_COUNTER hpmStart(3, "pack 16 bit unrolled"); #endif for ( i = 0; i < datasize - residual; i += __UNROLL_DEPTH_2 ) { for (j = 0; j < __UNROLL_DEPTH_2; j++) { dval[j] = ((data[i+j] - zref) * factor + 0.5); } if ( IS_BIGENDIAN() ) { for (j = 0; j < __UNROLL_DEPTH_2; j++) { *sgrib = (unsigned long) dval[j]; sgrib++; z += 2; } } else { for (j = 0; j < __UNROLL_DEPTH_2; j++) { ival = (unsigned long) dval[j]; lGrib[z ] = ival >> 8; lGrib[z+1] = ival; z += 2; } } } for (j = 0; j < residual; j++) { dval[j] = ((data[ofs+j] - zref) * factor + 0.5); } if ( IS_BIGENDIAN() ) { for (j = 0; j < residual; j++) { *sgrib = (unsigned long) dval[j]; sgrib++; z += 2; } } else { for (j = 0; j < residual; j++) { ival = (unsigned long) dval[j]; lGrib[z ] = ival >> 8; lGrib[z+1] = ival; z += 2; } } #ifdef _GET_IBM_COUNTER hpmStop(3); #endif } else if ( numBits == 24 ) { #ifdef _GET_IBM_COUNTER hpmStart(4, "pack 24 bit unrolled"); #endif for ( i = 0; i < datasize - residual; i += __UNROLL_DEPTH_2 ) { for (j = 0; j < __UNROLL_DEPTH_2; j++) { dval[j] = ((data[i+j] - zref) * factor + 0.5); } for (j = 0; j < __UNROLL_DEPTH_2; j++) { ival = (unsigned long) dval[j]; lGrib[z ] = ival >> 16; lGrib[z+1] = ival >> 8; lGrib[z+2] = ival; z += 3; } } for (j = 0; j < residual; j++) { dval[j] = ((data[ofs+j] - zref) * factor + 0.5); } for (j = 0; j < residual; j++) { ival = (unsigned long) dval[j]; lGrib[z ] = ival >> 16; lGrib[z+1] = ival >> 8; lGrib[z+2] = ival; z += 3; } #ifdef _GET_IBM_COUNTER hpmStop(4); #endif } else if ( numBits == 32 ) { #ifdef _GET_IBM_COUNTER hpmStart(5, "pack 32 bit unrolled"); #endif unsigned int *igrib = (unsigned int *) (lGrib + z); for ( i = 0; i < datasize - residual; i += __UNROLL_DEPTH_2 ) { for (j = 0; j < __UNROLL_DEPTH_2; j++) { dval[j] = ((data[i+j] - zref) * factor + 0.5); } if ( IS_BIGENDIAN() ) { for (j = 0; j < __UNROLL_DEPTH_2; j++) { *igrib = (unsigned long) dval[j]; igrib++; z += 4; } } else { for (j = 0; j < __UNROLL_DEPTH_2; j++) { ival = (unsigned long) dval[j]; lGrib[z ] = ival >> 24; lGrib[z+1] = ival >> 16; lGrib[z+2] = ival >> 8; lGrib[z+3] = ival; z += 4; } } } for (j = 0; j < residual; j++) { dval[j] = ((data[ofs+j] - zref) * factor + 0.5); } if ( IS_BIGENDIAN() ) { for (j = 0; j < residual; j++) { *igrib = (unsigned long) dval[j]; igrib++; z += 4; } } else { for (j = 0; j < residual; j++) { ival = (unsigned long) dval[j]; lGrib[z ] = ival >> 24; lGrib[z+1] = ival >> 16; lGrib[z+2] = ival >> 8; lGrib[z+3] = ival; z += 4; } } #ifdef _GET_IBM_COUNTER hpmStop(5); #endif } else if ( numBits > 0 && numBits <= 32 ) { encode_double_array_common(numBits, 0, datasize, lGrib, data, zref, factor, &z); } else if ( numBits == 0 ) { } else { Error("Unimplemented packing factor %d!", numBits); } } *gz = z; #undef __UNROLL_DEPTH_2 }
apache-2.0
locationtech/geowave
extensions/cli/sentinel2/src/main/java/org/locationtech/geowave/format/sentinel2/theia/TheiaImageryProvider.java
17063
/** * Copyright (c) 2013-2020 Contributors to the Eclipse Foundation * * <p> See the NOTICE file distributed with this work for additional information regarding copyright * ownership. All rights reserved. This program and the accompanying materials are made available * under the terms of the Apache License, Version 2.0 which accompanies this distribution and is * available at http://www.apache.org/licenses/LICENSE-2.0.txt */ package org.locationtech.geowave.format.sentinel2.theia; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.io.UnsupportedEncodingException; import java.net.URL; import java.net.URLEncoder; import java.security.GeneralSecurityException; import java.text.SimpleDateFormat; import java.util.Date; import java.util.Iterator; import java.util.Locale; import javax.net.ssl.HttpsURLConnection; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.MediaType; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.geotools.coverage.grid.GridCoverage2D; import org.geotools.feature.simple.SimpleFeatureTypeBuilder; import org.locationtech.geowave.adapter.raster.plugin.gdal.GDALGeoTiffReader; import org.locationtech.geowave.adapter.raster.util.ZipUtils; import org.locationtech.geowave.format.sentinel2.BandFeatureIterator; import org.locationtech.geowave.format.sentinel2.DownloadRunner; import org.locationtech.geowave.format.sentinel2.RasterBandData; import org.locationtech.geowave.format.sentinel2.SceneFeatureIterator; import org.locationtech.geowave.format.sentinel2.Sentinel2ImageryProvider; import org.locationtech.jts.geom.Envelope; import org.opengis.feature.simple.SimpleFeature; import org.opengis.feature.simple.SimpleFeatureType; import org.opengis.referencing.FactoryException; import org.opengis.referencing.NoSuchAuthorityCodeException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.sun.jersey.api.client.Client; import com.sun.jersey.api.client.ClientResponse; import com.sun.jersey.api.client.config.ClientConfig; import com.sun.jersey.api.client.config.DefaultClientConfig; import net.sf.json.JSONArray; import net.sf.json.JSONObject; /** Sentinel2 imagery provider for the Theia repository. See: https://theia.cnes.fr */ public class TheiaImageryProvider extends Sentinel2ImageryProvider { private static final Logger LOGGER = LoggerFactory.getLogger(TheiaImageryProvider.class); private static final String SCENES_TYPE_NAME = "theia-sentinel2-scene"; private static final String BANDS_TYPE_NAME = "theia-sentinel2-band"; private static final double NO_DATA_VALUE = 0; private static final String SCENES_SEARCH_URL = "https://theia.cnes.fr/atdistrib/resto2/api/collections/%s/search.json?"; private static final String AUNTHENTICATION_URL = "https://theia.cnes.fr/atdistrib/services/authenticate/"; private static final String DOWNLOAD_URL = "https://theia.cnes.fr/atdistrib/resto2/collections/%s/%s/download/?issuerId=theia"; private static final int DOWNLOAD_RETRY = 5; @Override public String providerName() { return "THEIA"; } @Override public String description() { return "Sentinel2 provider for the Theia repository (https://theia.cnes.fr)"; } @Override public String[] collections() { return new String[] {"SENTINEL2"}; } @Override public boolean isAvailable() { return true; } @Override public SimpleFeatureTypeBuilder sceneFeatureTypeBuilder() throws NoSuchAuthorityCodeException, FactoryException { return SceneFeatureIterator.defaultSceneFeatureTypeBuilder(SCENES_TYPE_NAME); } @Override public SimpleFeatureTypeBuilder bandFeatureTypeBuilder() throws NoSuchAuthorityCodeException, FactoryException { return BandFeatureIterator.defaultBandFeatureTypeBuilder(BANDS_TYPE_NAME); } @Override public Iterator<SimpleFeature> searchScenes( final File scenesDir, final String collection, final String platform, final String location, final Envelope envelope, final Date startDate, final Date endDate, final int orbitNumber, final int relativeOrbitNumber) throws IOException { final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd"); // Build the search URL to fetch products from Theia repository. String searchUrl = String.format(SCENES_SEARCH_URL, collection); if ((platform != null) && (platform.length() > 0)) { searchUrl += "platform=" + platform + "&"; } if ((location != null) && (location.length() > 0)) { searchUrl += "location=" + location + "&"; } if ((envelope != null) && (envelope.isNull() == false)) { searchUrl += String.format( Locale.ENGLISH, "box=%.6f,%.6f,%.6f,%.6f&", envelope.getMinX(), envelope.getMinY(), envelope.getMaxX(), envelope.getMaxY()); } if (startDate != null) { searchUrl += "startDate=" + dateFormat.format(startDate) + "&"; } if (endDate != null) { searchUrl += "completionDate=" + dateFormat.format(endDate) + "&"; } if (orbitNumber > 0) { searchUrl += "orbitNumber=" + orbitNumber + "&"; } if (relativeOrbitNumber > 0) { searchUrl += "relativeOrbitNumber=" + relativeOrbitNumber + "&"; } searchUrl = searchUrl.substring(0, searchUrl.length() - 1); // Fetch the JSON meta data with found Theia products. InputStream inputStream = null; ByteArrayOutputStream outputStream = null; try { final URL url = new URL(searchUrl); final HttpsURLConnection connection = (HttpsURLConnection) url.openConnection(); // HP Fortify "Certificate Validation" False Positive // we allow for custom trust store to anchor acceptable certs // to reduce the level of trust if desired connection.setUseCaches(false); connection.setRequestProperty(HttpHeaders.USER_AGENT, "Mozilla/5.0"); connection.setRequestMethod("GET"); // allow for custom trust store to anchor acceptable certs, use an // expected file in the workspace directory final File customCertsFile = new File(scenesDir.getParentFile(), "theia-keystore.crt"); applyCustomCertsFile(connection, customCertsFile); inputStream = connection.getInputStream(); // HP Fortify "Resource Shutdown" false positive // The InputStream is being closed in the finally block IOUtils.copyLarge(inputStream, outputStream = new ByteArrayOutputStream()); final String geoJson = new String(outputStream.toByteArray(), java.nio.charset.StandardCharsets.UTF_8); final JSONObject response = JSONObject.fromObject(geoJson); final JSONArray features = response.getJSONArray("features"); final SimpleFeatureTypeBuilder typeBuilder = sceneFeatureTypeBuilder(); final SimpleFeatureType type = typeBuilder.buildFeatureType(); class TheiaJSONFeatureIterator extends JSONFeatureIterator { public TheiaJSONFeatureIterator( final Sentinel2ImageryProvider provider, final SimpleFeatureType featureType, final Iterator<?> iterator) { super(provider, featureType, iterator); } @Override public SimpleFeature next() { final SimpleFeature feature = super.next(); JSONObject jsonObject = null; if ((feature != null) && ((jsonObject = super.currentObject()) != null)) { final JSONObject properties = (JSONObject) jsonObject.get("properties"); final String entityId = jsonObject.getString("id"); final String collection = properties.getString(SceneFeatureIterator.COLLECTION_ATTRIBUTE_NAME); final String downloadUrl = String.format(DOWNLOAD_URL, collection, entityId); feature.setAttribute(SceneFeatureIterator.SCENE_DOWNLOAD_ATTRIBUTE_NAME, downloadUrl); } return feature; } }; return new TheiaJSONFeatureIterator(this, type, features.iterator()); } catch (GeneralSecurityException | FactoryException e) { throw new IOException(e); } finally { if (outputStream != null) { IOUtils.closeQuietly(outputStream); outputStream = null; } if (inputStream != null) { IOUtils.closeQuietly(inputStream); inputStream = null; } } } @Override public boolean downloadScene( final SimpleFeature scene, final String workspaceDir, final String userIdent, final String password) throws IOException { final String tokenUrl = AUNTHENTICATION_URL; String authentication; String tokenId; final String collection = (String) scene.getAttribute(SceneFeatureIterator.COLLECTION_ATTRIBUTE_NAME); final String productId = (String) scene.getAttribute(SceneFeatureIterator.PRODUCT_ID_ATTRIBUTE_NAME); final String entityId = (String) scene.getAttribute(SceneFeatureIterator.ENTITY_ID_ATTRIBUTE_NAME); // Check authentication parameters if ((userIdent == null) || (userIdent.length() == 0) || (password == null) || (password.length() == 0)) { LOGGER.error("Invalid or empty authentication parameters (email and password)"); return false; } try { authentication = "ident=" + URLEncoder.encode(userIdent, "UTF-8") + "&pass=" + URLEncoder.encode(password, "UTF-8"); } catch (final UnsupportedEncodingException e) { LOGGER.error( "Invalid or empty authentication parameters (email and password)" + e.getMessage()); return false; } // Get a valid tokenId to download data InputStream inputStream = null; try { final URL url = new URL(tokenUrl); final HttpsURLConnection connection = (HttpsURLConnection) url.openConnection(); // HP Fortify "Certificate Validation" False Positive // we allow for custom trust store to anchor acceptable certs // to reduce the level of trust if desired connection.setUseCaches(false); connection.setRequestProperty(HttpHeaders.USER_AGENT, "Mozilla/5.0"); connection.setRequestMethod("POST"); connection.setDoOutput(true); connection.setRequestProperty( HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_FORM_URLENCODED); connection.setRequestProperty( HttpHeaders.CONTENT_LENGTH, String.valueOf(authentication.length())); // allow for custom trust store to anchor acceptable certs, use an // expected file in the workspace directory final File customCertsFile = new File(workspaceDir, "theia-keystore.crt"); applyCustomCertsFile(connection, customCertsFile); final OutputStream os = connection.getOutputStream(); // HP Fortify "Resource Shutdown" false positive // The OutputStream is being closed os.write(authentication.getBytes("UTF-8")); // HP Fortify "Privacy Violation" false positive // In this case the password is being sent to an output // stream in order to authenticate the system and allow // us to perform the requested download. os.flush(); os.close(); inputStream = connection.getInputStream(); // HP Fortify "Resource Shutdown" false positive // The InputStream is being closed in the finally block final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); IOUtils.copyLarge(inputStream, outputStream); tokenId = new String(outputStream.toByteArray(), java.nio.charset.StandardCharsets.UTF_8); IOUtils.closeQuietly(outputStream); } catch (final IOException | GeneralSecurityException e) { LOGGER.error("Unable to query a token to download '" + e.getMessage() + "'"); return false; } finally { if (inputStream != null) { IOUtils.closeQuietly(inputStream); inputStream = null; } } // Token is right? if (tokenId.length() == 0) { LOGGER.error("Unable to get a token to download. Check your ident and password"); return false; } // First steps to download the gzipped file final File compressedFile = new File( workspaceDir + File.separator + DOWNLOAD_DIRECTORY + File.separator + productId + ".zip"); final File productDir = DownloadRunner.getSceneDirectory(scene, workspaceDir); // Download the gzipped file final String downloadUrl = String.format(DOWNLOAD_URL, collection, entityId); int retry = 0; boolean success = false; while (!success && (retry < DOWNLOAD_RETRY)) { try { final ClientConfig clientConfig = new DefaultClientConfig(); final Client client = Client.create(clientConfig); final ClientResponse response = client.resource(downloadUrl).accept("application/zip").header( javax.ws.rs.core.HttpHeaders.USER_AGENT, "Mozilla/5.0").header( javax.ws.rs.core.HttpHeaders.AUTHORIZATION, "Bearer " + tokenId).get(ClientResponse.class); String displaySize = FileUtils.byteCountToDisplaySize(response.getLength()); System.out.println("\nDownloading file '" + productId + "' (" + displaySize + ")"); System.out.print("Wait please... "); inputStream = response.getEntityInputStream(); final FileOutputStream outputStream = new FileOutputStream(compressedFile); // HP Fortify "Resource Shutdown" false positive // The OutputStream is being closed copyLarge(inputStream, outputStream, response.getLength()); IOUtils.closeQuietly(outputStream); displaySize = FileUtils.byteCountToDisplaySize(compressedFile.length()); System.out.println("File successfully downloaded! (" + displaySize + ")"); ZipUtils.unZipFile(compressedFile, productDir.getAbsolutePath(), true); System.out.println("File successfully unzipped!"); if (!compressedFile.delete()) { LOGGER.warn("Unable to delete file '" + compressedFile.getAbsolutePath() + "'"); } success = true; } catch (final IOException e) { LOGGER.error( "Unable to read file from public '" + downloadUrl + "'; retry round " + ++retry, e); } finally { if (inputStream != null) { IOUtils.closeQuietly(inputStream); inputStream = null; } } } return success; } /** * Fetch the coverage of the specified band in the specified workspace directory */ @Override public RasterBandData getCoverage(final SimpleFeature band, final String workspaceDir) throws IOException { final File sceneDir = DownloadRunner.getSceneDirectory(band, workspaceDir); final String entityId = (String) band.getAttribute(SceneFeatureIterator.ENTITY_ID_ATTRIBUTE_NAME); final String productId = (String) band.getAttribute(SceneFeatureIterator.PRODUCT_ID_ATTRIBUTE_NAME); final String bandName = (String) band.getAttribute(BandFeatureIterator.BAND_ATTRIBUTE_NAME); final File file = sceneDir; final String[] fileList = sceneDir.list(); if (fileList != null) { for (final String name : fileList) { final File temp = new File(file.getAbsolutePath() + File.separatorChar + name); if (temp.isDirectory() && name.toUpperCase(Locale.ENGLISH).startsWith(productId.toUpperCase(Locale.ENGLISH))) { // We provide the coverage in ground reflectance with the // correction of slope effects. // The full description of the product format is here: // 'https://theia.cnes.fr/atdistrib/documents/PSC-NT-411-0362-CNES_01_00_SENTINEL-2A_L2A_Products_Description.pdf' // A more succinct one is also available here: // 'http://www.cesbio.ups-tlse.fr/multitemp/?page_id=8352' // final File geotiffFile = new File( file.getAbsolutePath() + File.separatorChar + name + File.separatorChar + name + "_FRE_" + bandName + ".tif"); if (geotiffFile.exists()) { final GDALGeoTiffReader reader = new GDALGeoTiffReader(geotiffFile); final GridCoverage2D coverage = reader.read(null); reader.dispose(); return new RasterBandData(entityId + "_" + bandName, coverage, reader, NO_DATA_VALUE); } } } } throw new IOException( "The file of the '" + productId + "_" + bandName + "' coverage does not exist"); } }
apache-2.0
Simran-B/arangodb
arangod/VocBase/Methods/UpgradeTasks.cpp
23269
//////////////////////////////////////////////////////////////////////////////// /// DISCLAIMER /// /// Copyright 2014-2021 ArangoDB GmbH, Cologne, Germany /// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany /// /// Licensed under the Apache License, Version 2.0 (the "License"); /// you may not use this file except in compliance with the License. /// You may obtain a copy of the License at /// /// http://www.apache.org/licenses/LICENSE-2.0 /// /// Unless required by applicable law or agreed to in writing, software /// distributed under the License is distributed on an "AS IS" BASIS, /// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. /// See the License for the specific language governing permissions and /// limitations under the License. /// /// Copyright holder is ArangoDB GmbH, Cologne, Germany /// /// @author Simon Grätzer //////////////////////////////////////////////////////////////////////////////// #include "UpgradeTasks.h" #include "Agency/AgencyComm.h" #include "ApplicationFeatures/ApplicationServer.h" #include "Basics/Exceptions.h" #include "Basics/FileUtils.h" #include "Basics/VelocyPackHelper.h" #include "Basics/application-exit.h" #include "Basics/files.h" #include "ClusterEngine/ClusterEngine.h" #include "GeneralServer/AuthenticationFeature.h" #include "Logger/Logger.h" #include "RestServer/DatabaseFeature.h" #include "RestServer/SystemDatabaseFeature.h" #include "RocksDBEngine/RocksDBCommon.h" #include "RocksDBEngine/RocksDBEngine.h" #include "RocksDBEngine/RocksDBIndex.h" #include "StorageEngine/EngineSelectorFeature.h" #include "StorageEngine/PhysicalCollection.h" #include "Transaction/StandaloneContext.h" #include "Utils/OperationOptions.h" #include "VocBase/LogicalCollection.h" #include "VocBase/Methods/CollectionCreationInfo.h" #include "VocBase/Methods/Collections.h" #include "VocBase/Methods/Indexes.h" #include "VocBase/vocbase.h" #include <velocypack/Collection.h> #include <velocypack/velocypack-aliases.h> using namespace arangodb; using namespace arangodb::methods; using application_features::ApplicationServer; using basics::VelocyPackHelper; // Note: this entire file should run with superuser rights namespace { arangodb::Result recreateGeoIndex(TRI_vocbase_t& vocbase, arangodb::LogicalCollection& collection, arangodb::RocksDBIndex* oldIndex) { arangodb::Result res; IndexId iid = oldIndex->id(); VPackBuilder oldDesc; oldIndex->toVelocyPack(oldDesc, Index::makeFlags()); VPackBuilder overw; overw.openObject(); overw.add(arangodb::StaticStrings::IndexType, arangodb::velocypack::Value( arangodb::Index::oldtypeName(Index::TRI_IDX_TYPE_GEO_INDEX))); overw.close(); VPackBuilder newDesc = VPackCollection::merge(oldDesc.slice(), overw.slice(), false); bool dropped = collection.dropIndex(iid); if (!dropped) { res.reset(TRI_ERROR_INTERNAL); return res; } bool created = false; auto newIndex = collection.getPhysical()->createIndex(newDesc.slice(), /*restore*/ true, created); if (!created) { res.reset(TRI_ERROR_INTERNAL); } TRI_ASSERT(newIndex->id() == iid); // will break cluster otherwise TRI_ASSERT(newIndex->type() == Index::TRI_IDX_TYPE_GEO_INDEX); return res; } Result upgradeGeoIndexes(TRI_vocbase_t& vocbase) { if (!vocbase.server().getFeature<EngineSelectorFeature>().isRocksDB()) { LOG_TOPIC("2cb46", DEBUG, Logger::STARTUP) << "No need to upgrade geo indexes!"; return {}; } auto collections = vocbase.collections(false); for (auto collection : collections) { auto indexes = collection->getIndexes(); for (auto index : indexes) { RocksDBIndex* rIndex = static_cast<RocksDBIndex*>(index.get()); if (index->type() == Index::TRI_IDX_TYPE_GEO1_INDEX || index->type() == Index::TRI_IDX_TYPE_GEO2_INDEX) { LOG_TOPIC("5e53d", INFO, Logger::STARTUP) << "Upgrading legacy geo index '" << rIndex->id().id() << "'"; auto res = ::recreateGeoIndex(vocbase, *collection, rIndex); if (res.fail()) { LOG_TOPIC("5550a", ERR, Logger::STARTUP) << "Error upgrading geo indexes " << res.errorMessage(); return res; } } } } return {}; } Result createSystemCollections(TRI_vocbase_t& vocbase, std::vector<std::shared_ptr<LogicalCollection>>& createdCollections) { typedef std::function<void(std::shared_ptr<LogicalCollection> const&)> FuncCallback; FuncCallback const noop = [](std::shared_ptr<LogicalCollection> const&) -> void {}; OperationOptions options(ExecContext::current()); std::vector<CollectionCreationInfo> systemCollectionsToCreate; // the order of systemCollections is important. If we're in _system db, the // UsersCollection needs to be first, otherwise, the GraphsCollection must be first. std::vector<std::string> systemCollections; systemCollections.reserve(10); std::shared_ptr<LogicalCollection> colToDistributeShardsLike; Result res; if (vocbase.isSystem()) { // check for legacy sharding, could still be graphs. std::shared_ptr<LogicalCollection> coll; res = methods::Collections::lookup(vocbase, StaticStrings::GraphsCollection, coll); if (res.ok()) { TRI_ASSERT(coll); if (coll && coll.get()->distributeShardsLike().empty()) { // We have a graphs collection, and this is not sharded by something else. colToDistributeShardsLike = std::move(coll); } } if (colToDistributeShardsLike == nullptr) { // otherwise, we will use UsersCollection for distributeShardsLike res = methods::Collections::createSystem(vocbase, options, StaticStrings::UsersCollection, /*isNewDatabase*/ true, colToDistributeShardsLike); if (!res.ok()) { return res; } } else { systemCollections.push_back(StaticStrings::UsersCollection); } createdCollections.push_back(colToDistributeShardsLike); systemCollections.push_back(StaticStrings::GraphsCollection); systemCollections.push_back(StaticStrings::StatisticsCollection); systemCollections.push_back(StaticStrings::Statistics15Collection); systemCollections.push_back(StaticStrings::StatisticsRawCollection); } else { // we will use GraphsCollection for distributeShardsLike // this is equal to older versions res = methods::Collections::createSystem(vocbase, options, StaticStrings::GraphsCollection, /*isNewDatabase*/ true, colToDistributeShardsLike); if (!res.ok()) { return res; } createdCollections.push_back(colToDistributeShardsLike); } TRI_ASSERT(colToDistributeShardsLike != nullptr); systemCollections.push_back(StaticStrings::AnalyzersCollection); systemCollections.push_back(StaticStrings::AqlFunctionsCollection); systemCollections.push_back(StaticStrings::QueuesCollection); systemCollections.push_back(StaticStrings::JobsCollection); systemCollections.push_back(StaticStrings::AppsCollection); systemCollections.push_back(StaticStrings::AppBundlesCollection); systemCollections.push_back(StaticStrings::FrontendCollection); TRI_IF_FAILURE("UpgradeTasks::CreateCollectionsExistsGraphAqlFunctions") { VPackBuilder testOptions; std::vector<std::shared_ptr<VPackBuffer<uint8_t>>> testBuffers; std::vector<CollectionCreationInfo> testSystemCollectionsToCreate; std::vector<std::string> testSystemCollections = {StaticStrings::GraphsCollection, StaticStrings::AqlFunctionsCollection}; for (auto const& collection : testSystemCollections) { VPackBuilder options; methods::Collections::createSystemCollectionProperties(collection, options, vocbase); testSystemCollectionsToCreate.emplace_back( CollectionCreationInfo{collection, TRI_COL_TYPE_DOCUMENT, options.slice()}); testBuffers.emplace_back(options.steal()); } std::vector<std::shared_ptr<LogicalCollection>> cols; auto res = methods::Collections::create(vocbase, options, testSystemCollectionsToCreate, true, true, true, colToDistributeShardsLike, cols); // capture created collection vector createdCollections.insert(std::end(createdCollections), std::begin(cols), std::end(cols)); } std::vector<std::shared_ptr<VPackBuffer<uint8_t>>> buffers; for (auto const& cname : systemCollections) { std::shared_ptr<LogicalCollection> col; res = methods::Collections::lookup(vocbase, cname, col); if (col) { createdCollections.emplace_back(col); } if (res.is(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND)) { // if not found, create it VPackBuilder options; methods::Collections::createSystemCollectionProperties(cname, options, vocbase); systemCollectionsToCreate.emplace_back( CollectionCreationInfo{cname, TRI_COL_TYPE_DOCUMENT, options.slice()}); buffers.emplace_back(options.steal()); } } // We capture the vector of created LogicalCollections here // to use it to create indices later. if (systemCollectionsToCreate.size() > 0) { std::vector<std::shared_ptr<LogicalCollection>> cols; res = methods::Collections::create(vocbase, options, systemCollectionsToCreate, true, true, true, colToDistributeShardsLike, cols); if (res.fail()) { return res; } createdCollections.insert(std::end(createdCollections), std::begin(cols), std::end(cols)); } return {TRI_ERROR_NO_ERROR}; } Result createSystemStatisticsCollections(TRI_vocbase_t& vocbase, std::vector<std::shared_ptr<LogicalCollection>>& createdCollections) { if (vocbase.isSystem()) { typedef std::function<void(std::shared_ptr<LogicalCollection> const&)> FuncCallback; FuncCallback const noop = [](std::shared_ptr<LogicalCollection> const&) -> void {}; std::vector<CollectionCreationInfo> systemCollectionsToCreate; // the order of systemCollections is important. If we're in _system db, the // UsersCollection needs to be first, otherwise, the GraphsCollection must be first. std::vector<std::string> systemCollections; Result res; systemCollections.push_back(StaticStrings::StatisticsCollection); systemCollections.push_back(StaticStrings::Statistics15Collection); systemCollections.push_back(StaticStrings::StatisticsRawCollection); std::vector<std::shared_ptr<VPackBuffer<uint8_t>>> buffers; for (auto const& collection : systemCollections) { std::shared_ptr<LogicalCollection> col; res = methods::Collections::lookup(vocbase, collection, col); if (col) { createdCollections.emplace_back(std::move(col)); } if (res.is(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND)) { // if not found, create it VPackBuilder options; options.openObject(); options.add(StaticStrings::DataSourceSystem, VPackSlice::trueSlice()); options.add(StaticStrings::WaitForSyncString, VPackSlice::falseSlice()); options.close(); systemCollectionsToCreate.emplace_back( CollectionCreationInfo{collection, TRI_COL_TYPE_DOCUMENT, options.slice()}); buffers.emplace_back(options.steal()); } } // We capture the vector of created LogicalCollections here // to use it to create indices later. if (systemCollectionsToCreate.size() > 0) { std::vector<std::shared_ptr<LogicalCollection>> cols; OperationOptions options(ExecContext::current()); res = methods::Collections::create(vocbase, options, systemCollectionsToCreate, true, false, false, nullptr, cols); if (res.fail()) { return res; } // capture created collection vector createdCollections.insert(std::end(createdCollections), std::begin(cols), std::end(cols)); } } return {TRI_ERROR_NO_ERROR}; } static Result createIndex(std::string const& name, Index::IndexType type, std::vector<std::string> const& fields, bool unique, bool sparse, std::vector<std::shared_ptr<LogicalCollection>>& collections) { // Static helper function that wraps creating an index. If we fail to // create an index with some indices created, we clean up by removing all // collections later on. Find the collection by name auto colIt = std::find_if(collections.begin(), collections.end(), [&name](std::shared_ptr<LogicalCollection> const& col) { TRI_ASSERT(col != nullptr); return col->name() == name; }); if (colIt == collections.end()) { return Result(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND, "Collection " + name + " not found"); } return methods::Indexes::createIndex(colIt->get(), type, fields, unique, sparse, false /*estimates*/); } Result createSystemStatisticsIndices(TRI_vocbase_t& vocbase, std::vector<std::shared_ptr<LogicalCollection>>& collections) { Result res; if (vocbase.isSystem()) { res = ::createIndex(StaticStrings::StatisticsCollection, arangodb::Index::TRI_IDX_TYPE_SKIPLIST_INDEX, {"time"}, false, false, collections); if (!res.ok() && !res.is(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND)) { return res; } res = ::createIndex(StaticStrings::Statistics15Collection, arangodb::Index::TRI_IDX_TYPE_SKIPLIST_INDEX, {"time"}, false, false, collections); if (!res.ok() && !res.is(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND)) { return res; } res = ::createIndex(StaticStrings::StatisticsRawCollection, arangodb::Index::TRI_IDX_TYPE_SKIPLIST_INDEX, {"time"}, false, false, collections); if (!res.ok() && !res.is(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND)) { return res; } } return res; } Result createSystemCollectionsIndices(TRI_vocbase_t& vocbase, std::vector<std::shared_ptr<LogicalCollection>>& collections) { Result res; if (vocbase.isSystem()) { res = ::createIndex(StaticStrings::UsersCollection, arangodb::Index::TRI_IDX_TYPE_HASH_INDEX, {"user"}, true, true, collections); if (!res.ok()) { return res; } res = ::createSystemStatisticsIndices(vocbase, collections); if (!res.ok()) { return res; } } res = upgradeGeoIndexes(vocbase); if (!res.ok()) { return res; } res = ::createIndex(StaticStrings::AppsCollection, arangodb::Index::TRI_IDX_TYPE_HASH_INDEX, {"mount"}, true, true, collections); if (!res.ok()) { return res; } res = ::createIndex(StaticStrings::JobsCollection, arangodb::Index::TRI_IDX_TYPE_SKIPLIST_INDEX, {"queue", "status", "delayUntil"}, false, false, collections); if (!res.ok()) { return res; } res = ::createIndex(StaticStrings::JobsCollection, arangodb::Index::TRI_IDX_TYPE_SKIPLIST_INDEX, {"status", "queue", "delayUntil"}, false, false, collections); if (!res.ok()) { return res; } return res; } } // namespace bool UpgradeTasks::createSystemCollectionsAndIndices(TRI_vocbase_t& vocbase, arangodb::velocypack::Slice const& slice) { // after the call to ::createSystemCollections this vector should contain // a LogicalCollection for *every* (required) system collection. std::vector<std::shared_ptr<LogicalCollection>> presentSystemCollections; Result res = ::createSystemCollections(vocbase, presentSystemCollections); // TODO: Maybe check or assert that all collections are present (i.e. were // present or created), raise an error if not? if (res.fail()) { LOG_TOPIC("94824", ERR, Logger::STARTUP) << "could not create system collections" << ": error: " << res.errorMessage(); return false; } TRI_IF_FAILURE("UpgradeTasks::HideDatabaseUntilCreationIsFinished") { // just trigger a sleep here. The client test will create the db async // and directly fetch the state of creation. The DB is not allowed to be // visible to the outside world. std::this_thread::sleep_for(std::chrono::milliseconds(5000)); } TRI_IF_FAILURE("UpgradeTasks::FatalExitDuringDatabaseCreation") { FATAL_ERROR_EXIT(); } res = ::createSystemCollectionsIndices(vocbase, presentSystemCollections); if (res.fail()) { LOG_TOPIC("fedc0", ERR, Logger::STARTUP) << "could not create indices for system collections" << ": error: " << res.errorMessage(); return false; } return true; } bool UpgradeTasks::createStatisticsCollectionsAndIndices(TRI_vocbase_t& vocbase, arangodb::velocypack::Slice const& slice) { // This vector should after the call to ::createSystemCollections contain // a LogicalCollection for *every* (required) system collection. std::vector<std::shared_ptr<LogicalCollection>> presentSystemCollections; Result res; res = ::createSystemStatisticsCollections(vocbase, presentSystemCollections); if (res.fail()) { LOG_TOPIC("2824e", ERR, Logger::STARTUP) << "could not create system collections" << ": error: " << res.errorMessage(); return false; } res = ::createSystemStatisticsIndices(vocbase, presentSystemCollections); if (res.fail()) { LOG_TOPIC("dffbd", ERR, Logger::STARTUP) << "could not create indices for system collections" << ": error: " << res.errorMessage(); return false; } return true; } //////////////////////////////////////////////////////////////////////////////// /// @brief drops '_iresearch_analyzers' collection //////////////////////////////////////////////////////////////////////////////// bool UpgradeTasks::dropLegacyAnalyzersCollection(TRI_vocbase_t& vocbase, arangodb::velocypack::Slice const& /*upgradeParams*/) { // drop legacy collection if upgrading the system vocbase and collection found #ifdef ARANGODB_ENABLE_MAINTAINER_MODE if (!vocbase.server().hasFeature<arangodb::SystemDatabaseFeature>()) { LOG_TOPIC("8783e", WARN, Logger::STARTUP) << "failure to find '" << arangodb::SystemDatabaseFeature::name() << "' feature while registering legacy static analyzers with vocbase '" << vocbase.name() << "'"; TRI_set_errno(TRI_ERROR_INTERNAL); return false; // internal error } auto& sysDatabase = vocbase.server().getFeature<arangodb::SystemDatabaseFeature>(); auto sysVocbase = sysDatabase.use(); TRI_ASSERT(sysVocbase.get() == &vocbase || sysVocbase->name() == vocbase.name()); #endif // find legacy analyzer collection std::shared_ptr<arangodb::LogicalCollection> col; auto res = arangodb::methods::Collections::lookup(vocbase, StaticStrings::LegacyAnalyzersCollection, col); if (col) { res = arangodb::methods::Collections::drop(*col, true, -1.0); // -1.0 same as in RestCollectionHandler return res.ok(); } return res.is(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND); } bool UpgradeTasks::addDefaultUserOther(TRI_vocbase_t& vocbase, arangodb::velocypack::Slice const& params) { TRI_ASSERT(!vocbase.isSystem()); TRI_ASSERT(params.isObject()); VPackSlice users = params.get("users"); if (users.isNone()) { return true; // exit, no users were specified } else if (!users.isArray()) { LOG_TOPIC("44623", ERR, Logger::STARTUP) << "addDefaultUserOther: users is invalid"; return false; } auth::UserManager* um = AuthenticationFeature::instance()->userManager(); if (um == nullptr) { return true; // server does not support users } for (VPackSlice slice : VPackArrayIterator(users)) { std::string user = VelocyPackHelper::getStringValue(slice, "username", StaticStrings::Empty); if (user.empty()) { continue; } std::string passwd = VelocyPackHelper::getStringValue(slice, "passwd", ""); bool active = VelocyPackHelper::getBooleanValue(slice, "active", true); VPackSlice extra = slice.get("extra"); Result res = um->storeUser(false, user, passwd, active, VPackSlice::noneSlice()); if (res.fail() && !res.is(TRI_ERROR_USER_DUPLICATE)) { LOG_TOPIC("b5b8a", WARN, Logger::STARTUP) << "could not add database user " << user << ": " << res.errorMessage(); } else if (extra.isObject() && !extra.isEmptyObject()) { um->updateUser(user, [&](auth::User& user) { user.setUserData(VPackBuilder(extra)); return TRI_ERROR_NO_ERROR; }); } res = um->updateUser(user, [&](auth::User& entry) { entry.grantDatabase(vocbase.name(), auth::Level::RW); entry.grantCollection(vocbase.name(), "*", auth::Level::RW); return TRI_ERROR_NO_ERROR; }); if (res.fail()) { LOG_TOPIC("60019", WARN, Logger::STARTUP) << "could not set permissions for new user " << user << ": " << res.errorMessage(); } } return true; } bool UpgradeTasks::renameReplicationApplierStateFiles(TRI_vocbase_t& vocbase, arangodb::velocypack::Slice const& slice) { TRI_ASSERT(vocbase.server().getFeature<EngineSelectorFeature>().isRocksDB()); StorageEngine& engine = vocbase.server().getFeature<EngineSelectorFeature>().engine(); std::string const path = engine.databasePath(&vocbase); std::string const source = arangodb::basics::FileUtils::buildFilename(path, "REPLICATION-APPLIER-STATE"); if (!basics::FileUtils::isRegularFile(source)) { // source file does not exist return true; } bool result = true; // copy file REPLICATION-APPLIER-STATE to REPLICATION-APPLIER-STATE-<id> Result res = basics::catchToResult([&vocbase, &path, &source, &result]() -> Result { std::string const dest = arangodb::basics::FileUtils::buildFilename( path, "REPLICATION-APPLIER-STATE-" + std::to_string(vocbase.id())); LOG_TOPIC("75337", TRACE, Logger::STARTUP) << "copying replication applier file '" << source << "' to '" << dest << "'"; std::string error; if (!TRI_CopyFile(source, dest, error)) { LOG_TOPIC("6c90c", WARN, Logger::STARTUP) << "could not copy replication applier file '" << source << "' to '" << dest << "'"; result = false; } return Result(); }); if (res.fail()) { return false; } return result; }
apache-2.0
smalenfant/traffic_control
traffic_router/core/src/test/java/com/comcast/cdn/traffic_control/traffic_router/core/external/StatsTest.java
4915
/* * Copyright 2015 Comcast Cable Communications Management, LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.comcast.cdn.traffic_control.traffic_router.core.external; import com.comcast.cdn.traffic_control.traffic_router.core.util.ExternalTest; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.catalina.LifecycleException; import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpGet; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.util.EntityUtils; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; import java.util.HashMap; import java.util.Map; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.hasItems; import static org.hamcrest.core.IsEqual.equalTo; @Category(ExternalTest.class) public class StatsTest { CloseableHttpClient httpClient; @Before public void before() throws LifecycleException { httpClient = HttpClientBuilder.create().build(); } @After public void after() throws Exception { if (httpClient != null) httpClient.close(); } @Test public void itGetsApplicationStats() throws Exception { HttpGet httpGet = new HttpGet("http://localhost:3333/crs/stats"); CloseableHttpResponse httpResponse = null; try { httpResponse = httpClient.execute(httpGet); String responseContent = EntityUtils.toString(httpResponse.getEntity()); ObjectMapper objectMapper = new ObjectMapper(); Map<String, Object> data = objectMapper.readValue(responseContent, new TypeReference<HashMap<String, Object>>() { }); assertThat(data.keySet(), containsInAnyOrder("app", "stats")); Map<String, Object> appData = (Map<String, Object>) data.get("app"); assertThat(appData.keySet(), containsInAnyOrder("buildTimestamp", "name", "deploy-dir", "git-revision", "version")); Map<String, Object> statsData = (Map<String, Object>) data.get("stats"); assertThat(statsData.keySet(), containsInAnyOrder("dnsMap", "httpMap", "totalDnsCount", "totalHttpCount", "totalDsMissCount", "appStartTime", "averageDnsTime", "averageHttpTime", "updateTracker")); Map<String, Object> dnsStats = (Map<String, Object>) statsData.get("dnsMap"); Map<String, Object> cacheDnsStats = (Map<String, Object>) dnsStats.values().iterator().next(); assertThat(cacheDnsStats.keySet(), containsInAnyOrder("czCount", "geoCount", "missCount", "dsrCount", "errCount", "staticRouteCount", "fedCount", "regionalDeniedCount", "regionalAlternateCount")); Map<String, Object> httpStats = (Map<String, Object>) statsData.get("httpMap"); Map<String, Object> cacheHttpStats = (Map<String, Object>) httpStats.values().iterator().next(); assertThat(cacheHttpStats.keySet(), containsInAnyOrder("czCount", "geoCount", "missCount", "dsrCount", "errCount", "staticRouteCount", "fedCount", "regionalDeniedCount", "regionalAlternateCount")); Map<String, Object> updateTracker = (Map<String, Object>) statsData.get("updateTracker"); assertThat(updateTracker.keySet(), hasItems("lastCacheStateCheck", "lastCacheStateChange", "lastConfigCheck", "lastConfigChange")); } finally { if (httpResponse != null) httpResponse.close(); } } @Test public void itGetsLocationsByIp() throws Exception { HttpGet httpGet = new HttpGet("http://localhost:3333/crs/stats/ip/8.8.8.8"); CloseableHttpResponse response = null; try { response = httpClient.execute(httpGet); String actual = EntityUtils.toString(response.getEntity()); Map<String, Object> data = new ObjectMapper().readValue(actual, new TypeReference<HashMap<String, Object>>() { }); assertThat((String) data.get("requestIp"), equalTo("8.8.8.8")); assertThat((String) data.get("locationByFederation"), equalTo("not found")); assertThat((String) data.get("locationByCoverageZone"), equalTo("not found")); Map<String, Object> locationByGeo = (Map<String, Object>) data.get("locationByGeo"); assertThat(locationByGeo.keySet(), containsInAnyOrder("city", "countryCode", "latitude", "longitude", "postalCode", "countryName")); } finally { if (response != null) response.close(); } } }
apache-2.0
snorden/parquet-mr-apache-parquet-1.7.0
parquet-hadoop/src/test/java/org/apache/parquet/hadoop/TestParquetFileWriter.java
21679
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.parquet.hadoop; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.junit.Test; import org.apache.parquet.Log; import org.apache.parquet.bytes.BytesInput; import org.apache.parquet.column.ColumnDescriptor; import org.apache.parquet.column.Encoding; import org.apache.parquet.column.page.DataPage; import org.apache.parquet.column.page.DataPageV1; import org.apache.parquet.column.page.PageReadStore; import org.apache.parquet.column.page.PageReader; import org.apache.parquet.column.statistics.BinaryStatistics; import org.apache.parquet.column.statistics.LongStatistics; import org.apache.parquet.format.Statistics; import org.apache.parquet.hadoop.metadata.*; import org.apache.parquet.hadoop.util.HiddenFileFilter; import org.apache.parquet.io.api.Binary; import org.apache.parquet.schema.MessageType; import org.apache.parquet.schema.MessageTypeParser; import org.apache.parquet.schema.PrimitiveType; import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName; import java.io.File; import java.io.IOException; import java.util.*; import static org.junit.Assert.*; import static org.apache.parquet.column.Encoding.BIT_PACKED; import static org.apache.parquet.column.Encoding.PLAIN; import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.BINARY; import static org.apache.parquet.schema.Type.Repetition.*; import static org.apache.parquet.hadoop.TestUtils.enforceEmptyDir; import org.apache.parquet.example.data.Group; import org.apache.parquet.example.data.simple.SimpleGroup; import org.apache.parquet.hadoop.example.GroupWriteSupport; public class TestParquetFileWriter { private static final Log LOG = Log.getLog(TestParquetFileWriter.class); private String writeSchema; @Test public void testWriteMode() throws Exception { File testDir = new File("target/test/TestParquetFileWriter/"); testDir.mkdirs(); File testFile = new File(testDir, "testParquetFile"); testFile = testFile.getAbsoluteFile(); testFile.createNewFile(); MessageType schema = MessageTypeParser.parseMessageType( "message m { required group a {required binary b;} required group " + "c { required int64 d; }}"); Configuration conf = new Configuration(); ParquetFileWriter writer = null; boolean exceptionThrown = false; Path path = new Path(testFile.toURI()); try { writer = new ParquetFileWriter(conf, schema, path, ParquetFileWriter.Mode.CREATE); } catch(IOException ioe1) { exceptionThrown = true; } assertTrue(exceptionThrown); exceptionThrown = false; try { writer = new ParquetFileWriter(conf, schema, path, ParquetFileWriter.Mode.OVERWRITE); } catch(IOException ioe2) { exceptionThrown = true; } assertTrue(!exceptionThrown); testFile.delete(); } @Test public void testWriteRead() throws Exception { File testFile = new File("target/test/TestParquetFileWriter/testParquetFile").getAbsoluteFile(); testFile.delete(); Path path = new Path(testFile.toURI()); Configuration configuration = new Configuration(); MessageType schema = MessageTypeParser.parseMessageType("message m { required group a {required binary b;} required group c { required int64 d; }}"); String[] path1 = {"a", "b"}; ColumnDescriptor c1 = schema.getColumnDescription(path1); String[] path2 = {"c", "d"}; ColumnDescriptor c2 = schema.getColumnDescription(path2); byte[] bytes1 = { 0, 1, 2, 3}; byte[] bytes2 = { 1, 2, 3, 4}; byte[] bytes3 = { 2, 3, 4, 5}; byte[] bytes4 = { 3, 4, 5, 6}; CompressionCodecName codec = CompressionCodecName.UNCOMPRESSED; BinaryStatistics stats1 = new BinaryStatistics(); BinaryStatistics stats2 = new BinaryStatistics(); ParquetFileWriter w = new ParquetFileWriter(configuration, schema, path); w.start(); w.startBlock(3); w.startColumn(c1, 5, codec); long c1Starts = w.getPos(); w.writeDataPage(2, 4, BytesInput.from(bytes1), stats1, BIT_PACKED, BIT_PACKED, PLAIN); w.writeDataPage(3, 4, BytesInput.from(bytes1), stats1, BIT_PACKED, BIT_PACKED, PLAIN); w.endColumn(); long c1Ends = w.getPos(); w.startColumn(c2, 6, codec); long c2Starts = w.getPos(); w.writeDataPage(2, 4, BytesInput.from(bytes2), stats2, BIT_PACKED, BIT_PACKED, PLAIN); w.writeDataPage(3, 4, BytesInput.from(bytes2), stats2, BIT_PACKED, BIT_PACKED, PLAIN); w.writeDataPage(1, 4, BytesInput.from(bytes2), stats2, BIT_PACKED, BIT_PACKED, PLAIN); w.endColumn(); long c2Ends = w.getPos(); w.endBlock(); w.startBlock(4); w.startColumn(c1, 7, codec); w.writeDataPage(7, 4, BytesInput.from(bytes3), stats1, BIT_PACKED, BIT_PACKED, PLAIN); w.endColumn(); w.startColumn(c2, 8, codec); w.writeDataPage(8, 4, BytesInput.from(bytes4), stats2, BIT_PACKED, BIT_PACKED, PLAIN); w.endColumn(); w.endBlock(); w.end(new HashMap<String, String>()); ParquetMetadata readFooter = ParquetFileReader.readFooter(configuration, path); assertEquals("footer: "+ readFooter, 2, readFooter.getBlocks().size()); assertEquals(c1Ends - c1Starts, readFooter.getBlocks().get(0).getColumns().get(0).getTotalSize()); assertEquals(c2Ends - c2Starts, readFooter.getBlocks().get(0).getColumns().get(1).getTotalSize()); assertEquals(c2Ends - c1Starts, readFooter.getBlocks().get(0).getTotalByteSize()); HashSet<Encoding> expectedEncoding=new HashSet<Encoding>(); expectedEncoding.add(PLAIN); expectedEncoding.add(BIT_PACKED); assertEquals(expectedEncoding,readFooter.getBlocks().get(0).getColumns().get(0).getEncodings()); { // read first block of col #1 ParquetFileReader r = new ParquetFileReader(configuration, path, Arrays.asList(readFooter.getBlocks().get(0)), Arrays.asList(schema.getColumnDescription(path1))); PageReadStore pages = r.readNextRowGroup(); assertEquals(3, pages.getRowCount()); validateContains(schema, pages, path1, 2, BytesInput.from(bytes1)); validateContains(schema, pages, path1, 3, BytesInput.from(bytes1)); assertNull(r.readNextRowGroup()); } { // read all blocks of col #1 and #2 ParquetFileReader r = new ParquetFileReader(configuration, path, readFooter.getBlocks(), Arrays.asList(schema.getColumnDescription(path1), schema.getColumnDescription(path2))); PageReadStore pages = r.readNextRowGroup(); assertEquals(3, pages.getRowCount()); validateContains(schema, pages, path1, 2, BytesInput.from(bytes1)); validateContains(schema, pages, path1, 3, BytesInput.from(bytes1)); validateContains(schema, pages, path2, 2, BytesInput.from(bytes2)); validateContains(schema, pages, path2, 3, BytesInput.from(bytes2)); validateContains(schema, pages, path2, 1, BytesInput.from(bytes2)); pages = r.readNextRowGroup(); assertEquals(4, pages.getRowCount()); validateContains(schema, pages, path1, 7, BytesInput.from(bytes3)); validateContains(schema, pages, path2, 8, BytesInput.from(bytes4)); assertNull(r.readNextRowGroup()); } PrintFooter.main(new String[] {path.toString()}); } @Test public void testConvertToThriftStatistics() throws Exception { long[] longArray = new long[] {39L, 99L, 12L, 1000L, 65L, 542L, 2533461316L, -253346131996L, Long.MAX_VALUE, Long.MIN_VALUE}; LongStatistics parquetMRstats = new LongStatistics(); for (long l: longArray) { parquetMRstats.updateStats(l); } Statistics thriftStats = org.apache.parquet.format.converter.ParquetMetadataConverter.toParquetStatistics(parquetMRstats); LongStatistics convertedBackStats = (LongStatistics) org.apache.parquet.format.converter.ParquetMetadataConverter.fromParquetStatistics(thriftStats, PrimitiveTypeName.INT64); assertEquals(parquetMRstats.getMax(), convertedBackStats.getMax()); assertEquals(parquetMRstats.getMin(), convertedBackStats.getMin()); assertEquals(parquetMRstats.getNumNulls(), convertedBackStats.getNumNulls()); } @Test public void testWriteReadStatistics() throws Exception { File testFile = new File("target/test/TestParquetFileWriter/testParquetFile").getAbsoluteFile(); testFile.delete(); Path path = new Path(testFile.toURI()); Configuration configuration = new Configuration(); MessageType schema = MessageTypeParser.parseMessageType("message m { required group a {required binary b;} required group c { required int64 d; }}"); String[] path1 = {"a", "b"}; ColumnDescriptor c1 = schema.getColumnDescription(path1); String[] path2 = {"c", "d"}; ColumnDescriptor c2 = schema.getColumnDescription(path2); byte[] bytes1 = { 0, 1, 2, 3}; byte[] bytes2 = { 1, 2, 3, 4}; byte[] bytes3 = { 2, 3, 4, 5}; byte[] bytes4 = { 3, 4, 5, 6}; CompressionCodecName codec = CompressionCodecName.UNCOMPRESSED; BinaryStatistics statsB1C1P1 = new BinaryStatistics(); BinaryStatistics statsB1C1P2 = new BinaryStatistics(); LongStatistics statsB1C2P1 = new LongStatistics(); LongStatistics statsB1C2P2 = new LongStatistics(); BinaryStatistics statsB2C1P1 = new BinaryStatistics(); LongStatistics statsB2C2P1 = new LongStatistics(); statsB1C1P1.setMinMax(Binary.fromString("s"), Binary.fromString("z")); statsB1C1P2.setMinMax(Binary.fromString("a"), Binary.fromString("b")); statsB1C2P1.setMinMax(2l, 10l); statsB1C2P2.setMinMax(-6l, 4l); statsB2C1P1.setMinMax(Binary.fromString("d"), Binary.fromString("e")); statsB2C2P1.setMinMax(11l, 122l); ParquetFileWriter w = new ParquetFileWriter(configuration, schema, path); w.start(); w.startBlock(3); w.startColumn(c1, 5, codec); w.writeDataPage(2, 4, BytesInput.from(bytes1), statsB1C1P1, BIT_PACKED, BIT_PACKED, PLAIN); w.writeDataPage(3, 4, BytesInput.from(bytes1), statsB1C1P2, BIT_PACKED, BIT_PACKED, PLAIN); w.endColumn(); w.startColumn(c2, 6, codec); w.writeDataPage(3, 4, BytesInput.from(bytes2), statsB1C2P1, BIT_PACKED, BIT_PACKED, PLAIN); w.writeDataPage(1, 4, BytesInput.from(bytes2), statsB1C2P2, BIT_PACKED, BIT_PACKED, PLAIN); w.endColumn(); w.endBlock(); w.startBlock(4); w.startColumn(c1, 7, codec); w.writeDataPage(7, 4, BytesInput.from(bytes3), statsB2C1P1, BIT_PACKED, BIT_PACKED, PLAIN); w.endColumn(); w.startColumn(c2, 8, codec); w.writeDataPage(8, 4, BytesInput.from(bytes4), statsB2C2P1, BIT_PACKED, BIT_PACKED, PLAIN); w.endColumn(); w.endBlock(); w.end(new HashMap<String, String>()); ParquetMetadata readFooter = ParquetFileReader.readFooter(configuration, path); for (BlockMetaData block : readFooter.getBlocks()) { for (ColumnChunkMetaData col : block.getColumns()) { col.getPath(); } } // correct statistics BinaryStatistics bs1 = new BinaryStatistics(); bs1.setMinMax(Binary.fromString("a"), Binary.fromString("z")); LongStatistics ls1 = new LongStatistics(); ls1.setMinMax(-6l, 10l); BinaryStatistics bs2 = new BinaryStatistics(); bs2.setMinMax(Binary.fromString("d"), Binary.fromString("e")); LongStatistics ls2 = new LongStatistics(); ls2.setMinMax(11l, 122l); { // assert stats are correct for the first block BinaryStatistics bsout = (BinaryStatistics)readFooter.getBlocks().get(0).getColumns().get(0).getStatistics(); String str = new String(bsout.getMaxBytes()); String str2 = new String(bsout.getMinBytes()); assertTrue(((BinaryStatistics)readFooter.getBlocks().get(0).getColumns().get(0).getStatistics()).equals(bs1)); assertTrue(((LongStatistics)readFooter.getBlocks().get(0).getColumns().get(1).getStatistics()).equals(ls1)); } { // assert stats are correct for the second block assertTrue(((BinaryStatistics)readFooter.getBlocks().get(1).getColumns().get(0).getStatistics()).equals(bs2)); assertTrue(((LongStatistics)readFooter.getBlocks().get(1).getColumns().get(1).getStatistics()).equals(ls2)); } } @Test public void testMetaDataFile() throws Exception { File testDir = new File("target/test/TestParquetFileWriter/testMetaDataFileDir").getAbsoluteFile(); Path testDirPath = new Path(testDir.toURI()); Configuration configuration = new Configuration(); final FileSystem fs = testDirPath.getFileSystem(configuration); enforceEmptyDir(configuration, testDirPath); MessageType schema = MessageTypeParser.parseMessageType("message m { required group a {required binary b;} required group c { required int64 d; }}"); createFile(configuration, new Path(testDirPath, "part0"), schema); createFile(configuration, new Path(testDirPath, "part1"), schema); createFile(configuration, new Path(testDirPath, "part2"), schema); FileStatus outputStatus = fs.getFileStatus(testDirPath); List<Footer> footers = ParquetFileReader.readFooters(configuration, outputStatus, false); validateFooters(footers); ParquetFileWriter.writeMetadataFile(configuration, testDirPath, footers); footers = ParquetFileReader.readFooters(configuration, outputStatus, false); validateFooters(footers); footers = ParquetFileReader.readFooters(configuration, fs.getFileStatus(new Path(testDirPath, "part0")), false); assertEquals(1, footers.size()); final FileStatus metadataFile = fs.getFileStatus(new Path(testDirPath, ParquetFileWriter.PARQUET_METADATA_FILE)); final FileStatus metadataFileLight = fs.getFileStatus(new Path(testDirPath, ParquetFileWriter.PARQUET_COMMON_METADATA_FILE)); final List<Footer> metadata = ParquetFileReader.readSummaryFile(configuration, metadataFile); validateFooters(metadata); footers = ParquetFileReader.readAllFootersInParallelUsingSummaryFiles(configuration, Arrays.asList(fs.listStatus(testDirPath, HiddenFileFilter.INSTANCE)), false); validateFooters(footers); fs.delete(metadataFile.getPath(), false); fs.delete(metadataFileLight.getPath(), false); footers = ParquetFileReader.readAllFootersInParallelUsingSummaryFiles(configuration, Arrays.asList(fs.listStatus(testDirPath)), false); validateFooters(footers); } @Test public void testWriteReadStatisticsAllNulls() throws Exception { File testFile = new File("target/test/TestParquetFileWriter/testParquetFile").getAbsoluteFile(); testFile.delete(); writeSchema = "message example {\n" + "required binary content;\n" + "}"; Path path = new Path(testFile.toURI()); MessageType schema = MessageTypeParser.parseMessageType(writeSchema); Configuration configuration = new Configuration(); GroupWriteSupport.setSchema(schema, configuration); ParquetWriter<Group> writer = new ParquetWriter<Group>(path, configuration, new GroupWriteSupport()); Group r1 = new SimpleGroup(schema); writer.write(r1); writer.close(); ParquetMetadata readFooter = ParquetFileReader.readFooter(configuration, path); // assert the statistics object is not empty assertTrue((readFooter.getBlocks().get(0).getColumns().get(0).getStatistics().isEmpty()) == false); // assert the number of nulls are correct for the first block assertEquals(1, (readFooter.getBlocks().get(0).getColumns().get(0).getStatistics().getNumNulls())); } private void validateFooters(final List<Footer> metadata) { LOG.debug(metadata); assertEquals(String.valueOf(metadata), 3, metadata.size()); for (Footer footer : metadata) { final File file = new File(footer.getFile().toUri()); assertTrue(file.getName(), file.getName().startsWith("part")); assertTrue(file.getPath(), file.exists()); final ParquetMetadata parquetMetadata = footer.getParquetMetadata(); assertEquals(2, parquetMetadata.getBlocks().size()); final Map<String, String> keyValueMetaData = parquetMetadata.getFileMetaData().getKeyValueMetaData(); assertEquals("bar", keyValueMetaData.get("foo")); assertEquals(footer.getFile().getName(), keyValueMetaData.get(footer.getFile().getName())); } } private void createFile(Configuration configuration, Path path, MessageType schema) throws IOException { String[] path1 = {"a", "b"}; ColumnDescriptor c1 = schema.getColumnDescription(path1); String[] path2 = {"c", "d"}; ColumnDescriptor c2 = schema.getColumnDescription(path2); byte[] bytes1 = { 0, 1, 2, 3}; byte[] bytes2 = { 1, 2, 3, 4}; byte[] bytes3 = { 2, 3, 4, 5}; byte[] bytes4 = { 3, 4, 5, 6}; CompressionCodecName codec = CompressionCodecName.UNCOMPRESSED; BinaryStatistics stats1 = new BinaryStatistics(); BinaryStatistics stats2 = new BinaryStatistics(); ParquetFileWriter w = new ParquetFileWriter(configuration, schema, path); w.start(); w.startBlock(3); w.startColumn(c1, 5, codec); w.writeDataPage(2, 4, BytesInput.from(bytes1), stats1, BIT_PACKED, BIT_PACKED, PLAIN); w.writeDataPage(3, 4, BytesInput.from(bytes1), stats1, BIT_PACKED, BIT_PACKED, PLAIN); w.endColumn(); w.startColumn(c2, 6, codec); w.writeDataPage(2, 4, BytesInput.from(bytes2), stats2, BIT_PACKED, BIT_PACKED, PLAIN); w.writeDataPage(3, 4, BytesInput.from(bytes2), stats2, BIT_PACKED, BIT_PACKED, PLAIN); w.writeDataPage(1, 4, BytesInput.from(bytes2), stats2, BIT_PACKED, BIT_PACKED, PLAIN); w.endColumn(); w.endBlock(); w.startBlock(4); w.startColumn(c1, 7, codec); w.writeDataPage(7, 4, BytesInput.from(bytes3), stats1, BIT_PACKED, BIT_PACKED, PLAIN); w.endColumn(); w.startColumn(c2, 8, codec); w.writeDataPage(8, 4, BytesInput.from(bytes4), stats2, BIT_PACKED, BIT_PACKED, PLAIN); w.endColumn(); w.endBlock(); final HashMap<String, String> extraMetaData = new HashMap<String, String>(); extraMetaData.put("foo", "bar"); extraMetaData.put(path.getName(), path.getName()); w.end(extraMetaData); } private void validateContains(MessageType schema, PageReadStore pages, String[] path, int values, BytesInput bytes) throws IOException { PageReader pageReader = pages.getPageReader(schema.getColumnDescription(path)); DataPage page = pageReader.readPage(); assertEquals(values, page.getValueCount()); assertArrayEquals(bytes.toByteArray(), ((DataPageV1)page).getBytes().toByteArray()); } @Test public void testMergeMetadata() { FileMetaData md1 = new FileMetaData( new MessageType("root1", new PrimitiveType(REPEATED, BINARY, "a"), new PrimitiveType(OPTIONAL, BINARY, "b")), new HashMap<String, String>(), "test"); FileMetaData md2 = new FileMetaData( new MessageType("root2", new PrimitiveType(REQUIRED, BINARY, "c")), new HashMap<String, String>(), "test2"); GlobalMetaData merged = ParquetFileWriter.mergeInto(md2, ParquetFileWriter.mergeInto(md1, null)); assertEquals( merged.getSchema(), new MessageType("root1", new PrimitiveType(REPEATED, BINARY, "a"), new PrimitiveType(OPTIONAL, BINARY, "b"), new PrimitiveType(REQUIRED, BINARY, "c")) ); } @Test public void testMergeFooters() { List<BlockMetaData> oneBlocks = new ArrayList<BlockMetaData>(); oneBlocks.add(new BlockMetaData()); oneBlocks.add(new BlockMetaData()); List<BlockMetaData> twoBlocks = new ArrayList<BlockMetaData>(); twoBlocks.add(new BlockMetaData()); List<BlockMetaData> expected = new ArrayList<BlockMetaData>(); expected.addAll(oneBlocks); expected.addAll(twoBlocks); Footer one = new Footer(new Path("file:/tmp/output/one.parquet"), new ParquetMetadata(new FileMetaData( new MessageType("root1", new PrimitiveType(REPEATED, BINARY, "a"), new PrimitiveType(OPTIONAL, BINARY, "b")), new HashMap<String, String>(), "test"), oneBlocks)); Footer two = new Footer(new Path("/tmp/output/two.parquet"), new ParquetMetadata(new FileMetaData( new MessageType("root2", new PrimitiveType(REQUIRED, BINARY, "c")), new HashMap<String, String>(), "test2"), twoBlocks)); List<Footer> footers = new ArrayList<Footer>(); footers.add(one); footers.add(two); ParquetMetadata merged = ParquetFileWriter.mergeFooters( new Path("/tmp"), footers); assertEquals( new MessageType("root1", new PrimitiveType(REPEATED, BINARY, "a"), new PrimitiveType(OPTIONAL, BINARY, "b"), new PrimitiveType(REQUIRED, BINARY, "c")), merged.getFileMetaData().getSchema()); assertEquals("Should have all blocks", expected, merged.getBlocks()); } }
apache-2.0
saraguittarclark/cvtrainer
dist/index.html
1953
<!DOCTYPE html> <html ng-app="cvapp"> <head lang="en"> <meta charset="UTF-8"> <title>Computer Vision Trainer</title> <meta name="viewport" content="width=device-width, initial-scale=1"/> <link href='https://fonts.googleapis.com/css?family=Open+Sans|Lato:400,300' rel='stylesheet' type='text/css'/> <link rel="stylesheet" href="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css"/> <link rel="stylesheet" href="/styles/style.css"/> <link rel="stylesheet" href="/styles/style-images.css"/> <link rel="stylesheet" type="text/css" href="styles/animations.css"/> <link rel="icon" type="icon" href="/assets/star-favicon.png"/> </head> <body> <ui-view></ui-view> <div class="container-fluid" id="footer"> <a ui-sref="#">About Computer Vision Trainer</a> </div> <!--JQuery--> <script src="https://code.jquery.com/jquery-2.1.4.min.js"></script> <!-- AngularJS --> <script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.4.7/angular.js"></script> <!--UI Router--> <script src="https://cdnjs.cloudflare.com/ajax/libs/angular-ui-router/0.2.15/angular-ui-router.js"></script> <!-- Firebase --> <script src="https://cdn.firebase.com/js/client/2.2.4/firebase.js"></script> <!-- AngularFire --> <script src="https://cdn.firebase.com/libs/angularfire/1.2.0/angularfire.min.js"></script> <!-- Google Charts --> <script src="https://cdnjs.cloudflare.com/ajax/libs/angular-google-chart/0.1.0/ng-google-chart.js" type="text/javascript"></script> <script src="/scripts/app.js"></script> <script src="/scripts/controllers/LandingCtrl.js"></script> <script src="/scripts/controllers/CVTrainerCtrl.js"></script> <!-- <script src="/scripts/services/FirebaseData.js"></script> --> <script src="/scripts/services/aiData.js"></script> <script src="/scripts/filters/percent.js"></script> <script src="/scripts/filters/startFrom.js"></script> </head> </body> </html>
apache-2.0
apache/lucenenet
src/Lucene.Net.Spatial/Prefix/TermQueryPrefixTreeStrategy.cs
2939
using Lucene.Net.Queries; using Lucene.Net.Search; using Lucene.Net.Spatial.Prefix.Tree; using Lucene.Net.Spatial.Queries; using Lucene.Net.Util; using Spatial4n.Shapes; using System; using System.Collections.Generic; namespace Lucene.Net.Spatial.Prefix { /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /// <summary> /// A basic implementation of <see cref="PrefixTreeStrategy"/> using a large /// <see cref="TermsFilter"/> of all the cells from /// <see cref="SpatialPrefixTree.GetCells(IShape, int, bool, bool)"/>. /// It only supports the search of indexed Point shapes. /// <para/> /// The precision of query shapes (DistErrPct) is an important factor in using /// this Strategy. If the precision is too precise then it will result in many /// terms which will amount to a slower query. /// <para/> /// @lucene.experimental /// </summary> public class TermQueryPrefixTreeStrategy : PrefixTreeStrategy { public TermQueryPrefixTreeStrategy(SpatialPrefixTree grid, string fieldName) : base(grid, fieldName, false)//do not simplify indexed cells { } public override Filter MakeFilter(SpatialArgs args) { // LUCENENET specific - added guard clause if (args is null) throw new ArgumentNullException(nameof(args)); SpatialOperation op = args.Operation; if (op != SpatialOperation.Intersects) { throw new UnsupportedSpatialOperationException(op); } IShape shape = args.Shape; int detailLevel = m_grid.GetLevelForDistance(args.ResolveDistErr(m_ctx, m_distErrPct)); IList<Cell> cells = m_grid.GetCells(shape, detailLevel, false /*no parents*/, true /*simplify*/); var terms = new BytesRef[cells.Count]; int i = 0; foreach (Cell cell in cells) { terms[i++] = new BytesRef(cell.TokenString);//TODO use cell.getTokenBytes() } return new TermsFilter(FieldName, terms); } } }
apache-2.0
swift-lang/swift-k
src/org/griphyn/vdl/karajan/monitor/processors/coasters/WorkerActiveProcessor.java
2365
/* * Swift Parallel Scripting Language (http://swift-lang.org) * Code from Java CoG Kit Project (see notice below) with modifications. * * Copyright 2005-2014 University of Chicago * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //---------------------------------------------------------------------- //This code is developed as part of the Java CoG Kit project //The terms of the license can be found at http://www.cogkit.org/license //This message may not be removed or altered. //---------------------------------------------------------------------- /* * Created on Aug 7, 2013 */ package org.griphyn.vdl.karajan.monitor.processors.coasters; import org.griphyn.vdl.karajan.monitor.SystemState; import org.griphyn.vdl.karajan.monitor.items.StatefulItemClass; import org.griphyn.vdl.karajan.monitor.processors.SimpleParser; public class WorkerActiveProcessor extends AbstractRemoteLogProcessor { private CoasterStatusItem item; @Override public void initialize(SystemState state) { super.initialize(state); } @Override public String getMessageHeader() { return "WORKER_ACTIVE"; } @Override public void processMessage(SystemState state, SimpleParser p, Object details) { try { p.skip("blockid="); String blockId = p.word(); p.skip("id="); String workerId = p.word(); p.skip("node="); String node = p.word(); p.skip("cores="); int cores = Integer.parseInt(p.word()); CoasterStatusItem item = (CoasterStatusItem) state.getItemByID(CoasterStatusItem.ID, StatefulItemClass.MISC); item.workerActive(blockId, workerId, node, cores, state.getCurrentTime()); } catch (Exception e) { e.printStackTrace(); } } }
apache-2.0
rkboyce/DomeoClient
src/org/mindinformatics/gwt/domeo/client/ui/annotation/forms/IAllowsMultipleTargets.java
175
package org.mindinformatics.gwt.domeo.client.ui.annotation.forms; /** * @author Paolo Ciccarese <[email protected]> */ public interface IAllowsMultipleTargets { }
apache-2.0
cfieber/spectator
spectator-reg-servo/src/test/java/com/netflix/spectator/servo/ServoCounterTest.java
2163
/** * Copyright 2015 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.spectator.servo; import com.netflix.spectator.api.Counter; import com.netflix.spectator.api.ManualClock; import com.netflix.spectator.api.Registry; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; import java.util.concurrent.TimeUnit; @RunWith(JUnit4.class) public class ServoCounterTest { private final ManualClock clock = new ManualClock(); private Counter newCounter(String name) { final Registry r = new ServoRegistry(clock); return r.counter(r.createId(name)); } @Before public void before() { clock.setWallTime(0L); clock.setMonotonicTime(0L); } @Test public void testInit() { Counter c = newCounter("foo"); Assert.assertEquals(c.count(), 0L); c.increment(); Assert.assertEquals(c.count(), 1L); } @Test public void expiration() { // Not expired on init clock.setWallTime(0L); Counter c = newCounter("foo"); Assert.assertTrue(!c.hasExpired()); c.increment(42); // Expires with inactivity, total count in memory is maintained clock.setWallTime(TimeUnit.MINUTES.toMillis(15)); Assert.assertTrue(!c.hasExpired()); // Expires with inactivity, total count in memory is maintained clock.setWallTime(TimeUnit.MINUTES.toMillis(15) + 1); Assert.assertEquals(c.count(), 42); Assert.assertTrue(c.hasExpired()); // Activity brings it back c.increment(); Assert.assertEquals(c.count(), 43); Assert.assertTrue(!c.hasExpired()); } }
apache-2.0
untitaker/rust
src/libarena/lib.rs
20496
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! The arena, a fast but limited type of allocator. //! //! Arenas are a type of allocator that destroy the objects within, all at //! once, once the arena itself is destroyed. They do not support deallocation //! of individual objects while the arena itself is still alive. The benefit //! of an arena is very fast allocation; just a pointer bump. //! //! This crate has two arenas implemented: `TypedArena`, which is a simpler //! arena but can only hold objects of a single type, and `Arena`, which is a //! more complex, slower arena which can hold objects of any type. #![crate_name = "arena"] #![unstable(feature = "rustc_private")] #![staged_api] #![crate_type = "rlib"] #![crate_type = "dylib"] #![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "http://www.rust-lang.org/favicon.ico", html_root_url = "http://doc.rust-lang.org/nightly/")] #![feature(alloc)] #![feature(box_syntax)] #![feature(core)] #![feature(staged_api)] #![feature(unboxed_closures)] #![feature(unsafe_destructor)] #![cfg_attr(test, feature(test))] extern crate alloc; use std::cell::{Cell, RefCell}; use std::cmp; use std::intrinsics::{TyDesc, get_tydesc}; use std::intrinsics; use std::marker; use std::mem; use std::num::{Int, UnsignedInt}; use std::ptr; use std::rc::Rc; use std::rt::heap::{allocate, deallocate}; // The way arena uses arrays is really deeply awful. The arrays are // allocated, and have capacities reserved, but the fill for the array // will always stay at 0. #[derive(Clone, PartialEq)] struct Chunk { data: Rc<RefCell<Vec<u8>>>, fill: Cell<usize>, is_copy: Cell<bool>, } impl Chunk { fn capacity(&self) -> usize { self.data.borrow().capacity() } unsafe fn as_ptr(&self) -> *const u8 { self.data.borrow().as_ptr() } } /// A slower reflection-based arena that can allocate objects of any type. /// /// This arena uses `Vec<u8>` as a backing store to allocate objects from. For /// each allocated object, the arena stores a pointer to the type descriptor /// followed by the object (potentially with alignment padding after each /// element). When the arena is destroyed, it iterates through all of its /// chunks, and uses the tydesc information to trace through the objects, /// calling the destructors on them. One subtle point that needs to be /// addressed is how to handle panics while running the user provided /// initializer function. It is important to not run the destructor on /// uninitialized objects, but how to detect them is somewhat subtle. Since /// `alloc()` can be invoked recursively, it is not sufficient to simply exclude /// the most recent object. To solve this without requiring extra space, we /// use the low order bit of the tydesc pointer to encode whether the object /// it describes has been fully initialized. /// /// As an optimization, objects with destructors are stored in different chunks /// than objects without destructors. This reduces overhead when initializing /// plain-old-data (`Copy` types) and means we don't need to waste time running /// their destructors. pub struct Arena<'longer_than_self> { // The head is separated out from the list as a unbenchmarked // microoptimization, to avoid needing to case on the list to access the // head. head: RefCell<Chunk>, copy_head: RefCell<Chunk>, chunks: RefCell<Vec<Chunk>>, _marker: marker::PhantomData<*mut &'longer_than_self()>, } impl<'a> Arena<'a> { /// Allocates a new Arena with 32 bytes preallocated. pub fn new() -> Arena<'a> { Arena::new_with_size(32) } /// Allocates a new Arena with `initial_size` bytes preallocated. pub fn new_with_size(initial_size: usize) -> Arena<'a> { Arena { head: RefCell::new(chunk(initial_size, false)), copy_head: RefCell::new(chunk(initial_size, true)), chunks: RefCell::new(Vec::new()), _marker: marker::PhantomData, } } } fn chunk(size: usize, is_copy: bool) -> Chunk { Chunk { data: Rc::new(RefCell::new(Vec::with_capacity(size))), fill: Cell::new(0), is_copy: Cell::new(is_copy), } } #[unsafe_destructor] impl<'longer_than_self> Drop for Arena<'longer_than_self> { fn drop(&mut self) { unsafe { destroy_chunk(&*self.head.borrow()); for chunk in &*self.chunks.borrow() { if !chunk.is_copy.get() { destroy_chunk(chunk); } } } } } #[inline] fn round_up(base: usize, align: usize) -> usize { (base.checked_add(align - 1)).unwrap() & !(align - 1) } // Walk down a chunk, running the destructors for any objects stored // in it. unsafe fn destroy_chunk(chunk: &Chunk) { let mut idx = 0; let buf = chunk.as_ptr(); let fill = chunk.fill.get(); while idx < fill { let tydesc_data: *const usize = mem::transmute(buf.offset(idx as isize)); let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data); let (size, align) = ((*tydesc).size, (*tydesc).align); let after_tydesc = idx + mem::size_of::<*const TyDesc>(); let start = round_up(after_tydesc, align); //debug!("freeing object: idx = {}, size = {}, align = {}, done = {}", // start, size, align, is_done); if is_done { ((*tydesc).drop_glue)(buf.offset(start as isize) as *const i8); } // Find where the next tydesc lives idx = round_up(start + size, mem::align_of::<*const TyDesc>()); } } // We encode whether the object a tydesc describes has been // initialized in the arena in the low bit of the tydesc pointer. This // is necessary in order to properly do cleanup if a panic occurs // during an initializer. #[inline] fn bitpack_tydesc_ptr(p: *const TyDesc, is_done: bool) -> usize { p as usize | (is_done as usize) } #[inline] fn un_bitpack_tydesc_ptr(p: usize) -> (*const TyDesc, bool) { ((p & !1) as *const TyDesc, p & 1 == 1) } impl<'longer_than_self> Arena<'longer_than_self> { fn chunk_size(&self) -> usize { self.copy_head.borrow().capacity() } // Functions for the POD part of the arena fn alloc_copy_grow(&self, n_bytes: usize, align: usize) -> *const u8 { // Allocate a new chunk. let new_min_chunk_size = cmp::max(n_bytes, self.chunk_size()); self.chunks.borrow_mut().push(self.copy_head.borrow().clone()); *self.copy_head.borrow_mut() = chunk((new_min_chunk_size + 1).next_power_of_two(), true); return self.alloc_copy_inner(n_bytes, align); } #[inline] fn alloc_copy_inner(&self, n_bytes: usize, align: usize) -> *const u8 { let start = round_up(self.copy_head.borrow().fill.get(), align); let end = start + n_bytes; if end > self.chunk_size() { return self.alloc_copy_grow(n_bytes, align); } let copy_head = self.copy_head.borrow(); copy_head.fill.set(end); unsafe { copy_head.as_ptr().offset(start as isize) } } #[inline] fn alloc_copy<T, F>(&self, op: F) -> &mut T where F: FnOnce() -> T { unsafe { let ptr = self.alloc_copy_inner(mem::size_of::<T>(), mem::min_align_of::<T>()); let ptr = ptr as *mut T; ptr::write(&mut (*ptr), op()); return &mut *ptr; } } // Functions for the non-POD part of the arena fn alloc_noncopy_grow(&self, n_bytes: usize, align: usize) -> (*const u8, *const u8) { // Allocate a new chunk. let new_min_chunk_size = cmp::max(n_bytes, self.chunk_size()); self.chunks.borrow_mut().push(self.head.borrow().clone()); *self.head.borrow_mut() = chunk((new_min_chunk_size + 1).next_power_of_two(), false); return self.alloc_noncopy_inner(n_bytes, align); } #[inline] fn alloc_noncopy_inner(&self, n_bytes: usize, align: usize) -> (*const u8, *const u8) { // Be careful to not maintain any `head` borrows active, because // `alloc_noncopy_grow` borrows it mutably. let (start, end, tydesc_start, head_capacity) = { let head = self.head.borrow(); let fill = head.fill.get(); let tydesc_start = fill; let after_tydesc = fill + mem::size_of::<*const TyDesc>(); let start = round_up(after_tydesc, align); let end = start + n_bytes; (start, end, tydesc_start, head.capacity()) }; if end > head_capacity { return self.alloc_noncopy_grow(n_bytes, align); } let head = self.head.borrow(); head.fill.set(round_up(end, mem::align_of::<*const TyDesc>())); unsafe { let buf = head.as_ptr(); return (buf.offset(tydesc_start as isize), buf.offset(start as isize)); } } #[inline] fn alloc_noncopy<T, F>(&self, op: F) -> &mut T where F: FnOnce() -> T { unsafe { let tydesc = get_tydesc::<T>(); let (ty_ptr, ptr) = self.alloc_noncopy_inner(mem::size_of::<T>(), mem::min_align_of::<T>()); let ty_ptr = ty_ptr as *mut usize; let ptr = ptr as *mut T; // Write in our tydesc along with a bit indicating that it // has *not* been initialized yet. *ty_ptr = mem::transmute(tydesc); // Actually initialize it ptr::write(&mut(*ptr), op()); // Now that we are done, update the tydesc to indicate that // the object is there. *ty_ptr = bitpack_tydesc_ptr(tydesc, true); return &mut *ptr; } } /// Allocates a new item in the arena, using `op` to initialize the value, /// and returns a reference to it. #[inline] pub fn alloc<T:'longer_than_self, F>(&self, op: F) -> &mut T where F: FnOnce() -> T { unsafe { if intrinsics::needs_drop::<T>() { self.alloc_noncopy(op) } else { self.alloc_copy(op) } } } } #[test] fn test_arena_destructors() { let arena = Arena::new(); for i in 0..10 { // Arena allocate something with drop glue to make sure it // doesn't leak. arena.alloc(|| Rc::new(i)); // Allocate something with funny size and alignment, to keep // things interesting. arena.alloc(|| [0u8, 1u8, 2u8]); } } #[test] #[should_fail] fn test_arena_destructors_fail() { let arena = Arena::new(); // Put some stuff in the arena. for i in 0..10 { // Arena allocate something with drop glue to make sure it // doesn't leak. arena.alloc(|| { Rc::new(i) }); // Allocate something with funny size and alignment, to keep // things interesting. arena.alloc(|| { [0u8, 1, 2] }); } // Now, panic while allocating arena.alloc::<Rc<i32>, _>(|| { panic!(); }); } /// A faster arena that can hold objects of only one type. /// /// Safety note: Modifying objects in the arena that have already had their /// `drop` destructors run can cause leaks, because the destructor will not /// run again for these objects. pub struct TypedArena<T> { /// A pointer to the next object to be allocated. ptr: Cell<*const T>, /// A pointer to the end of the allocated area. When this pointer is /// reached, a new chunk is allocated. end: Cell<*const T>, /// A pointer to the first arena segment. first: RefCell<*mut TypedArenaChunk<T>>, /// Marker indicating that dropping the arena causes its owned /// instances of `T` to be dropped. _own: marker::PhantomData<T>, } struct TypedArenaChunk<T> { marker: marker::PhantomData<T>, /// Pointer to the next arena segment. next: *mut TypedArenaChunk<T>, /// The number of elements that this chunk can hold. capacity: usize, // Objects follow here, suitably aligned. } fn calculate_size<T>(capacity: usize) -> usize { let mut size = mem::size_of::<TypedArenaChunk<T>>(); size = round_up(size, mem::min_align_of::<T>()); let elem_size = mem::size_of::<T>(); let elems_size = elem_size.checked_mul(capacity).unwrap(); size = size.checked_add(elems_size).unwrap(); size } impl<T> TypedArenaChunk<T> { #[inline] unsafe fn new(next: *mut TypedArenaChunk<T>, capacity: usize) -> *mut TypedArenaChunk<T> { let size = calculate_size::<T>(capacity); let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>()) as *mut TypedArenaChunk<T>; if chunk.is_null() { alloc::oom() } (*chunk).next = next; (*chunk).capacity = capacity; chunk } /// Destroys this arena chunk. If the type descriptor is supplied, the /// drop glue is called; otherwise, drop glue is not called. #[inline] unsafe fn destroy(&mut self, len: usize) { // Destroy all the allocated objects. if intrinsics::needs_drop::<T>() { let mut start = self.start(); for _ in 0..len { ptr::read(start as *const T); // run the destructor on the pointer start = start.offset(mem::size_of::<T>() as isize) } } // Destroy the next chunk. let next = self.next; let size = calculate_size::<T>(self.capacity); deallocate(self as *mut TypedArenaChunk<T> as *mut u8, size, mem::min_align_of::<TypedArenaChunk<T>>()); if !next.is_null() { let capacity = (*next).capacity; (*next).destroy(capacity); } } // Returns a pointer to the first allocated object. #[inline] fn start(&self) -> *const u8 { let this: *const TypedArenaChunk<T> = self; unsafe { mem::transmute(round_up(this.offset(1) as usize, mem::min_align_of::<T>())) } } // Returns a pointer to the end of the allocated space. #[inline] fn end(&self) -> *const u8 { unsafe { let size = mem::size_of::<T>().checked_mul(self.capacity).unwrap(); self.start().offset(size as isize) } } } impl<T> TypedArena<T> { /// Creates a new `TypedArena` with preallocated space for eight objects. #[inline] pub fn new() -> TypedArena<T> { TypedArena::with_capacity(8) } /// Creates a new `TypedArena` with preallocated space for the given number of /// objects. #[inline] pub fn with_capacity(capacity: usize) -> TypedArena<T> { unsafe { let chunk = TypedArenaChunk::<T>::new(ptr::null_mut(), capacity); TypedArena { ptr: Cell::new((*chunk).start() as *const T), end: Cell::new((*chunk).end() as *const T), first: RefCell::new(chunk), _own: marker::PhantomData, } } } /// Allocates an object in the `TypedArena`, returning a reference to it. #[inline] pub fn alloc(&self, object: T) -> &mut T { if self.ptr == self.end { self.grow() } let ptr: &mut T = unsafe { let ptr: &mut T = mem::transmute(self.ptr.clone()); ptr::write(ptr, object); self.ptr.set(self.ptr.get().offset(1)); ptr }; ptr } /// Grows the arena. #[inline(never)] fn grow(&self) { unsafe { let chunk = *self.first.borrow_mut(); let new_capacity = (*chunk).capacity.checked_mul(2).unwrap(); let chunk = TypedArenaChunk::<T>::new(chunk, new_capacity); self.ptr.set((*chunk).start() as *const T); self.end.set((*chunk).end() as *const T); *self.first.borrow_mut() = chunk } } } #[unsafe_destructor] impl<T> Drop for TypedArena<T> { fn drop(&mut self) { unsafe { // Determine how much was filled. let start = self.first.borrow().as_ref().unwrap().start() as usize; let end = self.ptr.get() as usize; let diff = (end - start) / mem::size_of::<T>(); // Pass that to the `destroy` method. (**self.first.borrow_mut()).destroy(diff) } } } #[cfg(test)] mod tests { extern crate test; use self::test::Bencher; use super::{Arena, TypedArena}; #[allow(dead_code)] struct Point { x: i32, y: i32, z: i32, } #[test] fn test_arena_alloc_nested() { struct Inner { value: u8 } struct Outer<'a> { inner: &'a Inner } enum EI<'e> { I(Inner), O(Outer<'e>) } struct Wrap<'a>(TypedArena<EI<'a>>); impl<'a> Wrap<'a> { fn alloc_inner<F:Fn() -> Inner>(&self, f: F) -> &Inner { let r: &EI = self.0.alloc(EI::I(f())); if let &EI::I(ref i) = r { i } else { panic!("mismatch"); } } fn alloc_outer<F:Fn() -> Outer<'a>>(&self, f: F) -> &Outer { let r: &EI = self.0.alloc(EI::O(f())); if let &EI::O(ref o) = r { o } else { panic!("mismatch"); } } } let arena = Wrap(TypedArena::new()); let result = arena.alloc_outer(|| Outer { inner: arena.alloc_inner(|| Inner { value: 10 }) }); assert_eq!(result.inner.value, 10); } #[test] pub fn test_copy() { let arena = TypedArena::new(); for _ in 0..100000 { arena.alloc(Point { x: 1, y: 2, z: 3, }); } } #[bench] pub fn bench_copy(b: &mut Bencher) { let arena = TypedArena::new(); b.iter(|| { arena.alloc(Point { x: 1, y: 2, z: 3, }) }) } #[bench] pub fn bench_copy_nonarena(b: &mut Bencher) { b.iter(|| { box Point { x: 1, y: 2, z: 3, } }) } #[bench] pub fn bench_copy_old_arena(b: &mut Bencher) { let arena = Arena::new(); b.iter(|| { arena.alloc(|| { Point { x: 1, y: 2, z: 3, } }) }) } #[allow(dead_code)] struct Noncopy { string: String, array: Vec<i32>, } #[test] pub fn test_noncopy() { let arena = TypedArena::new(); for _ in 0..100000 { arena.alloc(Noncopy { string: "hello world".to_string(), array: vec!( 1, 2, 3, 4, 5 ), }); } } #[bench] pub fn bench_noncopy(b: &mut Bencher) { let arena = TypedArena::new(); b.iter(|| { arena.alloc(Noncopy { string: "hello world".to_string(), array: vec!( 1, 2, 3, 4, 5 ), }) }) } #[bench] pub fn bench_noncopy_nonarena(b: &mut Bencher) { b.iter(|| { box Noncopy { string: "hello world".to_string(), array: vec!( 1, 2, 3, 4, 5 ), } }) } #[bench] pub fn bench_noncopy_old_arena(b: &mut Bencher) { let arena = Arena::new(); b.iter(|| { arena.alloc(|| Noncopy { string: "hello world".to_string(), array: vec!( 1, 2, 3, 4, 5 ), }) }) } }
apache-2.0
jiangjianqing/flow-demo
activiti/activiti5.springmvc/src/main/webapp/app/main.js
867
/*global require*/ 'use strict'; // Require.js allows us to configure shortcut alias require.config({ // The shim config allows us to configure dependencies for // scripts that do not call define() to register a module shim: { underscore: { exports: '_' }, backbone: { deps: [ 'underscore', 'jquery' ] } }, paths: { jquery: '../lib/jquery/jquery', underscore: '../lib/underscore/underscore', backbone: '../lib/backbone/backbone', text: '../lib/requirejs-text/text', domReady:'../lib/requirejs-domReady/domReady', handlebars:'../lib/handlebars/handlebars' } }); require([ 'backbone' ], function (backbone) { /*jshint nonew:false*/ // Initialize routing and start Backbone.history() console.log("main被调用"); //new Workspace(); backbone.history.start(); // Initialize the application view //new AppView(); });
apache-2.0
alvintpwang/hostboot
src/include/usr/vpd/vpdreasoncodes.H
6818
/* IBM_PROLOG_BEGIN_TAG */ /* This is an automatically generated prolog. */ /* */ /* $Source: src/include/usr/vpd/vpdreasoncodes.H $ */ /* */ /* OpenPOWER HostBoot Project */ /* */ /* Contributors Listed Below - COPYRIGHT 2013,2015 */ /* [+] Google Inc. */ /* [+] International Business Machines Corp. */ /* */ /* */ /* Licensed under the Apache License, Version 2.0 (the "License"); */ /* you may not use this file except in compliance with the License. */ /* You may obtain a copy of the License at */ /* */ /* http://www.apache.org/licenses/LICENSE-2.0 */ /* */ /* Unless required by applicable law or agreed to in writing, software */ /* distributed under the License is distributed on an "AS IS" BASIS, */ /* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ /* implied. See the License for the specific language governing */ /* permissions and limitations under the License. */ /* */ /* IBM_PROLOG_END_TAG */ #ifndef __VPDREASONCODES_H #define __VPDREASONCODES_H // ----------------------------------------------- // Includes // ----------------------------------------------- #include <hbotcompid.H> namespace VPD { /** * @enum vpdModuleid * * @brief Module Ids used in created errorlogs. Indicates which * functions an error log was created in. * */ enum vpdModuleId { VPD_INVALID_MODULE = 0x00, // Common VPD VPD_WRITE_PNOR = 0x10, VPD_ENSURE_CACHE_IS_IN_SYNC = 0x11, VPD_GET_PN_AND_SN = 0x12, // IPVPD VPD_IPVPD_TRANSLATE_RECORD = 0x20, VPD_IPVPD_TRANSLATE_KEYWORD = 0x21, VPD_IPVPD_FIND_RECORD_OFFSET = 0x22, VPD_IPVPD_FIND_KEYWORD_ADDR = 0x23, VPD_IPVPD_CHECK_BUFFER_SIZE = 0x24, VPD_IPVPD_FIND_RECORD_OFFSET_SEEPROM = 0x30, VPD_IPVPD_FETCH_DATA = 0x31, VPD_IPVPD_WRITE_KEYWORD = 0x32, VPD_IPVPD_LOAD_PNOR = 0x33, VPD_IPVPD_GET_RECORD_LIST_SEEPROM = 0x34, // PVPD VPD_PVPD_PRESENCEDETECT = 0x50, // DIMM SPD VPD_SPD_GET_KEYWORD_VALUE = 0x61, VPD_SPD_GET_VALUE = 0x62, VPD_SPD_CHECK_SIZE = 0x63, VPD_SPD_READ_BINARY_FILE = 0x64, VPD_SPD_WRITE_KEYWORD_VALUE = 0x65, VPD_SPD_SPECIAL_CASES = 0x66, VPD_SPD_DDR3_SPECIAL_CASES = 0x67, VPD_SPD_DDR4_SPECIAL_CASES = 0x68, VPD_SPD_PRESENCE_DETECT = 0x69, VPD_SPD_CHECK_MODULE_SPECIFIC_KEYWORD = 0x6A, VPD_SPD_WRITE_VALUE = 0x6B, VPD_SPD_GET_KEYWORD_ENTRY = 0x6C, VPD_SPD_WRITE_DATA = 0x6D, VPD_SPD_GET_MOD_TYPE = 0x6E, VPD_SPD_FETCH_DATA = 0x6F, // Centaur FRU VPD // Runtime VPD VPD_RT_GET_ADDR = 0x80, VPD_RT_WRITE_PNOR = 0x81, VPD_BLD_RT_IMAGE = 0x82, VPD_SEND_MBOX_WRITE_MESSAGE = 0x83, }; /** * @enum vpdReasonCode * * @brief Reasoncodes used to describe what errors are being indicated. * */ enum vpdReasonCode { VPD_INVALID_REASONCODE = VPD_COMP_ID | 0x00, // Invalid RC VPD_INSUFFICIENT_FILE_SIZE = VPD_COMP_ID | 0x01, VPD_OPERATION_NOT_SUPPORTED = VPD_COMP_ID | 0x02, VPD_RECORD_NOT_FOUND = VPD_COMP_ID | 0x03, VPD_KEYWORD_NOT_FOUND = VPD_COMP_ID | 0x04, VPD_RECORD_MISMATCH = VPD_COMP_ID | 0x05, VPD_INSUFFICIENT_BUFFER_SIZE = VPD_COMP_ID | 0x06, VPD_INVALID_BASIC_MEMORY_TYPE = VPD_COMP_ID | 0x07, VPD_BASIC_MEMORY_TYPE = VPD_COMP_ID | 0x08, VPD_INVALID_SPD_KEYWORD = VPD_COMP_ID | 0x09, VPD_MEMTYPE_NOT_SUPPORTED = VPD_COMP_ID | 0x0A, VPD_KEYWORD_NOT_WRITABLE = VPD_COMP_ID | 0x0B, VPD_NOT_SUPPORTED = VPD_COMP_ID | 0x0C, VPD_MOD_SPECIFIC_MISMATCH_UMM = VPD_COMP_ID | 0x0D, VPD_MOD_SPECIFIC_MISMATCH_RMM = VPD_COMP_ID | 0x0E, VPD_MOD_SPECIFIC_MISMATCH_CMM = VPD_COMP_ID | 0x0F, VPD_MOD_SPECIFIC_MISMATCH_LRMM = VPD_COMP_ID | 0x10, VPD_MOD_SPECIFIC_UNSUPPORTED = VPD_COMP_ID | 0x11, VPD_SIZE_MISMATCH = VPD_COMP_ID | 0x12, VPD_INVALID_WRITE_METHOD = VPD_COMP_ID | 0x13, VPD_NULL_ENTRY = VPD_COMP_ID | 0x14, VPD_UNSUPPORTED_WRITE = VPD_COMP_ID | 0x15, VPD_RT_INVALID_TYPE = VPD_COMP_ID | 0x16, VPD_RT_CALL_TO_HYPR_FAILED = VPD_COMP_ID | 0x17, VPD_RT_WRITE_NOT_SUPPORTED = VPD_COMP_ID | 0x18, VPD_RT_NOT_INITIALIZED = VPD_COMP_ID | 0x19, VPD_RT_NULL_VPD_PTR = VPD_COMP_ID | 0x1a, VPD_INSUFFICIENT_SPACE_FOR_IMAGE = VPD_COMP_ID | 0x1b, VPD_MBOX_NOT_SUPPORTED_RT = VPD_COMP_ID | 0x1c, VPD_RECORD_INVALID_VHDR = VPD_COMP_ID | 0x30, VPD_READ_SOURCE_UNRESOLVED = VPD_COMP_ID | 0x31, VPD_REMOVE_PAGES_FAIL = VPD_COMP_ID | 0x32, VPD_UNEXPECTED_TARGET_TYPE = VPD_COMP_ID | 0x33, VPD_WRITE_DEST_UNRESOLVED = VPD_COMP_ID | 0x34, VPD_CACHE_SIZE_EXCEEDED = VPD_COMP_ID | 0x35, VPD_INVALID_LENGTH = VPD_COMP_ID | 0x36, }; /** * @enum VPD::UserDetailsTypes * * @brief UserDetailsTypes for VPD User Details Section * */ enum UserDetailsTypes { VPD_UDT_NO_FORMAT = 0x0, VPD_UDT_PARAMETERS = 0x1, VPD_UDT_CONFIG_PARMS = 0x2, }; }; // end MVPD #endif
apache-2.0
smartan/lucene
src/main/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
9762
package org.apache.lucene.search.suggest.analyzing; /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.IOException; import java.util.ArrayList; import java.util.Comparator; import java.util.List; import java.util.Set; import java.util.TreeSet; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.TextField; import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.MultiDocValues; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.suggest.Lookup; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; // TODO: // - allow to use the search score /** * Extension of the AnalyzingInfixSuggester which transforms the weight * after search to take into account the position of the searched term into * the indexed text. * Please note that it increases the number of elements searched and applies the * ponderation after. It might be costly for long suggestions. * * @lucene.experimental */ public class BlendedInfixSuggester extends AnalyzingInfixSuggester { /** * Coefficient used for linear blending */ protected static double LINEAR_COEF = 0.10; /** * Default factor */ public static int DEFAULT_NUM_FACTOR = 10; /** * Factor to multiply the number of searched elements */ private final int numFactor; /** * Type of blender used by the suggester */ private final BlenderType blenderType; /** * The different types of blender. */ public static enum BlenderType { /** Application dependent; override {@link * #calculateCoefficient} to compute it. */ CUSTOM, /** weight*(1 - 0.10*position) */ POSITION_LINEAR, /** weight/(1+position) */ POSITION_RECIPROCAL, // TODO: //SCORE } /** * Create a new instance, loading from a previously built * directory, if it exists. */ public BlendedInfixSuggester(Version matchVersion, Directory dir, Analyzer analyzer) throws IOException { super(matchVersion, dir, analyzer); this.blenderType = BlenderType.POSITION_LINEAR; this.numFactor = DEFAULT_NUM_FACTOR; } /** * Create a new instance, loading from a previously built * directory, if it exists. * * @param blenderType Type of blending strategy, see BlenderType for more precisions * @param numFactor Factor to multiply the number of searched elements before ponderate * @throws IOException If there are problems opening the underlying Lucene index. */ public BlendedInfixSuggester(Version matchVersion, Directory dir, Analyzer indexAnalyzer, Analyzer queryAnalyzer, int minPrefixChars, BlenderType blenderType, int numFactor) throws IOException { super(matchVersion, dir, indexAnalyzer, queryAnalyzer, minPrefixChars); this.blenderType = blenderType; this.numFactor = numFactor; } @Override public List<Lookup.LookupResult> lookup(CharSequence key, Set<BytesRef> contexts, boolean onlyMorePopular, int num) throws IOException { // here we multiply the number of searched element by the defined factor return super.lookup(key, contexts, onlyMorePopular, num * numFactor); } @Override public List<Lookup.LookupResult> lookup(CharSequence key, Set<BytesRef> contexts, int num, boolean allTermsRequired, boolean doHighlight) throws IOException { // here we multiply the number of searched element by the defined factor return super.lookup(key, contexts, num * numFactor, allTermsRequired, doHighlight); } @Override protected FieldType getTextFieldType() { FieldType ft = new FieldType(TextField.TYPE_NOT_STORED); ft.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); ft.setStoreTermVectors(true); ft.setStoreTermVectorPositions(true); ft.setOmitNorms(true); return ft; } @Override protected List<Lookup.LookupResult> createResults(IndexSearcher searcher, TopFieldDocs hits, int num, CharSequence key, boolean doHighlight, Set<String> matchedTokens, String prefixToken) throws IOException { BinaryDocValues textDV = MultiDocValues.getBinaryValues(searcher.getIndexReader(), TEXT_FIELD_NAME); assert textDV != null; // This will just be null if app didn't pass payloads to build(): // TODO: maybe just stored fields? they compress... BinaryDocValues payloadsDV = MultiDocValues.getBinaryValues(searcher.getIndexReader(), "payloads"); TreeSet<Lookup.LookupResult> results = new TreeSet<>(LOOKUP_COMP); // we reduce the num to the one initially requested int actualNum = num / numFactor; for (int i = 0; i < hits.scoreDocs.length; i++) { FieldDoc fd = (FieldDoc) hits.scoreDocs[i]; final String text = textDV.get(fd.doc).utf8ToString(); long weight = (Long) fd.fields[0]; BytesRef payload; if (payloadsDV != null) { payload = BytesRef.deepCopyOf(payloadsDV.get(fd.doc)); } else { payload = null; } double coefficient; if (text.startsWith(key.toString())) { // if hit starts with the key, we don't change the score coefficient = 1; } else { coefficient = createCoefficient(searcher, fd.doc, matchedTokens, prefixToken); } long score = (long) (weight * coefficient); LookupResult result; if (doHighlight) { result = new LookupResult(text, highlight(text, matchedTokens, prefixToken), score, payload); } else { result = new LookupResult(text, score, payload); } boundedTreeAdd(results, result, actualNum); } return new ArrayList<>(results.descendingSet()); } /** * Add an element to the tree respecting a size limit * * @param results the tree to add in * @param result the result we try to add * @param num size limit */ private static void boundedTreeAdd(TreeSet<Lookup.LookupResult> results, Lookup.LookupResult result, int num) { if (results.size() >= num) { if (results.first().value < result.value) { results.pollFirst(); } else { return; } } results.add(result); } /** * Create the coefficient to transform the weight. * * @param doc id of the document * @param matchedTokens tokens found in the query * @param prefixToken unfinished token in the query * @return the coefficient * @throws IOException If there are problems reading term vectors from the underlying Lucene index. */ private double createCoefficient(IndexSearcher searcher, int doc, Set<String> matchedTokens, String prefixToken) throws IOException { Terms tv = searcher.getIndexReader().getTermVector(doc, TEXT_FIELD_NAME); TermsEnum it = tv.iterator(TermsEnum.EMPTY); Integer position = Integer.MAX_VALUE; BytesRef term; // find the closest token position while ((term = it.next()) != null) { String docTerm = term.utf8ToString(); if (matchedTokens.contains(docTerm) || (prefixToken != null && docTerm.startsWith(prefixToken))) { DocsAndPositionsEnum docPosEnum = it.docsAndPositions(null, null, DocsAndPositionsEnum.FLAG_OFFSETS); docPosEnum.nextDoc(); // use the first occurrence of the term int p = docPosEnum.nextPosition(); if (p < position) { position = p; } } } // create corresponding coefficient based on position return calculateCoefficient(position); } /** * Calculate the weight coefficient based on the position of the first matching word. * Subclass should override it to adapt it to particular needs * @param position of the first matching word in text * @return the coefficient */ protected double calculateCoefficient(int position) { double coefficient; switch (blenderType) { case POSITION_LINEAR: coefficient = 1 - LINEAR_COEF * position; break; case POSITION_RECIPROCAL: coefficient = 1. / (position + 1); break; default: coefficient = 1; } return coefficient; } private static Comparator<Lookup.LookupResult> LOOKUP_COMP = new LookUpComparator(); private static class LookUpComparator implements Comparator<Lookup.LookupResult> { @Override public int compare(Lookup.LookupResult o1, Lookup.LookupResult o2) { // order on weight if (o1.value > o2.value) { return 1; } else if (o1.value < o2.value) { return -1; } // otherwise on alphabetic order return CHARSEQUENCE_COMPARATOR.compare(o1.key, o2.key); } } }
apache-2.0
Khan/khan-linter
node_modules/graphql/validation/rules/OverlappingFieldsCanBeMerged.js
25753
'use strict'; Object.defineProperty(exports, "__esModule", { value: true }); exports.fieldsConflictMessage = fieldsConflictMessage; exports.OverlappingFieldsCanBeMerged = OverlappingFieldsCanBeMerged; var _error = require('../../error'); var _find = require('../../jsutils/find'); var _find2 = _interopRequireDefault(_find); var _kinds = require('../../language/kinds'); var _printer = require('../../language/printer'); var _definition = require('../../type/definition'); var _typeFromAST = require('../../utilities/typeFromAST'); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } /** * Copyright (c) 2015-present, Facebook, Inc. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. * * strict */ function fieldsConflictMessage(responseName, reason) { return 'Fields "' + responseName + '" conflict because ' + reasonMessage(reason) + '. Use different aliases on the fields to fetch both if this was ' + 'intentional.'; } function reasonMessage(reason) { if (Array.isArray(reason)) { return reason.map(function (_ref) { var responseName = _ref[0], subreason = _ref[1]; return 'subfields "' + responseName + '" conflict because ' + reasonMessage(subreason); }).join(' and '); } return reason; } /** * Overlapping fields can be merged * * A selection set is only valid if all fields (including spreading any * fragments) either correspond to distinct response names or can be merged * without ambiguity. */ function OverlappingFieldsCanBeMerged(context) { // A memoization for when two fragments are compared "between" each other for // conflicts. Two fragments may be compared many times, so memoizing this can // dramatically improve the performance of this validator. var comparedFragmentPairs = new PairSet(); // A cache for the "field map" and list of fragment names found in any given // selection set. Selection sets may be asked for this information multiple // times, so this improves the performance of this validator. var cachedFieldsAndFragmentNames = new Map(); return { SelectionSet: function SelectionSet(selectionSet) { var conflicts = findConflictsWithinSelectionSet(context, cachedFieldsAndFragmentNames, comparedFragmentPairs, context.getParentType(), selectionSet); conflicts.forEach(function (_ref2) { var _ref2$ = _ref2[0], responseName = _ref2$[0], reason = _ref2$[1], fields1 = _ref2[1], fields2 = _ref2[2]; return context.reportError(new _error.GraphQLError(fieldsConflictMessage(responseName, reason), fields1.concat(fields2))); }); } }; } // Field name and reason. // Reason is a string, or a nested list of conflicts. // Tuple defining a field node in a context. // Map of array of those. /** * Algorithm: * * Conflicts occur when two fields exist in a query which will produce the same * response name, but represent differing values, thus creating a conflict. * The algorithm below finds all conflicts via making a series of comparisons * between fields. In order to compare as few fields as possible, this makes * a series of comparisons "within" sets of fields and "between" sets of fields. * * Given any selection set, a collection produces both a set of fields by * also including all inline fragments, as well as a list of fragments * referenced by fragment spreads. * * A) Each selection set represented in the document first compares "within" its * collected set of fields, finding any conflicts between every pair of * overlapping fields. * Note: This is the *only time* that a the fields "within" a set are compared * to each other. After this only fields "between" sets are compared. * * B) Also, if any fragment is referenced in a selection set, then a * comparison is made "between" the original set of fields and the * referenced fragment. * * C) Also, if multiple fragments are referenced, then comparisons * are made "between" each referenced fragment. * * D) When comparing "between" a set of fields and a referenced fragment, first * a comparison is made between each field in the original set of fields and * each field in the the referenced set of fields. * * E) Also, if any fragment is referenced in the referenced selection set, * then a comparison is made "between" the original set of fields and the * referenced fragment (recursively referring to step D). * * F) When comparing "between" two fragments, first a comparison is made between * each field in the first referenced set of fields and each field in the the * second referenced set of fields. * * G) Also, any fragments referenced by the first must be compared to the * second, and any fragments referenced by the second must be compared to the * first (recursively referring to step F). * * H) When comparing two fields, if both have selection sets, then a comparison * is made "between" both selection sets, first comparing the set of fields in * the first selection set with the set of fields in the second. * * I) Also, if any fragment is referenced in either selection set, then a * comparison is made "between" the other set of fields and the * referenced fragment. * * J) Also, if two fragments are referenced in both selection sets, then a * comparison is made "between" the two fragments. * */ // Find all conflicts found "within" a selection set, including those found // via spreading in fragments. Called when visiting each SelectionSet in the // GraphQL Document. function findConflictsWithinSelectionSet(context, cachedFieldsAndFragmentNames, comparedFragmentPairs, parentType, selectionSet) { var conflicts = []; var _getFieldsAndFragment = getFieldsAndFragmentNames(context, cachedFieldsAndFragmentNames, parentType, selectionSet), fieldMap = _getFieldsAndFragment[0], fragmentNames = _getFieldsAndFragment[1]; // (A) Find find all conflicts "within" the fields of this selection set. // Note: this is the *only place* `collectConflictsWithin` is called. collectConflictsWithin(context, conflicts, cachedFieldsAndFragmentNames, comparedFragmentPairs, fieldMap); if (fragmentNames.length !== 0) { // (B) Then collect conflicts between these fields and those represented by // each spread fragment name found. var comparedFragments = Object.create(null); for (var i = 0; i < fragmentNames.length; i++) { collectConflictsBetweenFieldsAndFragment(context, conflicts, cachedFieldsAndFragmentNames, comparedFragments, comparedFragmentPairs, false, fieldMap, fragmentNames[i]); // (C) Then compare this fragment with all other fragments found in this // selection set to collect conflicts between fragments spread together. // This compares each item in the list of fragment names to every other // item in that same list (except for itself). for (var j = i + 1; j < fragmentNames.length; j++) { collectConflictsBetweenFragments(context, conflicts, cachedFieldsAndFragmentNames, comparedFragmentPairs, false, fragmentNames[i], fragmentNames[j]); } } } return conflicts; } // Collect all conflicts found between a set of fields and a fragment reference // including via spreading in any nested fragments. function collectConflictsBetweenFieldsAndFragment(context, conflicts, cachedFieldsAndFragmentNames, comparedFragments, comparedFragmentPairs, areMutuallyExclusive, fieldMap, fragmentName) { // Memoize so a fragment is not compared for conflicts more than once. if (comparedFragments[fragmentName]) { return; } comparedFragments[fragmentName] = true; var fragment = context.getFragment(fragmentName); if (!fragment) { return; } var _getReferencedFieldsA = getReferencedFieldsAndFragmentNames(context, cachedFieldsAndFragmentNames, fragment), fieldMap2 = _getReferencedFieldsA[0], fragmentNames2 = _getReferencedFieldsA[1]; // Do not compare a fragment's fieldMap to itself. if (fieldMap === fieldMap2) { return; } // (D) First collect any conflicts between the provided collection of fields // and the collection of fields represented by the given fragment. collectConflictsBetween(context, conflicts, cachedFieldsAndFragmentNames, comparedFragmentPairs, areMutuallyExclusive, fieldMap, fieldMap2); // (E) Then collect any conflicts between the provided collection of fields // and any fragment names found in the given fragment. for (var i = 0; i < fragmentNames2.length; i++) { collectConflictsBetweenFieldsAndFragment(context, conflicts, cachedFieldsAndFragmentNames, comparedFragments, comparedFragmentPairs, areMutuallyExclusive, fieldMap, fragmentNames2[i]); } } // Collect all conflicts found between two fragments, including via spreading in // any nested fragments. function collectConflictsBetweenFragments(context, conflicts, cachedFieldsAndFragmentNames, comparedFragmentPairs, areMutuallyExclusive, fragmentName1, fragmentName2) { // No need to compare a fragment to itself. if (fragmentName1 === fragmentName2) { return; } // Memoize so two fragments are not compared for conflicts more than once. if (comparedFragmentPairs.has(fragmentName1, fragmentName2, areMutuallyExclusive)) { return; } comparedFragmentPairs.add(fragmentName1, fragmentName2, areMutuallyExclusive); var fragment1 = context.getFragment(fragmentName1); var fragment2 = context.getFragment(fragmentName2); if (!fragment1 || !fragment2) { return; } var _getReferencedFieldsA2 = getReferencedFieldsAndFragmentNames(context, cachedFieldsAndFragmentNames, fragment1), fieldMap1 = _getReferencedFieldsA2[0], fragmentNames1 = _getReferencedFieldsA2[1]; var _getReferencedFieldsA3 = getReferencedFieldsAndFragmentNames(context, cachedFieldsAndFragmentNames, fragment2), fieldMap2 = _getReferencedFieldsA3[0], fragmentNames2 = _getReferencedFieldsA3[1]; // (F) First, collect all conflicts between these two collections of fields // (not including any nested fragments). collectConflictsBetween(context, conflicts, cachedFieldsAndFragmentNames, comparedFragmentPairs, areMutuallyExclusive, fieldMap1, fieldMap2); // (G) Then collect conflicts between the first fragment and any nested // fragments spread in the second fragment. for (var j = 0; j < fragmentNames2.length; j++) { collectConflictsBetweenFragments(context, conflicts, cachedFieldsAndFragmentNames, comparedFragmentPairs, areMutuallyExclusive, fragmentName1, fragmentNames2[j]); } // (G) Then collect conflicts between the second fragment and any nested // fragments spread in the first fragment. for (var i = 0; i < fragmentNames1.length; i++) { collectConflictsBetweenFragments(context, conflicts, cachedFieldsAndFragmentNames, comparedFragmentPairs, areMutuallyExclusive, fragmentNames1[i], fragmentName2); } } // Find all conflicts found between two selection sets, including those found // via spreading in fragments. Called when determining if conflicts exist // between the sub-fields of two overlapping fields. function findConflictsBetweenSubSelectionSets(context, cachedFieldsAndFragmentNames, comparedFragmentPairs, areMutuallyExclusive, parentType1, selectionSet1, parentType2, selectionSet2) { var conflicts = []; var _getFieldsAndFragment2 = getFieldsAndFragmentNames(context, cachedFieldsAndFragmentNames, parentType1, selectionSet1), fieldMap1 = _getFieldsAndFragment2[0], fragmentNames1 = _getFieldsAndFragment2[1]; var _getFieldsAndFragment3 = getFieldsAndFragmentNames(context, cachedFieldsAndFragmentNames, parentType2, selectionSet2), fieldMap2 = _getFieldsAndFragment3[0], fragmentNames2 = _getFieldsAndFragment3[1]; // (H) First, collect all conflicts between these two collections of field. collectConflictsBetween(context, conflicts, cachedFieldsAndFragmentNames, comparedFragmentPairs, areMutuallyExclusive, fieldMap1, fieldMap2); // (I) Then collect conflicts between the first collection of fields and // those referenced by each fragment name associated with the second. if (fragmentNames2.length !== 0) { var comparedFragments = Object.create(null); for (var j = 0; j < fragmentNames2.length; j++) { collectConflictsBetweenFieldsAndFragment(context, conflicts, cachedFieldsAndFragmentNames, comparedFragments, comparedFragmentPairs, areMutuallyExclusive, fieldMap1, fragmentNames2[j]); } } // (I) Then collect conflicts between the second collection of fields and // those referenced by each fragment name associated with the first. if (fragmentNames1.length !== 0) { var _comparedFragments = Object.create(null); for (var i = 0; i < fragmentNames1.length; i++) { collectConflictsBetweenFieldsAndFragment(context, conflicts, cachedFieldsAndFragmentNames, _comparedFragments, comparedFragmentPairs, areMutuallyExclusive, fieldMap2, fragmentNames1[i]); } } // (J) Also collect conflicts between any fragment names by the first and // fragment names by the second. This compares each item in the first set of // names to each item in the second set of names. for (var _i = 0; _i < fragmentNames1.length; _i++) { for (var _j = 0; _j < fragmentNames2.length; _j++) { collectConflictsBetweenFragments(context, conflicts, cachedFieldsAndFragmentNames, comparedFragmentPairs, areMutuallyExclusive, fragmentNames1[_i], fragmentNames2[_j]); } } return conflicts; } // Collect all Conflicts "within" one collection of fields. function collectConflictsWithin(context, conflicts, cachedFieldsAndFragmentNames, comparedFragmentPairs, fieldMap) { // A field map is a keyed collection, where each key represents a response // name and the value at that key is a list of all fields which provide that // response name. For every response name, if there are multiple fields, they // must be compared to find a potential conflict. Object.keys(fieldMap).forEach(function (responseName) { var fields = fieldMap[responseName]; // This compares every field in the list to every other field in this list // (except to itself). If the list only has one item, nothing needs to // be compared. if (fields.length > 1) { for (var i = 0; i < fields.length; i++) { for (var j = i + 1; j < fields.length; j++) { var conflict = findConflict(context, cachedFieldsAndFragmentNames, comparedFragmentPairs, false, // within one collection is never mutually exclusive responseName, fields[i], fields[j]); if (conflict) { conflicts.push(conflict); } } } } }); } // Collect all Conflicts between two collections of fields. This is similar to, // but different from the `collectConflictsWithin` function above. This check // assumes that `collectConflictsWithin` has already been called on each // provided collection of fields. This is true because this validator traverses // each individual selection set. function collectConflictsBetween(context, conflicts, cachedFieldsAndFragmentNames, comparedFragmentPairs, parentFieldsAreMutuallyExclusive, fieldMap1, fieldMap2) { // A field map is a keyed collection, where each key represents a response // name and the value at that key is a list of all fields which provide that // response name. For any response name which appears in both provided field // maps, each field from the first field map must be compared to every field // in the second field map to find potential conflicts. Object.keys(fieldMap1).forEach(function (responseName) { var fields2 = fieldMap2[responseName]; if (fields2) { var fields1 = fieldMap1[responseName]; for (var i = 0; i < fields1.length; i++) { for (var j = 0; j < fields2.length; j++) { var conflict = findConflict(context, cachedFieldsAndFragmentNames, comparedFragmentPairs, parentFieldsAreMutuallyExclusive, responseName, fields1[i], fields2[j]); if (conflict) { conflicts.push(conflict); } } } } }); } // Determines if there is a conflict between two particular fields, including // comparing their sub-fields. function findConflict(context, cachedFieldsAndFragmentNames, comparedFragmentPairs, parentFieldsAreMutuallyExclusive, responseName, field1, field2) { var parentType1 = field1[0], node1 = field1[1], def1 = field1[2]; var parentType2 = field2[0], node2 = field2[1], def2 = field2[2]; // If it is known that two fields could not possibly apply at the same // time, due to the parent types, then it is safe to permit them to diverge // in aliased field or arguments used as they will not present any ambiguity // by differing. // It is known that two parent types could never overlap if they are // different Object types. Interface or Union types might overlap - if not // in the current state of the schema, then perhaps in some future version, // thus may not safely diverge. var areMutuallyExclusive = parentFieldsAreMutuallyExclusive || parentType1 !== parentType2 && (0, _definition.isObjectType)(parentType1) && (0, _definition.isObjectType)(parentType2); // The return type for each field. var type1 = def1 && def1.type; var type2 = def2 && def2.type; if (!areMutuallyExclusive) { // Two aliases must refer to the same field. var name1 = node1.name.value; var name2 = node2.name.value; if (name1 !== name2) { return [[responseName, name1 + ' and ' + name2 + ' are different fields'], [node1], [node2]]; } // Two field calls must have the same arguments. if (!sameArguments(node1.arguments || [], node2.arguments || [])) { return [[responseName, 'they have differing arguments'], [node1], [node2]]; } } if (type1 && type2 && doTypesConflict(type1, type2)) { return [[responseName, 'they return conflicting types ' + String(type1) + ' and ' + String(type2)], [node1], [node2]]; } // Collect and compare sub-fields. Use the same "visited fragment names" list // for both collections so fields in a fragment reference are never // compared to themselves. var selectionSet1 = node1.selectionSet; var selectionSet2 = node2.selectionSet; if (selectionSet1 && selectionSet2) { var conflicts = findConflictsBetweenSubSelectionSets(context, cachedFieldsAndFragmentNames, comparedFragmentPairs, areMutuallyExclusive, (0, _definition.getNamedType)(type1), selectionSet1, (0, _definition.getNamedType)(type2), selectionSet2); return subfieldConflicts(conflicts, responseName, node1, node2); } } function sameArguments(arguments1, arguments2) { if (arguments1.length !== arguments2.length) { return false; } return arguments1.every(function (argument1) { var argument2 = (0, _find2.default)(arguments2, function (argument) { return argument.name.value === argument1.name.value; }); if (!argument2) { return false; } return sameValue(argument1.value, argument2.value); }); } function sameValue(value1, value2) { return !value1 && !value2 || (0, _printer.print)(value1) === (0, _printer.print)(value2); } // Two types conflict if both types could not apply to a value simultaneously. // Composite types are ignored as their individual field types will be compared // later recursively. However List and Non-Null types must match. function doTypesConflict(type1, type2) { if ((0, _definition.isListType)(type1)) { return (0, _definition.isListType)(type2) ? doTypesConflict(type1.ofType, type2.ofType) : true; } if ((0, _definition.isListType)(type2)) { return true; } if ((0, _definition.isNonNullType)(type1)) { return (0, _definition.isNonNullType)(type2) ? doTypesConflict(type1.ofType, type2.ofType) : true; } if ((0, _definition.isNonNullType)(type2)) { return true; } if ((0, _definition.isLeafType)(type1) || (0, _definition.isLeafType)(type2)) { return type1 !== type2; } return false; } // Given a selection set, return the collection of fields (a mapping of response // name to field nodes and definitions) as well as a list of fragment names // referenced via fragment spreads. function getFieldsAndFragmentNames(context, cachedFieldsAndFragmentNames, parentType, selectionSet) { var cached = cachedFieldsAndFragmentNames.get(selectionSet); if (!cached) { var nodeAndDefs = Object.create(null); var fragmentNames = Object.create(null); _collectFieldsAndFragmentNames(context, parentType, selectionSet, nodeAndDefs, fragmentNames); cached = [nodeAndDefs, Object.keys(fragmentNames)]; cachedFieldsAndFragmentNames.set(selectionSet, cached); } return cached; } // Given a reference to a fragment, return the represented collection of fields // as well as a list of nested fragment names referenced via fragment spreads. function getReferencedFieldsAndFragmentNames(context, cachedFieldsAndFragmentNames, fragment) { // Short-circuit building a type from the node if possible. var cached = cachedFieldsAndFragmentNames.get(fragment.selectionSet); if (cached) { return cached; } var fragmentType = (0, _typeFromAST.typeFromAST)(context.getSchema(), fragment.typeCondition); return getFieldsAndFragmentNames(context, cachedFieldsAndFragmentNames, fragmentType, fragment.selectionSet); } function _collectFieldsAndFragmentNames(context, parentType, selectionSet, nodeAndDefs, fragmentNames) { for (var i = 0; i < selectionSet.selections.length; i++) { var selection = selectionSet.selections[i]; switch (selection.kind) { case _kinds.Kind.FIELD: var fieldName = selection.name.value; var fieldDef = void 0; if ((0, _definition.isObjectType)(parentType) || (0, _definition.isInterfaceType)(parentType)) { fieldDef = parentType.getFields()[fieldName]; } var responseName = selection.alias ? selection.alias.value : fieldName; if (!nodeAndDefs[responseName]) { nodeAndDefs[responseName] = []; } nodeAndDefs[responseName].push([parentType, selection, fieldDef]); break; case _kinds.Kind.FRAGMENT_SPREAD: fragmentNames[selection.name.value] = true; break; case _kinds.Kind.INLINE_FRAGMENT: var typeCondition = selection.typeCondition; var inlineFragmentType = typeCondition ? (0, _typeFromAST.typeFromAST)(context.getSchema(), typeCondition) : parentType; _collectFieldsAndFragmentNames(context, inlineFragmentType, selection.selectionSet, nodeAndDefs, fragmentNames); break; } } } // Given a series of Conflicts which occurred between two sub-fields, generate // a single Conflict. function subfieldConflicts(conflicts, responseName, node1, node2) { if (conflicts.length > 0) { return [[responseName, conflicts.map(function (_ref3) { var reason = _ref3[0]; return reason; })], conflicts.reduce(function (allFields, _ref4) { var fields1 = _ref4[1]; return allFields.concat(fields1); }, [node1]), conflicts.reduce(function (allFields, _ref5) { var fields2 = _ref5[2]; return allFields.concat(fields2); }, [node2])]; } } /** * A way to keep track of pairs of things when the ordering of the pair does * not matter. We do this by maintaining a sort of double adjacency sets. */ var PairSet = function () { function PairSet() { _classCallCheck(this, PairSet); this._data = Object.create(null); } PairSet.prototype.has = function has(a, b, areMutuallyExclusive) { var first = this._data[a]; var result = first && first[b]; if (result === undefined) { return false; } // areMutuallyExclusive being false is a superset of being true, // hence if we want to know if this PairSet "has" these two with no // exclusivity, we have to ensure it was added as such. if (areMutuallyExclusive === false) { return result === false; } return true; }; PairSet.prototype.add = function add(a, b, areMutuallyExclusive) { _pairSetAdd(this._data, a, b, areMutuallyExclusive); _pairSetAdd(this._data, b, a, areMutuallyExclusive); }; return PairSet; }(); function _pairSetAdd(data, a, b, areMutuallyExclusive) { var map = data[a]; if (!map) { map = Object.create(null); data[a] = map; } map[b] = areMutuallyExclusive; }
apache-2.0
cedral/aws-sdk-cpp
aws-cpp-sdk-mturk-requester/include/aws/mturk-requester/model/UpdateHITTypeOfHITResult.h
1222
/* * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include <aws/mturk-requester/MTurk_EXPORTS.h> namespace Aws { template<typename RESULT_TYPE> class AmazonWebServiceResult; namespace Utils { namespace Json { class JsonValue; } // namespace Json } // namespace Utils namespace MTurk { namespace Model { class AWS_MTURK_API UpdateHITTypeOfHITResult { public: UpdateHITTypeOfHITResult(); UpdateHITTypeOfHITResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result); UpdateHITTypeOfHITResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result); }; } // namespace Model } // namespace MTurk } // namespace Aws
apache-2.0
Vudentz/zephyr
arch/arm/core/aarch32/thread.c
19460
/* * Copyright (c) 2013-2014 Wind River Systems, Inc. * * SPDX-License-Identifier: Apache-2.0 */ /** * @file * @brief New thread creation for ARM Cortex-M and Cortex-R * * Core thread related primitives for the ARM Cortex-M and Cortex-R * processor architecture. */ #include <kernel.h> #include <ksched.h> #include <wait_q.h> #if (MPU_GUARD_ALIGN_AND_SIZE_FLOAT > MPU_GUARD_ALIGN_AND_SIZE) #define FP_GUARD_EXTRA_SIZE (MPU_GUARD_ALIGN_AND_SIZE_FLOAT - \ MPU_GUARD_ALIGN_AND_SIZE) #else #define FP_GUARD_EXTRA_SIZE 0 #endif #if !defined(CONFIG_MULTITHREADING) && defined(CONFIG_CPU_CORTEX_M) extern K_THREAD_STACK_DEFINE(z_main_stack, CONFIG_MAIN_STACK_SIZE); #endif /* An initial context, to be "restored" by z_arm_pendsv(), is put at the other * end of the stack, and thus reusable by the stack when not needed anymore. * * The initial context is an exception stack frame (ESF) since exiting the * PendSV exception will want to pop an ESF. Interestingly, even if the lsb of * an instruction address to jump to must always be set since the CPU always * runs in thumb mode, the ESF expects the real address of the instruction, * with the lsb *not* set (instructions are always aligned on 16 bit * halfwords). Since the compiler automatically sets the lsb of function * addresses, we have to unset it manually before storing it in the 'pc' field * of the ESF. */ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, char *stack_ptr, k_thread_entry_t entry, void *p1, void *p2, void *p3) { struct __basic_sf *iframe; #ifdef CONFIG_MPU_STACK_GUARD #if defined(CONFIG_USERSPACE) if (z_stack_is_user_capable(stack)) { /* Guard area is carved-out of the buffer instead of reserved * for stacks that can host user threads */ thread->stack_info.start += MPU_GUARD_ALIGN_AND_SIZE; thread->stack_info.size -= MPU_GUARD_ALIGN_AND_SIZE; } #endif /* CONFIG_USERSPACE */ #if FP_GUARD_EXTRA_SIZE > 0 if ((thread->base.user_options & K_FP_REGS) != 0) { /* Larger guard needed due to lazy stacking of FP regs may * overshoot the guard area without writing anything. We * carve it out of the stack buffer as-needed instead of * unconditionally reserving it. */ thread->stack_info.start += FP_GUARD_EXTRA_SIZE; thread->stack_info.size -= FP_GUARD_EXTRA_SIZE; } #endif /* FP_GUARD_EXTRA_SIZE */ #endif /* CONFIG_MPU_STACK_GUARD */ iframe = Z_STACK_PTR_TO_FRAME(struct __basic_sf, stack_ptr); #if defined(CONFIG_USERSPACE) if ((thread->base.user_options & K_USER) != 0) { iframe->pc = (uint32_t)arch_user_mode_enter; } else { iframe->pc = (uint32_t)z_thread_entry; } #else iframe->pc = (uint32_t)z_thread_entry; #endif #if defined(CONFIG_CPU_CORTEX_M) /* force ARM mode by clearing LSB of address */ iframe->pc &= 0xfffffffe; #endif iframe->a1 = (uint32_t)entry; iframe->a2 = (uint32_t)p1; iframe->a3 = (uint32_t)p2; iframe->a4 = (uint32_t)p3; #if defined(CONFIG_CPU_CORTEX_M) iframe->xpsr = 0x01000000UL; /* clear all, thumb bit is 1, even if RO */ #else iframe->xpsr = A_BIT | MODE_SYS; #if defined(CONFIG_COMPILER_ISA_THUMB2) iframe->xpsr |= T_BIT; #endif /* CONFIG_COMPILER_ISA_THUMB2 */ #endif /* CONFIG_CPU_CORTEX_M */ thread->callee_saved.psp = (uint32_t)iframe; thread->arch.basepri = 0; #if defined(CONFIG_USERSPACE) || defined(CONFIG_FPU_SHARING) thread->arch.mode = 0; #if FP_GUARD_EXTRA_SIZE > 0 if ((thread->base.user_options & K_FP_REGS) != 0) { thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk; } #endif #if defined(CONFIG_USERSPACE) thread->arch.priv_stack_start = 0; #endif #endif /* * initial values in all other registers/thread entries are * irrelevant. */ } #if defined(CONFIG_MPU_STACK_GUARD) && defined(CONFIG_FPU) \ && defined(CONFIG_FPU_SHARING) static inline void z_arm_thread_stack_info_adjust(struct k_thread *thread, bool use_large_guard) { if (use_large_guard) { /* Switch to use a large MPU guard if not already. */ if ((thread->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) == 0) { /* Default guard size is used. Update required. */ thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk; #if defined(CONFIG_USERSPACE) if (thread->arch.priv_stack_start) { /* User thread */ thread->arch.priv_stack_start += FP_GUARD_EXTRA_SIZE; } else #endif /* CONFIG_USERSPACE */ { /* Privileged thread */ thread->stack_info.start += FP_GUARD_EXTRA_SIZE; thread->stack_info.size -= FP_GUARD_EXTRA_SIZE; } } } else { /* Switch to use the default MPU guard size if not already. */ if ((thread->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) { /* Large guard size is used. Update required. */ thread->arch.mode &= ~Z_ARM_MODE_MPU_GUARD_FLOAT_Msk; #if defined(CONFIG_USERSPACE) if (thread->arch.priv_stack_start) { /* User thread */ thread->arch.priv_stack_start -= FP_GUARD_EXTRA_SIZE; } else #endif /* CONFIG_USERSPACE */ { /* Privileged thread */ thread->stack_info.start -= FP_GUARD_EXTRA_SIZE; thread->stack_info.size += FP_GUARD_EXTRA_SIZE; } } } } /* * Adjust the MPU stack guard size together with the FPU * policy and the stack_info values for the thread that is * being switched in. */ uint32_t z_arm_mpu_stack_guard_and_fpu_adjust(struct k_thread *thread) { if (((thread->base.user_options & K_FP_REGS) != 0) || ((thread->arch.mode & CONTROL_FPCA_Msk) != 0)) { /* The thread has been pre-tagged (at creation or later) with * K_FP_REGS, i.e. it is expected to be using the FPU registers * (if not already). Activate lazy stacking and program a large * MPU guard to safely detect privilege thread stack overflows. * * OR * The thread is not pre-tagged with K_FP_REGS, but it has * generated an FP context. Activate lazy stacking and * program a large MPU guard to detect privilege thread * stack overflows. */ FPU->FPCCR |= FPU_FPCCR_LSPEN_Msk; z_arm_thread_stack_info_adjust(thread, true); /* Tag the thread with K_FP_REGS */ thread->base.user_options |= K_FP_REGS; return MPU_GUARD_ALIGN_AND_SIZE_FLOAT; } /* Thread is not pre-tagged with K_FP_REGS, and it has * not been using the FPU. Since there is no active FPU * context, de-activate lazy stacking and program the * default MPU guard size. */ FPU->FPCCR &= (~FPU_FPCCR_LSPEN_Msk); z_arm_thread_stack_info_adjust(thread, false); return MPU_GUARD_ALIGN_AND_SIZE; } #endif #ifdef CONFIG_USERSPACE FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, void *p1, void *p2, void *p3) { /* Set up privileged stack before entering user mode */ _current->arch.priv_stack_start = (uint32_t)z_priv_stack_find(_current->stack_obj); #if defined(CONFIG_MPU_STACK_GUARD) #if defined(CONFIG_THREAD_STACK_INFO) /* We're dropping to user mode which means the guard area is no * longer used here, it instead is moved to the privilege stack * to catch stack overflows there. Un-do the calculations done * which accounted for memory borrowed from the thread stack. */ #if FP_GUARD_EXTRA_SIZE > 0 if ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) { _current->stack_info.start -= FP_GUARD_EXTRA_SIZE; _current->stack_info.size += FP_GUARD_EXTRA_SIZE; } #endif /* FP_GUARD_EXTRA_SIZE */ _current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE; _current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE; #endif /* CONFIG_THREAD_STACK_INFO */ /* Stack guard area reserved at the bottom of the thread's * privileged stack. Adjust the available (writable) stack * buffer area accordingly. */ #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) _current->arch.priv_stack_start += ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ? MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE; #else _current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE; #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */ #endif /* CONFIG_MPU_STACK_GUARD */ z_arm_userspace_enter(user_entry, p1, p2, p3, (uint32_t)_current->stack_info.start, _current->stack_info.size - _current->stack_info.delta); CODE_UNREACHABLE; } #endif #if defined(CONFIG_BUILTIN_STACK_GUARD) /* * @brief Configure ARM built-in stack guard * * This function configures per thread stack guards by reprogramming * the built-in Process Stack Pointer Limit Register (PSPLIM). * The functionality is meant to be used during context switch. * * @param thread thread info data structure. */ void configure_builtin_stack_guard(struct k_thread *thread) { #if defined(CONFIG_USERSPACE) if ((thread->arch.mode & CONTROL_nPRIV_Msk) != 0) { /* Only configure stack limit for threads in privileged mode * (i.e supervisor threads or user threads doing system call). * User threads executing in user mode do not require a stack * limit protection. */ __set_PSPLIM(0); return; } /* Only configure PSPLIM to guard the privileged stack area, if * the thread is currently using it, otherwise guard the default * thread stack. Note that the conditional check relies on the * thread privileged stack being allocated in higher memory area * than the default thread stack (ensured by design). */ uint32_t guard_start = ((thread->arch.priv_stack_start) && (__get_PSP() >= thread->arch.priv_stack_start)) ? (uint32_t)thread->arch.priv_stack_start : (uint32_t)thread->stack_obj; __ASSERT(thread->stack_info.start == ((uint32_t)thread->stack_obj), "stack_info.start does not point to the start of the" "thread allocated area."); #else uint32_t guard_start = thread->stack_info.start; #endif #if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM) __set_PSPLIM(guard_start); #else #error "Built-in PSP limit checks not supported by HW" #endif } #endif /* CONFIG_BUILTIN_STACK_GUARD */ #if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE) #define IS_MPU_GUARD_VIOLATION(guard_start, guard_len, fault_addr, stack_ptr) \ ((fault_addr != -EINVAL) ? \ ((fault_addr >= guard_start) && \ (fault_addr < (guard_start + guard_len)) && \ (stack_ptr < (guard_start + guard_len))) \ : \ (stack_ptr < (guard_start + guard_len))) /** * @brief Assess occurrence of current thread's stack corruption * * This function performs an assessment whether a memory fault (on a * given memory address) is the result of stack memory corruption of * the current thread. * * Thread stack corruption for supervisor threads or user threads in * privilege mode (when User Space is supported) is reported upon an * attempt to access the stack guard area (if MPU Stack Guard feature * is supported). Additionally the current PSP (process stack pointer) * must be pointing inside or below the guard area. * * Thread stack corruption for user threads in user mode is reported, * if the current PSP is pointing below the start of the current * thread's stack. * * Notes: * - we assume a fully descending stack, * - we assume a stacking error has occurred, * - the function shall be called when handling MemManage and Bus fault, * and only if a Stacking error has been reported. * * If stack corruption is detected, the function returns the lowest * allowed address where the Stack Pointer can safely point to, to * prevent from errors when un-stacking the corrupted stack frame * upon exception return. * * @param fault_addr memory address on which memory access violation * has been reported. It can be invalid (-EINVAL), * if only Stacking error has been reported. * @param psp current address the PSP points to * * @return The lowest allowed stack frame pointer, if error is a * thread stack corruption, otherwise return 0. */ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp) { #if defined(CONFIG_MULTITHREADING) const struct k_thread *thread = _current; if (thread == NULL) { return 0; } #endif #if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \ defined(CONFIG_MPU_STACK_GUARD) uint32_t guard_len = ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ? MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE; #else /* If MPU_STACK_GUARD is not enabled, the guard length is * effectively zero. Stack overflows may be detected only * for user threads in nPRIV mode. */ uint32_t guard_len = MPU_GUARD_ALIGN_AND_SIZE; #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */ #if defined(CONFIG_USERSPACE) if (thread->arch.priv_stack_start) { /* User thread */ if ((__get_CONTROL() & CONTROL_nPRIV_Msk) == 0U) { /* User thread in privilege mode */ if (IS_MPU_GUARD_VIOLATION( thread->arch.priv_stack_start - guard_len, guard_len, fault_addr, psp)) { /* Thread's privilege stack corruption */ return thread->arch.priv_stack_start; } } else { if (psp < (uint32_t)thread->stack_obj) { /* Thread's user stack corruption */ return (uint32_t)thread->stack_obj; } } } else { /* Supervisor thread */ if (IS_MPU_GUARD_VIOLATION(thread->stack_info.start - guard_len, guard_len, fault_addr, psp)) { /* Supervisor thread stack corruption */ return thread->stack_info.start; } } #else /* CONFIG_USERSPACE */ #if defined(CONFIG_MULTITHREADING) if (IS_MPU_GUARD_VIOLATION(thread->stack_info.start - guard_len, guard_len, fault_addr, psp)) { /* Thread stack corruption */ return thread->stack_info.start; } #else if (IS_MPU_GUARD_VIOLATION((uint32_t)z_main_stack, guard_len, fault_addr, psp)) { /* Thread stack corruption */ return (uint32_t)Z_THREAD_STACK_BUFFER(z_main_stack); } #endif #endif /* CONFIG_USERSPACE */ return 0; } #endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */ #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) int arch_float_disable(struct k_thread *thread) { if (thread != _current) { return -EINVAL; } if (arch_is_in_isr()) { return -EINVAL; } /* Disable all floating point capabilities for the thread */ /* K_FP_REG flag is used in SWAP and stack check fail. Locking * interrupts here prevents a possible context-switch or MPU * fault to take an outdated thread user_options flag into * account. */ int key = arch_irq_lock(); thread->base.user_options &= ~K_FP_REGS; __set_CONTROL(__get_CONTROL() & (~CONTROL_FPCA_Msk)); /* No need to add an ISB barrier after setting the CONTROL * register; arch_irq_unlock() already adds one. */ arch_irq_unlock(key); return 0; } int arch_float_enable(struct k_thread *thread, unsigned int options) { /* This is not supported in Cortex-M and Cortex-R does not have FPU */ return -ENOTSUP; } #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */ /* Internal function for Cortex-M initialization, * applicable to either case of running Zephyr * with or without multi-threading support. */ static void z_arm_prepare_switch_to_main(void) { #if defined(CONFIG_FPU) /* Initialize the Floating Point Status and Control Register when in * Unshared FP Registers mode (In Shared FP Registers mode, FPSCR is * initialized at thread creation for threads that make use of the FP). */ __set_FPSCR(0); #if defined(CONFIG_FPU_SHARING) /* In Sharing mode clearing FPSCR may set the CONTROL.FPCA flag. */ __set_CONTROL(__get_CONTROL() & (~(CONTROL_FPCA_Msk))); __ISB(); #endif /* CONFIG_FPU_SHARING */ #endif /* CONFIG_FPU */ #ifdef CONFIG_ARM_MPU /* Configure static memory map. This will program MPU regions, * to set up access permissions for fixed memory sections, such * as Application Memory or No-Cacheable SRAM area. * * This function is invoked once, upon system initialization. */ z_arm_configure_static_mpu_regions(); #endif } void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr, k_thread_entry_t _main) { z_arm_prepare_switch_to_main(); _current = main_thread; #ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING z_thread_mark_switched_in(); #endif /* the ready queue cache already contains the main thread */ #if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE) /* * If stack protection is enabled, make sure to set it * before jumping to thread entry function */ z_arm_configure_dynamic_mpu_regions(main_thread); #endif #if defined(CONFIG_BUILTIN_STACK_GUARD) /* Set PSPLIM register for built-in stack guarding of main thread. */ #if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM) __set_PSPLIM(main_thread->stack_info.start); #else #error "Built-in PSP limit checks not supported by HW" #endif #endif /* CONFIG_BUILTIN_STACK_GUARD */ /* * Set PSP to the highest address of the main stack * before enabling interrupts and jumping to main. */ __asm__ volatile ( "mov r0, %0\n\t" /* Store _main in R0 */ #if defined(CONFIG_CPU_CORTEX_M) "msr PSP, %1\n\t" /* __set_PSP(stack_ptr) */ #endif "movs r1, #0\n\t" #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \ || defined(CONFIG_ARMV7_R) "cpsie i\n\t" /* __enable_irq() */ #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) "cpsie if\n\t" /* __enable_irq(); __enable_fault_irq() */ "msr BASEPRI, r1\n\t" /* __set_BASEPRI(0) */ #else #error Unknown ARM architecture #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ "isb\n\t" "movs r2, #0\n\t" "movs r3, #0\n\t" "bl z_thread_entry\n\t" /* z_thread_entry(_main, 0, 0, 0); */ : : "r" (_main), "r" (stack_ptr) : "r0" /* not to be overwritten by msr PSP, %1 */ ); CODE_UNREACHABLE; } #if !defined(CONFIG_MULTITHREADING) && defined(CONFIG_CPU_CORTEX_M) FUNC_NORETURN void z_arm_switch_to_main_no_multithreading( k_thread_entry_t main_entry, void *p1, void *p2, void *p3) { z_arm_prepare_switch_to_main(); /* Set PSP to the highest address of the main stack. */ char *psp = Z_THREAD_STACK_BUFFER(z_main_stack) + K_THREAD_STACK_SIZEOF(z_main_stack); #if defined(CONFIG_BUILTIN_STACK_GUARD) char *psplim = (Z_THREAD_STACK_BUFFER(z_main_stack)); /* Clear PSPLIM before setting it to guard the main stack area. */ __set_PSPLIM(0); #endif /* Store all required input in registers, to be accesible * after stack pointer change. The function is not going * to return, so callee-saved registers do not need to be * stacked. */ register void *p1_inreg __asm__("r0") = p1; register void *p2_inreg __asm__("r1") = p2; register void *p3_inreg __asm__("r2") = p3; __asm__ volatile ( #ifdef CONFIG_BUILTIN_STACK_GUARD "msr PSPLIM, %[_psplim]\n\t" #endif "msr PSP, %[_psp]\n\t" /* __set_PSP(psp) */ #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) "cpsie i\n\t" /* enable_irq() */ #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) "cpsie if\n\t" /* __enable_irq(); __enable_fault_irq() */ "mov r3, #0\n\t" "msr BASEPRI, r3\n\t" /* __set_BASEPRI(0) */ #endif "isb\n\t" "blx %[_main_entry]\n\t" /* main_entry(p1, p2, p3) */ #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) "cpsid i\n\t" /* disable_irq() */ #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) "msr BASEPRI, %[basepri]\n\t"/* __set_BASEPRI(_EXC_IRQ_DEFAULT_PRIO) */ "isb\n\t" #endif "loop: b loop\n\t" /* while (true); */ : : "r" (p1_inreg), "r" (p2_inreg), "r" (p3_inreg), [_psp]"r" (psp), [_main_entry]"r" (main_entry) #if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) , [basepri] "r" (_EXC_IRQ_DEFAULT_PRIO) #endif #ifdef CONFIG_BUILTIN_STACK_GUARD , [_psplim]"r" (psplim) #endif : ); CODE_UNREACHABLE; /* LCOV_EXCL_LINE */ } #endif /* !CONFIG_MULTITHREADING && CONFIG_CPU_CORTEX_M */
apache-2.0
chiaming0914/awe-cpp-sdk
aws-cpp-sdk-route53/include/aws/route53/model/GetHealthCheckStatusResult.h
4264
/* * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include <aws/route53/Route53_EXPORTS.h> #include <aws/core/utils/memory/stl/AWSVector.h> #include <aws/route53/model/HealthCheckObservation.h> #include <utility> namespace Aws { template<typename RESULT_TYPE> class AmazonWebServiceResult; namespace Utils { namespace Xml { class XmlDocument; } // namespace Xml } // namespace Utils namespace Route53 { namespace Model { /** * <p>A complex type that contains the response to a <code>GetHealthCheck</code> * request.</p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53-2013-04-01/GetHealthCheckStatusResponse">AWS * API Reference</a></p> */ class AWS_ROUTE53_API GetHealthCheckStatusResult { public: GetHealthCheckStatusResult(); GetHealthCheckStatusResult(const AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result); GetHealthCheckStatusResult& operator=(const AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result); /** * <p>A list that contains one <code>HealthCheckObservation</code> element for each * Amazon Route 53 health checker that is reporting a status about the health check * endpoint.</p> */ inline const Aws::Vector<HealthCheckObservation>& GetHealthCheckObservations() const{ return m_healthCheckObservations; } /** * <p>A list that contains one <code>HealthCheckObservation</code> element for each * Amazon Route 53 health checker that is reporting a status about the health check * endpoint.</p> */ inline void SetHealthCheckObservations(const Aws::Vector<HealthCheckObservation>& value) { m_healthCheckObservations = value; } /** * <p>A list that contains one <code>HealthCheckObservation</code> element for each * Amazon Route 53 health checker that is reporting a status about the health check * endpoint.</p> */ inline void SetHealthCheckObservations(Aws::Vector<HealthCheckObservation>&& value) { m_healthCheckObservations = std::move(value); } /** * <p>A list that contains one <code>HealthCheckObservation</code> element for each * Amazon Route 53 health checker that is reporting a status about the health check * endpoint.</p> */ inline GetHealthCheckStatusResult& WithHealthCheckObservations(const Aws::Vector<HealthCheckObservation>& value) { SetHealthCheckObservations(value); return *this;} /** * <p>A list that contains one <code>HealthCheckObservation</code> element for each * Amazon Route 53 health checker that is reporting a status about the health check * endpoint.</p> */ inline GetHealthCheckStatusResult& WithHealthCheckObservations(Aws::Vector<HealthCheckObservation>&& value) { SetHealthCheckObservations(std::move(value)); return *this;} /** * <p>A list that contains one <code>HealthCheckObservation</code> element for each * Amazon Route 53 health checker that is reporting a status about the health check * endpoint.</p> */ inline GetHealthCheckStatusResult& AddHealthCheckObservations(const HealthCheckObservation& value) { m_healthCheckObservations.push_back(value); return *this; } /** * <p>A list that contains one <code>HealthCheckObservation</code> element for each * Amazon Route 53 health checker that is reporting a status about the health check * endpoint.</p> */ inline GetHealthCheckStatusResult& AddHealthCheckObservations(HealthCheckObservation&& value) { m_healthCheckObservations.push_back(std::move(value)); return *this; } private: Aws::Vector<HealthCheckObservation> m_healthCheckObservations; }; } // namespace Model } // namespace Route53 } // namespace Aws
apache-2.0
chiaming0914/awe-cpp-sdk
aws-cpp-sdk-datapipeline/source/model/DescribeObjectsRequest.cpp
2056
/* * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include <aws/datapipeline/model/DescribeObjectsRequest.h> #include <aws/core/utils/json/JsonSerializer.h> #include <utility> using namespace Aws::DataPipeline::Model; using namespace Aws::Utils::Json; using namespace Aws::Utils; DescribeObjectsRequest::DescribeObjectsRequest() : m_pipelineIdHasBeenSet(false), m_objectIdsHasBeenSet(false), m_evaluateExpressions(false), m_evaluateExpressionsHasBeenSet(false), m_markerHasBeenSet(false) { } Aws::String DescribeObjectsRequest::SerializePayload() const { JsonValue payload; if(m_pipelineIdHasBeenSet) { payload.WithString("pipelineId", m_pipelineId); } if(m_objectIdsHasBeenSet) { Array<JsonValue> objectIdsJsonList(m_objectIds.size()); for(unsigned objectIdsIndex = 0; objectIdsIndex < objectIdsJsonList.GetLength(); ++objectIdsIndex) { objectIdsJsonList[objectIdsIndex].AsString(m_objectIds[objectIdsIndex]); } payload.WithArray("objectIds", std::move(objectIdsJsonList)); } if(m_evaluateExpressionsHasBeenSet) { payload.WithBool("evaluateExpressions", m_evaluateExpressions); } if(m_markerHasBeenSet) { payload.WithString("marker", m_marker); } return payload.WriteReadable(); } Aws::Http::HeaderValueCollection DescribeObjectsRequest::GetRequestSpecificHeaders() const { Aws::Http::HeaderValueCollection headers; headers.insert(Aws::Http::HeaderValuePair("X-Amz-Target", "DataPipeline.DescribeObjects")); return headers; }
apache-2.0
awakecoding/xrdp-ng
freerds/core/listener.c
2099
/** * xrdp: A Remote Desktop Protocol server. * * Copyright (C) Jay Sorg 2004-2012 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * listen for incoming connection */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "freerds.h" #include <winpr/crt.h> #include <winpr/thread.h> #include <errno.h> #include <sys/select.h> #include <sys/signal.h> void freerds_peer_accepted(freerdp_listener* instance, freerdp_peer* client) { freerds_connection_create(client); } xrdpListener* freerds_listener_create(void) { freerdp_listener* listener; listener = freerdp_listener_new(); listener->PeerAccepted = freerds_peer_accepted; return (xrdpListener*) listener; } void freerds_listener_delete(xrdpListener* self) { freerdp_listener_free((freerdp_listener*) self); } int freerds_listener_main_loop(xrdpListener* self) { DWORD status; DWORD nCount; HANDLE events[32]; HANDLE TermEvent; freerdp_listener* listener; listener = (freerdp_listener*) self; listener->Open(listener, NULL, 3389); TermEvent = g_get_term_event(); while (1) { nCount = 0; events[nCount++] = TermEvent; if (listener->GetEventHandles(listener, events, &nCount) < 0) { fprintf(stderr, "Failed to get FreeRDP file descriptor\n"); break; } status = WaitForMultipleObjects(nCount, events, FALSE, INFINITE); if (WaitForSingleObject(TermEvent, 0) == WAIT_OBJECT_0) { break; } if (listener->CheckFileDescriptor(listener) != TRUE) { fprintf(stderr, "Failed to check FreeRDP file descriptor\n"); break; } } listener->Close(listener); return 0; }
apache-2.0
NLPIE/NLP-TAB-vm
scripts/nlptab-webapp.sh
215
git clone https://github.com/nlpie/nlptab-webapp.git cd nlptab-webapp npm install gulp sed -i.bak "s/esServer: 'localhost:9200'/esServer: 'localhost:58032'/g" app/config.js cp -R app/* /var/www/html/ ufw allow http
apache-2.0
plxaye/chromium
src/content/common/resource_messages.cc
9210
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "content/common/resource_messages.h" #include "net/base/load_timing_info.h" #include "net/http/http_response_headers.h" #include "webkit/glue/resource_loader_bridge.h" namespace IPC { void ParamTraits<scoped_refptr<net::HttpResponseHeaders> >::Write( Message* m, const param_type& p) { WriteParam(m, p.get() != NULL); if (p) { // Do not disclose Set-Cookie headers over IPC. p->Persist(m, net::HttpResponseHeaders::PERSIST_SANS_COOKIES); } } bool ParamTraits<scoped_refptr<net::HttpResponseHeaders> >::Read( const Message* m, PickleIterator* iter, param_type* r) { bool has_object; if (!ReadParam(m, iter, &has_object)) return false; if (has_object) *r = new net::HttpResponseHeaders(*m, iter); return true; } void ParamTraits<scoped_refptr<net::HttpResponseHeaders> >::Log( const param_type& p, std::string* l) { l->append("<HttpResponseHeaders>"); } void ParamTraits<webkit_base::DataElement>::Write( Message* m, const param_type& p) { WriteParam(m, static_cast<int>(p.type())); switch (p.type()) { case webkit_base::DataElement::TYPE_BYTES: { m->WriteData(p.bytes(), static_cast<int>(p.length())); break; } case webkit_base::DataElement::TYPE_FILE: { WriteParam(m, p.path()); WriteParam(m, p.offset()); WriteParam(m, p.length()); WriteParam(m, p.expected_modification_time()); break; } case webkit_base::DataElement::TYPE_FILE_FILESYSTEM: { WriteParam(m, p.url()); WriteParam(m, p.offset()); WriteParam(m, p.length()); WriteParam(m, p.expected_modification_time()); break; } default: { DCHECK(p.type() == webkit_base::DataElement::TYPE_BLOB); WriteParam(m, p.url()); WriteParam(m, p.offset()); WriteParam(m, p.length()); break; } } } bool ParamTraits<webkit_base::DataElement>::Read( const Message* m, PickleIterator* iter, param_type* r) { int type; if (!ReadParam(m, iter, &type)) return false; switch (type) { case webkit_base::DataElement::TYPE_BYTES: { const char* data; int len; if (!m->ReadData(iter, &data, &len)) return false; r->SetToBytes(data, len); break; } case webkit_base::DataElement::TYPE_FILE: { base::FilePath file_path; uint64 offset, length; base::Time expected_modification_time; if (!ReadParam(m, iter, &file_path)) return false; if (!ReadParam(m, iter, &offset)) return false; if (!ReadParam(m, iter, &length)) return false; if (!ReadParam(m, iter, &expected_modification_time)) return false; r->SetToFilePathRange(file_path, offset, length, expected_modification_time); break; } case webkit_base::DataElement::TYPE_FILE_FILESYSTEM: { GURL file_system_url; uint64 offset, length; base::Time expected_modification_time; if (!ReadParam(m, iter, &file_system_url)) return false; if (!ReadParam(m, iter, &offset)) return false; if (!ReadParam(m, iter, &length)) return false; if (!ReadParam(m, iter, &expected_modification_time)) return false; r->SetToFileSystemUrlRange(file_system_url, offset, length, expected_modification_time); break; } default: { DCHECK(type == webkit_base::DataElement::TYPE_BLOB); GURL blob_url; uint64 offset, length; if (!ReadParam(m, iter, &blob_url)) return false; if (!ReadParam(m, iter, &offset)) return false; if (!ReadParam(m, iter, &length)) return false; r->SetToBlobUrlRange(blob_url, offset, length); break; } } return true; } void ParamTraits<webkit_base::DataElement>::Log( const param_type& p, std::string* l) { l->append("<webkit_base::DataElement>"); } void ParamTraits<scoped_refptr<webkit_glue::ResourceDevToolsInfo> >::Write( Message* m, const param_type& p) { WriteParam(m, p.get() != NULL); if (p) { WriteParam(m, p->http_status_code); WriteParam(m, p->http_status_text); WriteParam(m, p->request_headers); WriteParam(m, p->response_headers); WriteParam(m, p->request_headers_text); WriteParam(m, p->response_headers_text); } } bool ParamTraits<scoped_refptr<webkit_glue::ResourceDevToolsInfo> >::Read( const Message* m, PickleIterator* iter, param_type* r) { bool has_object; if (!ReadParam(m, iter, &has_object)) return false; if (!has_object) return true; *r = new webkit_glue::ResourceDevToolsInfo(); return ReadParam(m, iter, &(*r)->http_status_code) && ReadParam(m, iter, &(*r)->http_status_text) && ReadParam(m, iter, &(*r)->request_headers) && ReadParam(m, iter, &(*r)->response_headers) && ReadParam(m, iter, &(*r)->request_headers_text) && ReadParam(m, iter, &(*r)->response_headers_text); } void ParamTraits<scoped_refptr<webkit_glue::ResourceDevToolsInfo> >::Log( const param_type& p, std::string* l) { l->append("("); if (p) { LogParam(p->request_headers, l); l->append(", "); LogParam(p->response_headers, l); } l->append(")"); } void ParamTraits<net::LoadTimingInfo>::Write( Message* m, const param_type& p) { WriteParam(m, p.socket_log_id); WriteParam(m, p.socket_reused); WriteParam(m, p.request_start_time.is_null()); if (p.request_start_time.is_null()) return; WriteParam(m, p.request_start_time); WriteParam(m, p.request_start); WriteParam(m, p.proxy_resolve_start); WriteParam(m, p.proxy_resolve_end); WriteParam(m, p.connect_timing.dns_start); WriteParam(m, p.connect_timing.dns_end); WriteParam(m, p.connect_timing.connect_start); WriteParam(m, p.connect_timing.connect_end); WriteParam(m, p.connect_timing.ssl_start); WriteParam(m, p.connect_timing.ssl_end); WriteParam(m, p.send_start); WriteParam(m, p.send_end); WriteParam(m, p.receive_headers_end); } bool ParamTraits<net::LoadTimingInfo>::Read( const Message* m, PickleIterator* iter, param_type* r) { bool has_no_times; if (!ReadParam(m, iter, &r->socket_log_id) || !ReadParam(m, iter, &r->socket_reused) || !ReadParam(m, iter, &has_no_times)) { return false; } if (has_no_times) return true; return ReadParam(m, iter, &r->request_start_time) && ReadParam(m, iter, &r->request_start) && ReadParam(m, iter, &r->proxy_resolve_start) && ReadParam(m, iter, &r->proxy_resolve_end) && ReadParam(m, iter, &r->connect_timing.dns_start) && ReadParam(m, iter, &r->connect_timing.dns_end) && ReadParam(m, iter, &r->connect_timing.connect_start) && ReadParam(m, iter, &r->connect_timing.connect_end) && ReadParam(m, iter, &r->connect_timing.ssl_start) && ReadParam(m, iter, &r->connect_timing.ssl_end) && ReadParam(m, iter, &r->send_start) && ReadParam(m, iter, &r->send_end) && ReadParam(m, iter, &r->receive_headers_end); } void ParamTraits<net::LoadTimingInfo>::Log(const param_type& p, std::string* l) { l->append("("); LogParam(p.socket_log_id, l); l->append(","); LogParam(p.socket_reused, l); l->append(","); LogParam(p.request_start_time, l); l->append(", "); LogParam(p.request_start, l); l->append(", "); LogParam(p.proxy_resolve_start, l); l->append(", "); LogParam(p.proxy_resolve_end, l); l->append(", "); LogParam(p.connect_timing.dns_start, l); l->append(", "); LogParam(p.connect_timing.dns_end, l); l->append(", "); LogParam(p.connect_timing.connect_start, l); l->append(", "); LogParam(p.connect_timing.connect_end, l); l->append(", "); LogParam(p.connect_timing.ssl_start, l); l->append(", "); LogParam(p.connect_timing.ssl_end, l); l->append(", "); LogParam(p.send_start, l); l->append(", "); LogParam(p.send_end, l); l->append(", "); LogParam(p.receive_headers_end, l); l->append(")"); } void ParamTraits<scoped_refptr<webkit_glue::ResourceRequestBody> >::Write( Message* m, const param_type& p) { WriteParam(m, p.get() != NULL); if (p) { WriteParam(m, *p->elements()); WriteParam(m, p->identifier()); } } bool ParamTraits<scoped_refptr<webkit_glue::ResourceRequestBody> >::Read( const Message* m, PickleIterator* iter, param_type* r) { bool has_object; if (!ReadParam(m, iter, &has_object)) return false; if (!has_object) return true; std::vector<webkit_base::DataElement> elements; if (!ReadParam(m, iter, &elements)) return false; int64 identifier; if (!ReadParam(m, iter, &identifier)) return false; *r = new webkit_glue::ResourceRequestBody; (*r)->swap_elements(&elements); (*r)->set_identifier(identifier); return true; } void ParamTraits<scoped_refptr<webkit_glue::ResourceRequestBody> >::Log( const param_type& p, std::string* l) { l->append("<webkit_glue::ResourceRequestBody>"); } } // namespace IPC
apache-2.0
growingio/phoenix
phoenix-core/src/test/java/org/apache/phoenix/query/GuidePostsCacheWrapperTest.java
4037
/** * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language * governing permissions and limitations under the License. */ package org.apache.phoenix.query; import com.google.common.collect.Lists; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.schema.PColumnFamily; import org.apache.phoenix.schema.PName; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.stats.GuidePostsKey; import org.junit.Before; import org.junit.Test; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; import java.util.HashSet; import java.util.List; import java.util.Set; public class GuidePostsCacheWrapperTest { @Mock GuidePostsCache cache; GuidePostsCacheWrapper wrapper; byte[] table = org.apache.hadoop.hbase.util.Bytes.toBytes("tableName"); byte[] columnFamily1 = Bytes.toBytesBinary("cf1"); byte[] columnFamily2 = Bytes.toBytesBinary("cf2"); @Before public void init() { MockitoAnnotations.initMocks(this); wrapper = new GuidePostsCacheWrapper(cache); } @Test public void invalidateAllTableDescriptor() { Set<byte[]> cfSet = new HashSet<>(); cfSet.add(columnFamily1); cfSet.add(columnFamily2); TableDescriptor tableDesc = Mockito.mock(TableDescriptor.class); TableName tableName = TableName.valueOf(table); Mockito.when(tableDesc.getColumnFamilyNames()).thenReturn(cfSet); Mockito.when(tableDesc.getTableName()).thenReturn(tableName); wrapper.invalidateAll(tableDesc); Mockito.verify(cache,Mockito.times(1)).invalidate(new GuidePostsKey(table,columnFamily1)); Mockito.verify(cache,Mockito.times(1)).invalidate(new GuidePostsKey(table,columnFamily2)); } @Test public void invalidateAllPTable(){ PTable ptable = Mockito.mock(PTable.class); PName pname = Mockito.mock(PName.class); PName pnamecf1 = Mockito.mock(PName.class); PName pnamecf2 = Mockito.mock(PName.class); Mockito.when(ptable.getPhysicalName()).thenReturn(pname); Mockito.when(pname.getBytes()).thenReturn(table); PColumnFamily cf1 = Mockito.mock(PColumnFamily.class); PColumnFamily cf2 = Mockito.mock(PColumnFamily.class); Mockito.when(cf1.getName()).thenReturn(pnamecf1); Mockito.when(cf2.getName()).thenReturn(pnamecf2); Mockito.when(pnamecf1.getBytes()).thenReturn(columnFamily1); Mockito.when(pnamecf2.getBytes()).thenReturn(columnFamily2); List<PColumnFamily> cfList = Lists.newArrayList(cf1,cf2); Mockito.when(ptable.getColumnFamilies()).thenReturn(cfList); wrapper.invalidateAll(ptable); Mockito.verify(cache,Mockito.times(1)).invalidate(new GuidePostsKey(table,columnFamily1)); Mockito.verify(cache,Mockito.times(1)).invalidate(new GuidePostsKey(table,columnFamily2)); } @Test(expected = NullPointerException.class) public void invalidateAllTableDescriptorNull() { TableDescriptor tableDesc = null; wrapper.invalidateAll(tableDesc); } @Test(expected = NullPointerException.class) public void invalidateAllPTableNull(){ PTable ptable = null; wrapper.invalidateAll(ptable); } }
apache-2.0
akdasari/SparkCore
spark-framework/src/main/java/org/sparkcommerce/core/util/domain/CodeType.java
1219
/* * #%L * SparkCommerce Framework * %% * Copyright (C) 2009 - 2013 Spark Commerce * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package org.sparkcommerce.core.util.domain; import java.io.Serializable; @Deprecated public interface CodeType extends Serializable { public void setId(Long id); public Long getId(); public void setCodeType(String type); public String getCodeType(); public void setKey(String key); public String getKey(); public void setDescription(String description); public String getDescription(); public void setModifiable(Boolean modifiable); public Boolean getModifiable(); public Boolean isModifiable(); }
apache-2.0
VladiMihaylenko/omim
search/geometry_utils.cpp
384
#include "search/geometry_utils.hpp" #include "indexer/scales.hpp" #include "geometry/mercator.hpp" namespace search { double PointDistance(m2::PointD const & a, m2::PointD const & b) { return MercatorBounds::DistanceOnEarth(a, b); } bool IsEqualMercator(m2::RectD const & r1, m2::RectD const & r2, double eps) { return m2::IsEqual(r1, r2, eps, eps); } } // namespace search
apache-2.0
helifu/kudu
java/kudu-client/src/test/java/org/apache/kudu/client/TestPartitionPruner.java
35532
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.apache.kudu.client; import static org.junit.Assert.assertEquals; import java.util.ArrayList; import java.util.List; import com.google.common.collect.ImmutableList; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.apache.kudu.ColumnSchema; import org.apache.kudu.Schema; import org.apache.kudu.Type; import org.apache.kudu.client.KuduPredicate.ComparisonOp; import org.apache.kudu.test.KuduTestHarness; public class TestPartitionPruner { private KuduClient client; @Rule public KuduTestHarness harness = new KuduTestHarness(); @Before public void setUp() { client = harness.getClient(); } /** * Counts the partitions touched by a scan with optional primary key bounds. * The table is assumed to have three INT8 columns as the primary key. * * @param expectedTablets the expected number of tablets to satisfy the scan * @param table the table to scan * @param partitions the partitions of the table * @param lowerBoundPrimaryKey the optional lower bound primary key * @param upperBoundPrimaryKey the optional upper bound primary key */ private void checkPartitionsPrimaryKey(int expectedTablets, KuduTable table, List<Partition> partitions, byte[] lowerBoundPrimaryKey, byte[] upperBoundPrimaryKey) throws Exception { KuduScanToken.KuduScanTokenBuilder scanBuilder = client.newScanTokenBuilder(table); if (lowerBoundPrimaryKey != null) { PartialRow lower = table.getSchema().newPartialRow(); for (int i = 0; i < 3; i++) { lower.addByte(i, lowerBoundPrimaryKey[i]); } scanBuilder.lowerBound(lower); } if (upperBoundPrimaryKey != null) { PartialRow upper = table.getSchema().newPartialRow(); for (int i = 0; i < 3; i++) { upper.addByte(i, upperBoundPrimaryKey[i]); } scanBuilder.exclusiveUpperBound(upper); } PartitionPruner pruner = PartitionPruner.create(scanBuilder); int scannedPartitions = 0; for (Partition partition : partitions) { if (!pruner.shouldPruneForTests(partition)) { scannedPartitions++; } } // Check that the number of ScanTokens built for the scan matches. assertEquals(expectedTablets, scannedPartitions); assertEquals(scannedPartitions, scanBuilder.build().size()); assertEquals(expectedTablets == 0 ? 0 : 1, pruner.numRangesRemainingForTests()); } /** * Checks the number of tablets and pruner ranges generated for a scan. * * @param expectedTablets the expected number of tablets to satisfy the scan * @param expectedPrunerRanges the expected number of generated partition pruner ranges * @param table the table to scan * @param partitions the partitions of the table * @param predicates the predicates to apply to the scan */ private void checkPartitions(int expectedTablets, int expectedPrunerRanges, KuduTable table, List<Partition> partitions, KuduPredicate... predicates) { checkPartitions(expectedTablets, expectedPrunerRanges, table, partitions, null, null, predicates); } /** * Checks the number of tablets and pruner ranges generated for a scan with * predicates and optional partition key bounds. * * @param expectedTablets the expected number of tablets to satisfy the scan * @param expectedPrunerRanges the expected number of generated partition pruner ranges * @param table the table to scan * @param partitions the partitions of the table * @param lowerBoundPartitionKey an optional lower bound partition key * @param upperBoundPartitionKey an optional upper bound partition key * @param predicates the predicates to apply to the scan */ private void checkPartitions(int expectedTablets, int expectedPrunerRanges, KuduTable table, List<Partition> partitions, byte[] lowerBoundPartitionKey, byte[] upperBoundPartitionKey, KuduPredicate... predicates) { // Partition key bounds can't be applied to the ScanTokenBuilder. KuduScanner.KuduScannerBuilder scanBuilder = client.newScannerBuilder(table); for (KuduPredicate predicate : predicates) { scanBuilder.addPredicate(predicate); } if (lowerBoundPartitionKey != null) { scanBuilder.lowerBoundPartitionKeyRaw(lowerBoundPartitionKey); } if (upperBoundPartitionKey != null) { scanBuilder.exclusiveUpperBoundPartitionKeyRaw(upperBoundPartitionKey); } PartitionPruner pruner = PartitionPruner.create(scanBuilder); int scannedPartitions = 0; for (Partition partition : partitions) { if (!pruner.shouldPruneForTests(partition)) { scannedPartitions++; } } assertEquals(expectedTablets, scannedPartitions); assertEquals(expectedPrunerRanges, pruner.numRangesRemainingForTests()); // Check that the scan token builder comes up with the same amount. // The scan token builder does not allow for upper/lower partition keys. if (lowerBoundPartitionKey == null && upperBoundPartitionKey == null) { KuduScanToken.KuduScanTokenBuilder tokenBuilder = client.newScanTokenBuilder(table); for (KuduPredicate predicate : predicates) { tokenBuilder.addPredicate(predicate); } // Check that the number of ScanTokens built for the scan matches. assertEquals(expectedTablets, tokenBuilder.build().size()); } } /** * Retrieves the partitions of a table. * * @param table the table * @return the partitions of the table */ private List<Partition> getTablePartitions(KuduTable table) { List<Partition> partitions = new ArrayList<>(); for (KuduScanToken token : client.newScanTokenBuilder(table).build()) { partitions.add(token.getTablet().getPartition()); } return partitions; } @Test public void testPrimaryKeyRangePruning() throws Exception { // CREATE TABLE t // (a INT8, b INT8, c INT8) // PRIMARY KEY (a, b, c)) // PARTITION BY RANGE (a, b, c) // (PARTITION VALUES < (0, 0, 0), // PARTITION (0, 0, 0) <= VALUES < (10, 10, 10) // PARTITION (10, 10, 10) <= VALUES); ArrayList<ColumnSchema> columns = new ArrayList<>(3); columns.add(new ColumnSchema.ColumnSchemaBuilder("a", Type.INT8).key(true).build()); columns.add(new ColumnSchema.ColumnSchemaBuilder("b", Type.INT8).key(true).build()); columns.add(new ColumnSchema.ColumnSchemaBuilder("c", Type.INT8).key(true).build()); Schema schema = new Schema(columns); CreateTableOptions tableBuilder = new CreateTableOptions(); tableBuilder.setRangePartitionColumns(ImmutableList.of("a", "b", "c")); PartialRow split = schema.newPartialRow(); split.addByte("a", (byte) 0); split.addByte("b", (byte) 0); split.addByte("c", (byte) 0); tableBuilder.addSplitRow(split); split.addByte("a", (byte) 10); split.addByte("b", (byte) 10); split.addByte("c", (byte) 10); tableBuilder.addSplitRow(split); String tableName = "testPrimaryKeyRangePruning-" + System.currentTimeMillis(); client.createTable(tableName, schema, tableBuilder); KuduTable table = client.openTable(tableName); List<Partition> partitions = getTablePartitions(table); byte min = Byte.MIN_VALUE; // No bounds checkPartitionsPrimaryKey(3, table, partitions, null, null); // PK < (-1, min, min) checkPartitionsPrimaryKey(1, table, partitions, null, new byte[] { -1, min, min }); // PK < (0, 0, 0) checkPartitionsPrimaryKey(1, table, partitions, null, new byte[] { 0, 0, 0 }); // PK < (0, 0, min) checkPartitionsPrimaryKey(1, table, partitions, null, new byte[] { 0, 0, min }); // PK < (10, 10, 10) checkPartitionsPrimaryKey(2, table, partitions, null, new byte[] { 10, 10, 10 }); // PK < (100, min, min) checkPartitionsPrimaryKey(3, table, partitions, null, new byte[] { 100, min, min }); // PK >= (-10, -10, -10) checkPartitionsPrimaryKey(3, table, partitions, new byte[] { -10, -10, -10 }, null); // PK >= (0, 0, 0) checkPartitionsPrimaryKey(2, table, partitions, new byte[] { 0, 0, 0 }, null); // PK >= (100, 0, 0) checkPartitionsPrimaryKey(1, table, partitions, new byte[] { 100, 0, 0 }, null); // PK >= (-10, 0, 0) // PK < (100, 0, 0) checkPartitionsPrimaryKey(3, table, partitions, new byte[] { -10, 0, 0 }, new byte[] { 100, 0, 0 }); // PK >= (0, 0, 0) // PK < (10, 10, 10) checkPartitionsPrimaryKey(1, table, partitions, new byte[] { 0, 0, 0 }, new byte[] { 10, 0, 0 }); // PK >= (0, 0, 0) // PK < (10, 10, 11) checkPartitionsPrimaryKey(1, table, partitions, new byte[] { 0, 0, 0 }, new byte[] { 10, 0, 0 }); // PK < (0, 0, 0) // PK >= (10, 10, 11) checkPartitionsPrimaryKey(0, table, partitions, new byte[] { 10, 0, 0 }, new byte[] { 0, 0, 0 }); } @Test public void testPrimaryKeyPrefixRangePruning() throws Exception { // CREATE TABLE t // (a INT8, b INT8, c INT8) // PRIMARY KEY (a, b, c)) // PARTITION BY RANGE (a, b) // (PARTITION VALUES < (0, 0, 0)); ArrayList<ColumnSchema> columns = new ArrayList<>(3); columns.add(new ColumnSchema.ColumnSchemaBuilder("a", Type.INT8).key(true).build()); columns.add(new ColumnSchema.ColumnSchemaBuilder("b", Type.INT8).key(true).build()); columns.add(new ColumnSchema.ColumnSchemaBuilder("c", Type.INT8).key(true).build()); Schema schema = new Schema(columns); CreateTableOptions tableBuilder = new CreateTableOptions(); tableBuilder.setRangePartitionColumns(ImmutableList.of("a", "b")); PartialRow split = schema.newPartialRow(); split.addByte("a", (byte) 0); split.addByte("b", (byte) 0); tableBuilder.addSplitRow(split); String tableName = "testPrimaryKeyPrefixRangePruning-" + System.currentTimeMillis(); client.createTable(tableName, schema, tableBuilder); KuduTable table = client.openTable(tableName); List<Partition> partitions = getTablePartitions(table); final byte min = Byte.MIN_VALUE; final byte max = Byte.MAX_VALUE; // No bounds checkPartitionsPrimaryKey(2, table, partitions, null, null); // PK < (-1, min, min) // TODO(KUDU-2178): prune the upper partition. checkPartitionsPrimaryKey(2, table, partitions, null, new byte[] { -1, min, min }); // PK < (0, 0, min) // TODO(KUDU-2178): prune the upper partition. checkPartitionsPrimaryKey(2, table, partitions, null, new byte[] { 0, 0, min }); // PK < (0, 0, 0) checkPartitionsPrimaryKey(2, table, partitions, null, new byte[] { 0, 0, 0 }); // PK < (0, 1, min) checkPartitionsPrimaryKey(2, table, partitions, null, new byte[] { 0, 1, min }); // PK < (0, 1, 0) checkPartitionsPrimaryKey(2, table, partitions, null, new byte[] { 0, 1, 0 }); // PK < (max, max, min) checkPartitionsPrimaryKey(2, table, partitions, null, new byte[] { max, max, min }); // PK < (max, max, 0) checkPartitionsPrimaryKey(2, table, partitions, null, new byte[] { max, max, 0 }); // PK >= (0, 0, min) // TODO(KUDU-2178): prune the lower partition. checkPartitionsPrimaryKey(2, table, partitions, new byte[] { 0, 0, min }, null); // PK >= (0, 0, 0) // TODO(KUDU-2178): prune the lower partition. checkPartitionsPrimaryKey(2, table, partitions, new byte[] { 0, 0, 0 }, null); // PK >= (0, -1, 0) checkPartitionsPrimaryKey(2, table, partitions, new byte[] { 0, -1, 0 }, null); } @Test public void testRangePartitionPruning() throws Exception { // CREATE TABLE t // (a INT8, b STRING, c INT8) // PRIMARY KEY (a, b, c)) // PARTITION BY RANGE (c, b) // (PARTITION VALUES < (0, "m"), // PARTITION (0, "m") <= VALUES < (10, "r") // PARTITION (10, "r") <= VALUES); ColumnSchema a = new ColumnSchema.ColumnSchemaBuilder("a", Type.INT8).key(true).build(); ColumnSchema b = new ColumnSchema.ColumnSchemaBuilder("b", Type.STRING).key(true).build(); ColumnSchema c = new ColumnSchema.ColumnSchemaBuilder("c", Type.INT8).key(true).build(); Schema schema = new Schema(ImmutableList.of(a, b, c)); CreateTableOptions tableBuilder = new CreateTableOptions(); tableBuilder.setRangePartitionColumns(ImmutableList.of("c", "b")); PartialRow split = schema.newPartialRow(); split.addByte("c", (byte) 0); split.addString("b", "m"); tableBuilder.addSplitRow(split); split.addByte("c", (byte) 10); split.addString("b", "r"); tableBuilder.addSplitRow(split); String tableName = "testRangePartitionPruning-" + System.currentTimeMillis(); client.createTable(tableName, schema, tableBuilder); KuduTable table = client.openTable(tableName); List<Partition> partitions = getTablePartitions(table); // No Predicates checkPartitions(3, 1, table, partitions); // c < -10 checkPartitions(1, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.LESS, -10)); // c = -10 checkPartitions(1, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.EQUAL, -10)); // c < 10 checkPartitions(2, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.LESS, 10)); // c < 100 checkPartitions(3, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.LESS, 100)); // c < MIN checkPartitions(0, 0, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.LESS, Byte.MIN_VALUE)); // c < MAX checkPartitions(3, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.LESS, Byte.MAX_VALUE)); // c >= -10 checkPartitions(3, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.GREATER_EQUAL, -10)); // c >= 0 checkPartitions(3, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.GREATER_EQUAL, -10)); // c >= 5 checkPartitions(2, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.GREATER_EQUAL, 5)); // c >= 10 checkPartitions(2, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.GREATER_EQUAL, 10)); // c >= 100 checkPartitions(1, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.GREATER_EQUAL, 100)); // c >= MIN checkPartitions(3, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.GREATER_EQUAL, Byte.MIN_VALUE)); // c >= MAX checkPartitions(1, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.GREATER_EQUAL, Byte.MAX_VALUE)); // c >= -10 // c < 0 checkPartitions(1, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.GREATER_EQUAL, -10), KuduPredicate.newComparisonPredicate(c, ComparisonOp.LESS, 0)); // c >= 5 // c < 100 checkPartitions(2, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.GREATER_EQUAL, 5), KuduPredicate.newComparisonPredicate(c, ComparisonOp.LESS, 100)); // b = "" checkPartitions(3, 1, table, partitions, KuduPredicate.newComparisonPredicate(b, ComparisonOp.EQUAL, "")); // b >= "z" checkPartitions(3, 1, table, partitions, KuduPredicate.newComparisonPredicate(b, ComparisonOp.GREATER_EQUAL, "z")); // b < "a" checkPartitions(3, 1, table, partitions, KuduPredicate.newComparisonPredicate(b, ComparisonOp.LESS, "a")); // b >= "m" // b < "z" checkPartitions(3, 1, table, partitions, KuduPredicate.newComparisonPredicate(b, ComparisonOp.GREATER_EQUAL, "m"), KuduPredicate.newComparisonPredicate(b, ComparisonOp.LESS, "z")); // c >= 10 // b >= "r" checkPartitions(1, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.GREATER_EQUAL, 10), KuduPredicate.newComparisonPredicate(b, ComparisonOp.GREATER_EQUAL, "r")); // c >= 10 // b < "r" checkPartitions(2, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.GREATER_EQUAL, 10), KuduPredicate.newComparisonPredicate(b, ComparisonOp.LESS, "r")); // c = 10 // b < "r" checkPartitions(1, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.EQUAL, 10), KuduPredicate.newComparisonPredicate(b, ComparisonOp.LESS, "r")); // c < 0 // b < "m" checkPartitions(1, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.EQUAL, 0), KuduPredicate.newComparisonPredicate(b, ComparisonOp.LESS, "m")); // c < 0 // b < "z" checkPartitions(1, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.LESS, 0), KuduPredicate.newComparisonPredicate(b, ComparisonOp.LESS, "z")); // c = 0 // b = "m\0" checkPartitions(1, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.EQUAL, 0), KuduPredicate.newComparisonPredicate(b, ComparisonOp.EQUAL, "m\0")); // c = 0 // b < "m" checkPartitions(1, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.EQUAL, 0), KuduPredicate.newComparisonPredicate(b, ComparisonOp.LESS, "m")); // c = 0 // b < "m\0" checkPartitions(2, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.EQUAL, 0), KuduPredicate.newComparisonPredicate(b, ComparisonOp.LESS, "m\0")); // c = 0 // c = 2 checkPartitions(0, 0, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.EQUAL, 0), KuduPredicate.newComparisonPredicate(c, ComparisonOp.EQUAL, 2)); // c = MIN checkPartitions(1, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.EQUAL, Byte.MIN_VALUE)); // c = MAX checkPartitions(1, 1, table, partitions, KuduPredicate.newComparisonPredicate(c, ComparisonOp.EQUAL, Byte.MAX_VALUE)); // c IN (1, 2) checkPartitions(1, 1, table, partitions, KuduPredicate.newInListPredicate(c, ImmutableList.of((byte) 1, (byte) 2))); // c IN (0, 1, 2) checkPartitions(2, 1, table, partitions, KuduPredicate.newInListPredicate(c, ImmutableList.of((byte) 0, (byte) 1, (byte) 2))); // c IN (-10, 0) // b < "m" checkPartitions(1, 1, table, partitions, KuduPredicate.newInListPredicate(c, ImmutableList.of((byte) -10, (byte) 0)), KuduPredicate.newComparisonPredicate(b, ComparisonOp.LESS, "m")); // c IN (-10, 0) // b < "m\0" checkPartitions(2, 1, table, partitions, KuduPredicate.newInListPredicate(c, ImmutableList.of((byte) -10, (byte) 0)), KuduPredicate.newComparisonPredicate(b, ComparisonOp.LESS, "m\0")); } @Test public void testHashPartitionPruning() throws Exception { // CREATE TABLE t // (a INT8, b INT8, c INT8) // PRIMARY KEY (a, b, c) // PARTITION BY HASH (a) PARTITIONS 2, // HASH (b, c) PARTITIONS 2; ColumnSchema a = new ColumnSchema.ColumnSchemaBuilder("a", Type.INT8).key(true).build(); ColumnSchema b = new ColumnSchema.ColumnSchemaBuilder("b", Type.INT8).key(true).build(); ColumnSchema c = new ColumnSchema.ColumnSchemaBuilder("c", Type.INT8).key(true).build(); final Schema schema = new Schema(ImmutableList.of(a, b, c)); CreateTableOptions tableBuilder = new CreateTableOptions(); tableBuilder.setRangePartitionColumns(new ArrayList<>()); tableBuilder.addHashPartitions(ImmutableList.of("a"), 2); tableBuilder.addHashPartitions(ImmutableList.of("b", "c"), 2); String tableName = "testHashPartitionPruning-" + System.currentTimeMillis(); client.createTable(tableName, schema, tableBuilder); KuduTable table = client.openTable(tableName); List<Partition> partitions = getTablePartitions(table); // No Predicates checkPartitions(4, 1, table, partitions); // a = 0; checkPartitions(2, 1, table, partitions, KuduPredicate.newComparisonPredicate(a, ComparisonOp.EQUAL, 0)); // a >= 0; checkPartitions(4, 1, table, partitions, KuduPredicate.newComparisonPredicate(a, ComparisonOp.GREATER_EQUAL, 0)); // a >= 0; // a < 1; checkPartitions(2, 1, table, partitions, KuduPredicate.newComparisonPredicate(a, ComparisonOp.GREATER_EQUAL, 0), KuduPredicate.newComparisonPredicate(a, ComparisonOp.LESS, 1)); // a >= 0; // a < 2; checkPartitions(4, 1, table, partitions, KuduPredicate.newComparisonPredicate(a, ComparisonOp.GREATER_EQUAL, 0), KuduPredicate.newComparisonPredicate(a, ComparisonOp.LESS, 2)); // b = 1; checkPartitions(4, 1, table, partitions, KuduPredicate.newComparisonPredicate(b, ComparisonOp.EQUAL, 1)); // b = 1; // c = 2; checkPartitions(2, 2, table, partitions, KuduPredicate.newComparisonPredicate(b, ComparisonOp.EQUAL, 1), KuduPredicate.newComparisonPredicate(c, ComparisonOp.EQUAL, 2)); // a = 0; // b = 1; // c = 2; checkPartitions(1, 1, table, partitions, KuduPredicate.newComparisonPredicate(a, ComparisonOp.EQUAL, 0), KuduPredicate.newComparisonPredicate(b, ComparisonOp.EQUAL, 1), KuduPredicate.newComparisonPredicate(c, ComparisonOp.EQUAL, 2)); // a IN (0, 10) checkPartitions(4, 1, table, partitions, KuduPredicate.newInListPredicate(c, ImmutableList.of((byte) 0, (byte) 10))); } @Test public void testInListHashPartitionPruning() throws Exception { // CREATE TABLE t // (a INT8, b INT8, c INT8) // PRIMARY KEY (a, b, c) // PARTITION BY HASH (a) PARTITIONS 3, // HASH (b) PARTITIONS 3, // HASH (c) PARTITIONS 3; ColumnSchema a = new ColumnSchema.ColumnSchemaBuilder("a", Type.INT8).key(true).build(); ColumnSchema b = new ColumnSchema.ColumnSchemaBuilder("b", Type.INT8).key(true).build(); ColumnSchema c = new ColumnSchema.ColumnSchemaBuilder("c", Type.INT8).key(true).build(); final Schema schema = new Schema(ImmutableList.of(a, b, c)); CreateTableOptions tableBuilder = new CreateTableOptions(); tableBuilder.setRangePartitionColumns(new ArrayList<>()); tableBuilder.addHashPartitions(ImmutableList.of("a"), 3); tableBuilder.addHashPartitions(ImmutableList.of("b"), 3); tableBuilder.addHashPartitions(ImmutableList.of("c"), 3); String tableName = "testInListHashPartitionPruning-" + System.currentTimeMillis(); client.createTable(tableName, schema, tableBuilder); KuduTable table = client.openTable(tableName); List<Partition> partitions = getTablePartitions(table); // a in [0, 1]; checkPartitions(18, 2, table, partitions, KuduPredicate.newInListPredicate(a, ImmutableList.of((byte) 0, (byte) 1))); // a in [0, 1, 8]; checkPartitions(27, 1, table, partitions, KuduPredicate.newInListPredicate(a, ImmutableList.of((byte) 0, (byte) 1, (byte) 8))); // b in [0, 1]; checkPartitions(18, 6, table, partitions, KuduPredicate.newInListPredicate(b, ImmutableList.of((byte) 0, (byte) 1))); // c in [0, 1]; checkPartitions(18, 18, table, partitions, KuduPredicate.newInListPredicate(c, ImmutableList.of((byte) 0, (byte) 1))); // b in [0, 1], c in [0, 1]; checkPartitions(12, 12, table, partitions, KuduPredicate.newInListPredicate(b, ImmutableList.of((byte) 0, (byte) 1)), KuduPredicate.newInListPredicate(c, ImmutableList.of((byte) 0, (byte) 1))); // a in [0, 1], b in [0, 1], c in [0, 1]; checkPartitions(8, 8, table, partitions, KuduPredicate.newInListPredicate(a, ImmutableList.of((byte) 0, (byte) 1)), KuduPredicate.newInListPredicate(b, ImmutableList.of((byte) 0, (byte) 1)), KuduPredicate.newInListPredicate(c, ImmutableList.of((byte) 0, (byte) 1))); } @Test public void testMultiColumnInListHashPruning() throws Exception { // CREATE TABLE t // (a INT8, b INT8, c INT8) // PRIMARY KEY (a, b, c) // PARTITION BY HASH (a) PARTITIONS 3, // HASH (b, c) PARTITIONS 3; ColumnSchema a = new ColumnSchema.ColumnSchemaBuilder("a", Type.INT8).key(true).build(); ColumnSchema b = new ColumnSchema.ColumnSchemaBuilder("b", Type.INT8).key(true).build(); ColumnSchema c = new ColumnSchema.ColumnSchemaBuilder("c", Type.INT8).key(true).build(); final Schema schema = new Schema(ImmutableList.of(a, b, c)); CreateTableOptions tableBuilder = new CreateTableOptions(); tableBuilder.setRangePartitionColumns(new ArrayList<>()); tableBuilder.addHashPartitions(ImmutableList.of("a"), 3); tableBuilder.addHashPartitions(ImmutableList.of("b", "c"), 3); String tableName = "testMultiColumnInListHashPartitionPruning-" + System.currentTimeMillis(); client.createTable(tableName, schema, tableBuilder); KuduTable table = client.openTable(tableName); List<Partition> partitions = getTablePartitions(table); // a in [0, 1]; checkPartitions(6, 2, table, partitions, KuduPredicate.newInListPredicate(a, ImmutableList.of((byte) 0, (byte) 1))); // a in [0, 1, 8]; checkPartitions(9, 1, table, partitions, KuduPredicate.newInListPredicate(a, ImmutableList.of((byte) 0, (byte) 1, (byte) 8))); // b in [0, 1]; checkPartitions(9, 1, table, partitions, KuduPredicate.newInListPredicate(b, ImmutableList.of((byte) 0, (byte) 1))); // c in [0, 1]; checkPartitions(9, 1, table, partitions, KuduPredicate.newInListPredicate(c, ImmutableList.of((byte) 0, (byte) 1))); // b in [0, 1], c in [0, 1] // (0, 0) in bucket 2 // (0, 1) in bucket 2 // (1, 0) in bucket 1 // (1, 1) in bucket 0 checkPartitions(9, 1, table, partitions, KuduPredicate.newInListPredicate(b, ImmutableList.of((byte) 0, (byte) 1)), KuduPredicate.newInListPredicate(c, ImmutableList.of((byte) 0, (byte) 1))); // b = 0, c in [0, 1] checkPartitions(3, 3, table, partitions, KuduPredicate.newComparisonPredicate(b, ComparisonOp.EQUAL, 0), KuduPredicate.newInListPredicate(c, ImmutableList.of((byte) 0, (byte) 1))); // b = 1, c in [0, 1] checkPartitions(6, 6, table, partitions, KuduPredicate.newComparisonPredicate(b, ComparisonOp.EQUAL, 1), KuduPredicate.newInListPredicate(c, ImmutableList.of((byte) 0, (byte) 1))); // a in [0, 1], b in [0, 1], c in [0, 1]; checkPartitions(6, 2, table, partitions, KuduPredicate.newInListPredicate(a, ImmutableList.of((byte) 0, (byte) 1)), KuduPredicate.newInListPredicate(b, ImmutableList.of((byte) 0, (byte) 1)), KuduPredicate.newInListPredicate(c, ImmutableList.of((byte) 0, (byte) 1))); } @Test public void testPruning() throws Exception { // CREATE TABLE timeseries // (host STRING, metric STRING, timestamp UNIXTIME_MICROS, value DOUBLE) // PRIMARY KEY (host, metric, time) // DISTRIBUTE BY // RANGE(time) // (PARTITION VALUES < 10, // PARTITION VALUES >= 10); // HASH (host, metric) 2 PARTITIONS; ColumnSchema host = new ColumnSchema.ColumnSchemaBuilder("host", Type.STRING).key(true).build(); ColumnSchema metric = new ColumnSchema.ColumnSchemaBuilder("metric", Type.STRING).key(true).build(); ColumnSchema timestamp = new ColumnSchema.ColumnSchemaBuilder("timestamp", Type.UNIXTIME_MICROS) .key(true).build(); ColumnSchema value = new ColumnSchema.ColumnSchemaBuilder("value", Type.DOUBLE).build(); Schema schema = new Schema(ImmutableList.of(host, metric, timestamp, value)); CreateTableOptions tableBuilder = new CreateTableOptions(); tableBuilder.setRangePartitionColumns(ImmutableList.of("timestamp")); PartialRow split = schema.newPartialRow(); split.addLong("timestamp", 10); tableBuilder.addSplitRow(split); tableBuilder.addHashPartitions(ImmutableList.of("host", "metric"), 2); String tableName = "testPruning-" + System.currentTimeMillis(); client.createTable(tableName, schema, tableBuilder); KuduTable table = client.openTable(tableName); List<Partition> partitions = getTablePartitions(table); // No Predicates checkPartitions(4, 1, table, partitions); // host = "a" checkPartitions(4, 1, table, partitions, KuduPredicate.newComparisonPredicate(host, ComparisonOp.EQUAL, "a")); // host = "a" // metric = "a" checkPartitions(2, 1, table, partitions, KuduPredicate.newComparisonPredicate(host, ComparisonOp.EQUAL, "a"), KuduPredicate.newComparisonPredicate(metric, ComparisonOp.EQUAL, "a")); // host = "a" // metric = "a" // timestamp >= 9; checkPartitions(2, 1, table, partitions, KuduPredicate.newComparisonPredicate(host, ComparisonOp.EQUAL, "a"), KuduPredicate.newComparisonPredicate(metric, ComparisonOp.EQUAL, "a"), KuduPredicate.newComparisonPredicate(timestamp, ComparisonOp.GREATER_EQUAL, 9)); // host = "a" // metric = "a" // timestamp >= 10; // timestamp < 20; checkPartitions(1, 1, table, partitions, KuduPredicate.newComparisonPredicate(host, ComparisonOp.EQUAL, "a"), KuduPredicate.newComparisonPredicate(metric, ComparisonOp.EQUAL, "a"), KuduPredicate.newComparisonPredicate(timestamp, ComparisonOp.GREATER_EQUAL, 10), KuduPredicate.newComparisonPredicate(timestamp, ComparisonOp.LESS, 20)); // host = "a" // metric = "a" // timestamp < 10; checkPartitions(1, 1, table, partitions, KuduPredicate.newComparisonPredicate(host, ComparisonOp.EQUAL, "a"), KuduPredicate.newComparisonPredicate(metric, ComparisonOp.EQUAL, "a"), KuduPredicate.newComparisonPredicate(timestamp, ComparisonOp.LESS, 10)); // host = "a" // metric = "a" // timestamp >= 10; checkPartitions(1, 1, table, partitions, KuduPredicate.newComparisonPredicate(host, ComparisonOp.EQUAL, "a"), KuduPredicate.newComparisonPredicate(metric, ComparisonOp.EQUAL, "a"), KuduPredicate.newComparisonPredicate(timestamp, ComparisonOp.GREATER_EQUAL, 10)); // host = "a" // metric = "a" // timestamp = 10; checkPartitions(1, 1, table, partitions, KuduPredicate.newComparisonPredicate(host, ComparisonOp.EQUAL, "a"), KuduPredicate.newComparisonPredicate(metric, ComparisonOp.EQUAL, "a"), KuduPredicate.newComparisonPredicate(timestamp, ComparisonOp.EQUAL, 10)); byte[] hash1 = new byte[] { 0, 0, 0, 1 }; // partition key < (hash=1) checkPartitions(2, 1, table, partitions, null, hash1); // partition key >= (hash=1) checkPartitions(2, 1, table, partitions, hash1, null); // timestamp = 10 // partition key < (hash=1) checkPartitions(1, 1, table, partitions, null, hash1, KuduPredicate.newComparisonPredicate(timestamp, ComparisonOp.EQUAL, 10)); // timestamp = 10 // partition key >= (hash=1) checkPartitions(1, 1, table, partitions, hash1,null, KuduPredicate.newComparisonPredicate(timestamp, ComparisonOp.EQUAL, 10)); // timestamp IN (0, 9) // host = "a" // metric IN ("foo", "baz") checkPartitions(1, 1, table, partitions, KuduPredicate.newInListPredicate(timestamp, ImmutableList.of(0L, 9L)), KuduPredicate.newComparisonPredicate(host, ComparisonOp.EQUAL, "a"), KuduPredicate.newInListPredicate(metric, ImmutableList.of("foo", "baz"))); // timestamp IN (10, 100) checkPartitions(2, 2, table, partitions, KuduPredicate.newInListPredicate(timestamp, ImmutableList.of(10L, 100L))); // timestamp IN (9, 10) checkPartitions(4, 2, table, partitions, KuduPredicate.newInListPredicate(timestamp, ImmutableList.of(9L, 10L))); // timestamp IS NOT NULL checkPartitions(4, 1, table, partitions, KuduPredicate.newIsNotNullPredicate(timestamp)); // timestamp IS NULL checkPartitions(0, 0, table, partitions, KuduPredicate.newIsNullPredicate(timestamp)); } }
apache-2.0
cem3394/haskell
tools/haddock.sh
422
#!/bin/bash # Creates shallow haddocks for GitHub pages. set -eu -o pipefail IMAGE_NAME=tensorflow/haskell:v0 STACK="stack --docker --docker-image=$IMAGE_NAME" $STACK haddock --no-haddock-deps tensorflow* DOC_ROOT=$($STACK path --local-doc-root) DOCS=docs/haddock git rm -fr $DOCS mkdir -p $DOCS cp $DOC_ROOT/{*.html,*js,*.png,*.gif,*.css} $DOCS cp -a $DOC_ROOT/tensorflow* $DOCS rm -f $DOCS/*/*.haddock git add $DOCS
apache-2.0
ucpwang/cron-utils
src/main/java/com/cronutils/model/time/generator/FieldValueGenerator.java
2439
package com.cronutils.model.time.generator; import com.cronutils.model.field.expression.FieldExpression; import org.apache.commons.lang3.Validate; import java.util.List; /* * Copyright 2015 jmrozanec * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Provides a strategy to generate values. * Strategy is valid for 0+ numbers */ public abstract class FieldValueGenerator { protected static int NO_VALUE = Integer.MIN_VALUE; protected FieldExpression expression; public FieldValueGenerator(FieldExpression expression){ Validate.notNull(expression); Validate.isTrue(matchesFieldExpressionClass(expression), "FieldExpression does not match required class"); this.expression = expression; } /** * Generates next valid value from reference * @param reference - reference value * @return generated value - Integer * @throws NoSuchValueException - if there is no next value */ public abstract int generateNextValue(int reference) throws NoSuchValueException; /** * Generates previous valid value from reference * @param reference - reference value * @return generated value - Integer * @throws NoSuchValueException - if there is no previous value */ public abstract int generatePreviousValue(int reference) throws NoSuchValueException; protected abstract List<Integer> generateCandidatesNotIncludingIntervalExtremes(int start, int end); public abstract boolean isMatch(int value); public final List<Integer> generateCandidates(int start, int end){ List<Integer> candidates = generateCandidatesNotIncludingIntervalExtremes(start, end); if(isMatch(start)){ candidates.add(start); } if(isMatch(end)){ candidates.add(end); } return candidates; } protected abstract boolean matchesFieldExpressionClass(FieldExpression fieldExpression); }
apache-2.0
jerrinot/hazelcast
hazelcast/src/main/java/com/hazelcast/jet/impl/JobCoordinationService.java
58765
/* * Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.hazelcast.jet.impl; import com.hazelcast.cluster.ClusterState; import com.hazelcast.cluster.Member; import com.hazelcast.cluster.impl.MemberImpl; import com.hazelcast.core.HazelcastInstanceNotActiveException; import com.hazelcast.function.FunctionEx; import com.hazelcast.instance.impl.Node; import com.hazelcast.internal.cluster.ClusterService; import com.hazelcast.internal.metrics.MetricDescriptor; import com.hazelcast.internal.metrics.MetricsRegistry; import com.hazelcast.internal.metrics.Probe; import com.hazelcast.internal.partition.impl.InternalPartitionServiceImpl; import com.hazelcast.internal.partition.impl.PartitionServiceState; import com.hazelcast.internal.serialization.Data; import com.hazelcast.internal.util.Clock; import com.hazelcast.internal.util.counters.Counter; import com.hazelcast.internal.util.counters.MwCounter; import com.hazelcast.jet.JetException; import com.hazelcast.jet.JobAlreadyExistsException; import com.hazelcast.jet.config.JetConfig; import com.hazelcast.jet.config.JobConfig; import com.hazelcast.jet.core.DAG; import com.hazelcast.jet.core.JobNotFoundException; import com.hazelcast.jet.core.JobStatus; import com.hazelcast.jet.core.JobSuspensionCause; import com.hazelcast.jet.core.TopologyChangedException; import com.hazelcast.jet.core.Vertex; import com.hazelcast.jet.core.metrics.MetricNames; import com.hazelcast.jet.core.metrics.MetricTags; import com.hazelcast.jet.datamodel.Tuple2; import com.hazelcast.jet.impl.exception.EnteringPassiveClusterStateException; import com.hazelcast.jet.impl.execution.DoneItem; import com.hazelcast.jet.impl.metrics.RawJobMetrics; import com.hazelcast.jet.impl.observer.ObservableImpl; import com.hazelcast.jet.impl.observer.WrappedThrowable; import com.hazelcast.jet.impl.operation.GetJobIdsOperation.GetJobIdsResult; import com.hazelcast.jet.impl.operation.NotifyMemberShutdownOperation; import com.hazelcast.jet.impl.pipeline.PipelineImpl; import com.hazelcast.jet.impl.pipeline.PipelineImpl.Context; import com.hazelcast.jet.impl.util.LoggingUtil; import com.hazelcast.jet.impl.util.Util; import com.hazelcast.logging.ILogger; import com.hazelcast.ringbuffer.OverflowPolicy; import com.hazelcast.ringbuffer.Ringbuffer; import com.hazelcast.security.SecurityContext; import com.hazelcast.spi.exception.RetryableHazelcastException; import com.hazelcast.spi.impl.NodeEngineImpl; import com.hazelcast.spi.impl.executionservice.ExecutionService; import com.hazelcast.spi.properties.HazelcastProperties; import com.hazelcast.version.Version; import javax.annotation.CheckReturnValue; import javax.annotation.Nonnull; import javax.annotation.Nullable; import javax.security.auth.Subject; import java.security.Permission; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.Set; import java.util.Spliterators; import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.CancellationException; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.StreamSupport; import static com.hazelcast.cluster.ClusterState.IN_TRANSITION; import static com.hazelcast.cluster.ClusterState.PASSIVE; import static com.hazelcast.cluster.memberselector.MemberSelectors.DATA_MEMBER_SELECTOR; import static com.hazelcast.internal.util.executor.ExecutorType.CACHED; import static com.hazelcast.jet.Util.idToString; import static com.hazelcast.jet.core.JobStatus.COMPLETING; import static com.hazelcast.jet.core.JobStatus.NOT_RUNNING; import static com.hazelcast.jet.core.JobStatus.RUNNING; import static com.hazelcast.jet.core.JobStatus.SUSPENDED; import static com.hazelcast.jet.datamodel.Tuple2.tuple2; import static com.hazelcast.jet.impl.JobClassLoaderService.JobPhase.COORDINATOR; import static com.hazelcast.jet.impl.TerminationMode.CANCEL_FORCEFUL; import static com.hazelcast.jet.impl.execution.init.CustomClassLoadedObject.deserializeWithCustomClassLoader; import static com.hazelcast.jet.impl.operation.GetJobIdsOperation.ALL_JOBS; import static com.hazelcast.jet.impl.util.ExceptionUtil.sneakyThrow; import static com.hazelcast.jet.impl.util.ExceptionUtil.withTryCatch; import static com.hazelcast.jet.impl.util.LoggingUtil.logFine; import static com.hazelcast.jet.impl.util.LoggingUtil.logFinest; import static com.hazelcast.spi.properties.ClusterProperty.JOB_SCAN_PERIOD; import static java.util.Collections.emptyList; import static java.util.Comparator.comparing; import static java.util.concurrent.CompletableFuture.completedFuture; import static java.util.concurrent.TimeUnit.HOURS; import static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.concurrent.TimeUnit.SECONDS; import static java.util.function.Function.identity; import static java.util.stream.Collectors.toList; /** * A service that handles MasterContexts on the coordinator member. * Job-control operations from client are handled here. */ public class JobCoordinationService { private static final String COORDINATOR_EXECUTOR_NAME = "jet:coordinator"; /** * The delay before retrying to start/scale up a job. */ private static final long RETRY_DELAY_IN_MILLIS = SECONDS.toMillis(2); private static final ThreadLocal<Boolean> IS_JOB_COORDINATOR_THREAD = ThreadLocal.withInitial(() -> false); private static final int COORDINATOR_THREADS_POOL_SIZE = 4; private static final int MIN_JOB_SCAN_PERIOD_MILLIS = 100; /** * Inserted temporarily to {@link #lightMasterContexts} to safely check for double job submission. * When reading, it's treated as if the job doesn't exist. */ private static final Object UNINITIALIZED_LIGHT_JOB_MARKER = new Object(); private final NodeEngineImpl nodeEngine; private final JetServiceBackend jetServiceBackend; private final JetConfig config; private final ILogger logger; private final JobRepository jobRepository; private final ConcurrentMap<Long, MasterContext> masterContexts = new ConcurrentHashMap<>(); private final ConcurrentMap<Long, Object> lightMasterContexts = new ConcurrentHashMap<>(); private final ConcurrentMap<UUID, CompletableFuture<Void>> membersShuttingDown = new ConcurrentHashMap<>(); private final ConcurrentMap<Long, ScheduledFuture<?>> scheduledJobTimeouts = new ConcurrentHashMap<>(); /** * Map of {memberUuid; removeTime}. * * A collection of UUIDs of members which left the cluster and for which we * didn't receive {@link NotifyMemberShutdownOperation}. */ private final Map<UUID, Long> removedMembers = new ConcurrentHashMap<>(); private final Object lock = new Object(); private volatile boolean isClusterEnteringPassiveState; private volatile boolean jobsScanned; private final AtomicInteger scaleUpScheduledCount = new AtomicInteger(); @Probe(name = MetricNames.JOBS_SUBMITTED) private final Counter jobSubmitted = MwCounter.newMwCounter(); @Probe(name = MetricNames.JOBS_COMPLETED_SUCCESSFULLY) private final Counter jobCompletedSuccessfully = MwCounter.newMwCounter(); @Probe(name = MetricNames.JOBS_COMPLETED_WITH_FAILURE) private final Counter jobCompletedWithFailure = MwCounter.newMwCounter(); private long maxJobScanPeriodInMillis; JobCoordinationService( NodeEngineImpl nodeEngine, JetServiceBackend jetServiceBackend, JetConfig config, JobRepository jobRepository ) { this.nodeEngine = nodeEngine; this.jetServiceBackend = jetServiceBackend; this.config = config; this.logger = nodeEngine.getLogger(getClass()); this.jobRepository = jobRepository; ExecutionService executionService = nodeEngine.getExecutionService(); executionService.register(COORDINATOR_EXECUTOR_NAME, COORDINATOR_THREADS_POOL_SIZE, Integer.MAX_VALUE, CACHED); // register metrics MetricsRegistry registry = nodeEngine.getMetricsRegistry(); MetricDescriptor descriptor = registry.newMetricDescriptor() .withTag(MetricTags.MODULE, "jet"); registry.registerStaticMetrics(descriptor, this); } public JobRepository jobRepository() { return jobRepository; } public void startScanningForJobs() { ExecutionService executionService = nodeEngine.getExecutionService(); HazelcastProperties properties = nodeEngine.getProperties(); maxJobScanPeriodInMillis = properties.getMillis(JOB_SCAN_PERIOD); executionService.schedule(COORDINATOR_EXECUTOR_NAME, this::scanJobs, 0, MILLISECONDS); } public CompletableFuture<Void> submitJob( long jobId, Data serializedJobDefinition, JobConfig jobConfig, Subject subject ) { CompletableFuture<Void> res = new CompletableFuture<>(); submitToCoordinatorThread(() -> { MasterContext masterContext; try { assertIsMaster("Cannot submit job " + idToString(jobId) + " to non-master node"); checkOperationalState(); // the order of operations is important. // first, check if the job is already completed JobResult jobResult = jobRepository.getJobResult(jobId); if (jobResult != null) { logger.fine("Not starting job " + idToString(jobId) + " since already completed with result: " + jobResult); return; } if (!config.isResourceUploadEnabled() && !jobConfig.getResourceConfigs().isEmpty()) { throw new JetException(Util.JET_RESOURCE_UPLOAD_DISABLED_MESSAGE); } int quorumSize = jobConfig.isSplitBrainProtectionEnabled() ? getQuorumSize() : 0; Object jobDefinition = deserializeJobDefinition(jobId, jobConfig, serializedJobDefinition); DAG dag; Data serializedDag; if (jobDefinition instanceof PipelineImpl) { int coopThreadCount = config.getCooperativeThreadCount(); dag = ((PipelineImpl) jobDefinition).toDag(new Context() { @Override public int defaultLocalParallelism() { return coopThreadCount; } }); serializedDag = nodeEngine().getSerializationService().toData(dag); } else { dag = (DAG) jobDefinition; serializedDag = serializedJobDefinition; } checkPermissions(subject, dag); Set<String> ownedObservables = ownedObservables(dag); JobRecord jobRecord = new JobRecord(nodeEngine.getClusterService().getClusterVersion(), jobId, serializedDag, dagToJson(dag), jobConfig, ownedObservables, subject); JobExecutionRecord jobExecutionRecord = new JobExecutionRecord(jobId, quorumSize); masterContext = createMasterContext(jobRecord, jobExecutionRecord); boolean hasDuplicateJobName; synchronized (lock) { assertIsMaster("Cannot submit job " + idToString(jobId) + " to non-master node"); checkOperationalState(); hasDuplicateJobName = jobConfig.getName() != null && hasActiveJobWithName(jobConfig.getName()); if (!hasDuplicateJobName) { // just try to initiate the coordination MasterContext prev = masterContexts.putIfAbsent(jobId, masterContext); if (prev != null) { logger.fine("Joining to already existing masterContext " + prev.jobIdString()); return; } } } if (hasDuplicateJobName) { jobRepository.deleteJob(jobId); throw new JobAlreadyExistsException("Another active job with equal name (" + jobConfig.getName() + ") exists: " + idToString(jobId)); } // If job is not currently running, it might be that it is just completed if (completeMasterContextIfJobAlreadyCompleted(masterContext)) { return; } // If there is no master context and job result at the same time, it means this is the first submission jobSubmitted.inc(); jobRepository.putNewJobRecord(jobRecord); logger.info("Starting job " + idToString(masterContext.jobId()) + " based on submit request"); } catch (Throwable e) { jetServiceBackend.getJobClassLoaderService() .tryRemoveClassloadersForJob(jobId, COORDINATOR); res.completeExceptionally(e); throw e; } finally { res.complete(null); } tryStartJob(masterContext); }); return res; } public CompletableFuture<Void> submitLightJob( long jobId, Data serializedJobDefinition, JobConfig jobConfig, Subject subject ) { Object jobDefinition = nodeEngine().getSerializationService().toObject(serializedJobDefinition); DAG dag; if (jobDefinition instanceof DAG) { dag = (DAG) jobDefinition; } else { int coopThreadCount = config.getCooperativeThreadCount(); dag = ((PipelineImpl) jobDefinition).toDag(new Context() { @Override public int defaultLocalParallelism() { return coopThreadCount; } }); } // First insert just a marker into the map. This is to prevent initializing the light job if the jobId // was submitted twice. This can happen e.g. if the client retries. Object oldContext = lightMasterContexts.putIfAbsent(jobId, UNINITIALIZED_LIGHT_JOB_MARKER); if (oldContext != null) { throw new JetException("duplicate jobId " + idToString(jobId)); } checkPermissions(subject, dag); // Initialize and start the job (happens in the constructor). We do this before adding the actual // LightMasterContext to the map to avoid possible races of the job initialization and cancellation. LightMasterContext mc = new LightMasterContext(nodeEngine, this, dag, jobId, jobConfig, subject); oldContext = lightMasterContexts.put(jobId, mc); assert oldContext == UNINITIALIZED_LIGHT_JOB_MARKER; scheduleJobTimeout(jobId, jobConfig.getTimeoutMillis()); return mc.getCompletionFuture() .whenComplete((r, t) -> { Object removed = lightMasterContexts.remove(jobId); assert removed instanceof LightMasterContext : "LMC not found: " + removed; unscheduleJobTimeout(jobId); }); } public long getJobSubmittedCount() { return jobSubmitted.get(); } private void checkPermissions(Subject subject, DAG dag) { SecurityContext securityContext = nodeEngine.getNode().securityContext; if (securityContext == null || subject == null) { return; } for (Vertex vertex : dag) { Permission requiredPermission = vertex.getMetaSupplier().getRequiredPermission(); if (requiredPermission != null) { securityContext.checkPermission(subject, requiredPermission); } } } private static Set<String> ownedObservables(DAG dag) { return StreamSupport.stream(Spliterators.spliteratorUnknownSize(dag.iterator(), 0), false) .map(vertex -> vertex.getMetaSupplier().getTags().get(ObservableImpl.OWNED_OBSERVABLE)) .filter(Objects::nonNull) .collect(Collectors.toSet()); } @SuppressWarnings("WeakerAccess") // used by jet-enterprise MasterContext createMasterContext(JobRecord jobRecord, JobExecutionRecord jobExecutionRecord) { return new MasterContext(nodeEngine, this, jobRecord, jobExecutionRecord); } private boolean hasActiveJobWithName(@Nonnull String jobName) { // if scanJob() has not run yet, master context objects may not be initialized. // in this case, we cannot check if the new job submission has a duplicate job name. // therefore, we will retry until scanJob() task runs at least once. if (!jobsScanned) { throw new RetryableHazelcastException("Cannot submit job with name '" + jobName + "' before the master node initializes job coordination service's state"); } return masterContexts.values() .stream() .anyMatch(ctx -> jobName.equals(ctx.jobConfig().getName())); } public CompletableFuture<Void> prepareForPassiveClusterState() { assertIsMaster("Cannot prepare for passive cluster state on a non-master node"); synchronized (lock) { isClusterEnteringPassiveState = true; } return submitToCoordinatorThread(() -> { CompletableFuture[] futures = masterContexts .values().stream() .map(mc -> mc.jobContext().gracefullyTerminate()) .toArray(CompletableFuture[]::new); return CompletableFuture.allOf(futures); }).thenCompose(identity()); } public void clusterChangeDone() { synchronized (lock) { isClusterEnteringPassiveState = false; } } public void reset() { assert !isMaster() : "this member is a master"; List<MasterContext> contexts; synchronized (lock) { contexts = new ArrayList<>(masterContexts.values()); masterContexts.clear(); jobsScanned = false; } contexts.forEach(ctx -> ctx.jobContext().setFinalResult(new CancellationException())); } public CompletableFuture<Void> joinSubmittedJob(long jobId) { checkOperationalState(); CompletableFuture<CompletableFuture<Void>> future = callWithJob(jobId, mc -> mc.jobContext().jobCompletionFuture() .handle((r, t) -> { if (t == null) { return null; } if (t instanceof CancellationException || t instanceof JetException) { throw sneakyThrow(t); } throw new JetException(t.toString(), t); }), JobResult::asCompletableFuture, jobRecord -> { JobExecutionRecord jobExecutionRecord = ensureExecutionRecord(jobId, jobRepository.getJobExecutionRecord(jobId)); return startJobIfNotStartedOrCompleted(jobRecord, jobExecutionRecord, "join request from client"); }, null ); return future .thenCompose(identity()); // unwrap the inner future } public CompletableFuture<Void> joinLightJob(long jobId) { Object mc = lightMasterContexts.get(jobId); if (mc == null || mc == UNINITIALIZED_LIGHT_JOB_MARKER) { throw new JobNotFoundException(jobId); } return ((LightMasterContext) mc).getCompletionFuture(); } public CompletableFuture<Void> terminateJob(long jobId, TerminationMode terminationMode) { return runWithJob(jobId, masterContext -> { // User can cancel in any state, other terminations are allowed only when running. // This is not technically required (we can request termination in any state), // but this method is only called by the user. It would be weird for the client to // request a restart if the job didn't start yet etc. // Also, it would be weird to restart the job during STARTING: as soon as it will start, // it will restart. // In any case, it doesn't make sense to restart a suspended job. JobStatus jobStatus = masterContext.jobStatus(); if (jobStatus != RUNNING && terminationMode != CANCEL_FORCEFUL) { throw new IllegalStateException("Cannot " + terminationMode + ", job status is " + jobStatus + ", should be " + RUNNING); } String terminationResult = masterContext.jobContext().requestTermination(terminationMode, false).f1(); if (terminationResult != null) { throw new IllegalStateException("Cannot " + terminationMode + ": " + terminationResult); } }, jobResult -> { if (terminationMode != CANCEL_FORCEFUL) { throw new IllegalStateException("Cannot " + terminationMode + " job " + idToString(jobId) + " because it already has a result: " + jobResult); } logger.fine("Ignoring cancellation of a completed job " + idToString(jobId)); }, jobRecord -> { // we'll eventually learn of the job through scanning of records or from a join operation throw new RetryableHazelcastException("No MasterContext found for job " + idToString(jobId) + " for " + terminationMode); } ); } public void terminateLightJob(long jobId) { Object mc = lightMasterContexts.get(jobId); if (mc == null || mc == UNINITIALIZED_LIGHT_JOB_MARKER) { throw new JobNotFoundException(jobId); } ((LightMasterContext) mc).requestTermination(); } /** * Return the job IDs of jobs with the given name, sorted by * {active/completed, creation time}, active & newest first. */ public CompletableFuture<GetJobIdsResult> getJobIds(@Nullable String onlyName, long onlyJobId) { if (onlyName != null) { assertIsMaster("Cannot query list of job IDs by name on non-master node"); } return submitToCoordinatorThread(() -> { if (onlyJobId != ALL_JOBS) { Object lmc = lightMasterContexts.get(onlyJobId); if (lmc != null && lmc != UNINITIALIZED_LIGHT_JOB_MARKER) { return new GetJobIdsResult(onlyJobId, true); } if (isMaster()) { try { callWithJob(onlyJobId, mc -> null, jobResult -> null, jobRecord -> null, null) .get(); } catch (ExecutionException e) { if (e.getCause() instanceof JobNotFoundException) { return GetJobIdsResult.EMPTY; } throw e; } return new GetJobIdsResult(onlyJobId, false); } return GetJobIdsResult.EMPTY; } List<Tuple2<Long, Boolean>> result = new ArrayList<>(); // add light jobs - only if no name is requested, light jobs can't have a name if (onlyName == null) { for (Object ctx : lightMasterContexts.values()) { if (ctx != UNINITIALIZED_LIGHT_JOB_MARKER) { result.add(tuple2(((LightMasterContext) ctx).getJobId(), true)); } } } // add normal jobs - only on master if (isMaster()) { if (onlyName != null) { // we first need to collect to a map where the jobId is the key to eliminate possible duplicates // in JobResult and also to be able to sort from newest to oldest Map<Long, Long> jobs = new HashMap<>(); for (MasterContext ctx : masterContexts.values()) { if (onlyName.equals(ctx.jobConfig().getName())) { jobs.put(ctx.jobId(), Long.MAX_VALUE); } } for (JobResult jobResult : jobRepository.getJobResults(onlyName)) { jobs.put(jobResult.getJobId(), jobResult.getCreationTime()); } jobs.entrySet().stream() .sorted(comparing(Entry<Long, Long>::getValue).reversed()) .forEach(entry -> result.add(tuple2(entry.getKey(), false))); } else { for (Long jobId : jobRepository.getAllJobIds()) { result.add(tuple2(jobId, false)); } } } return new GetJobIdsResult(result); }); } /** * Returns the job status or fails with {@link JobNotFoundException} * if the requested job is not found. */ public CompletableFuture<JobStatus> getJobStatus(long jobId) { return callWithJob(jobId, mc -> { // When the job finishes running, we write NOT_RUNNING to jobStatus first and then // write null to requestedTerminationMode (see MasterJobContext.finalizeJob()). We // have to read them in the opposite order. TerminationMode terminationMode = mc.jobContext().requestedTerminationMode(); JobStatus jobStatus = mc.jobStatus(); return jobStatus == RUNNING && terminationMode != null ? COMPLETING : jobStatus; }, JobResult::getJobStatus, jobRecord -> NOT_RUNNING, jobExecutionRecord -> jobExecutionRecord.isSuspended() ? SUSPENDED : NOT_RUNNING ); } /** * Returns the reason why this job has been suspended in a human-readable * form. * <p> * Fails with {@link JobNotFoundException} if the requested job is not found. * <p> * Fails with {@link IllegalStateException} if the requested job is not * currently in a suspended state. */ public CompletableFuture<JobSuspensionCause> getJobSuspensionCause(long jobId) { FunctionEx<JobExecutionRecord, JobSuspensionCause> jobExecutionRecordHandler = jobExecutionRecord -> { JobSuspensionCause cause = jobExecutionRecord.getSuspensionCause(); if (cause == null) { throw new IllegalStateException("Job not suspended"); } return cause; }; return callWithJob(jobId, mc -> { JobExecutionRecord jobExecutionRecord = mc.jobExecutionRecord(); return jobExecutionRecordHandler.apply(jobExecutionRecord); }, jobResult -> { throw new IllegalStateException("Job not suspended"); }, jobRecord -> { throw new IllegalStateException("Job not suspended"); }, jobExecutionRecordHandler ); } /** * Returns the latest metrics for a job or fails with {@link JobNotFoundException} * if the requested job is not found. */ public CompletableFuture<List<RawJobMetrics>> getJobMetrics(long jobId) { CompletableFuture<List<RawJobMetrics>> cf = new CompletableFuture<>(); runWithJob(jobId, mc -> mc.jobContext().collectMetrics(cf), jobResult -> { List<RawJobMetrics> metrics = jobRepository.getJobMetrics(jobId); cf.complete(metrics != null ? metrics : emptyList()); }, jobRecord -> cf.complete(emptyList()) ); return cf; } /** * Returns the job submission time or fails with {@link JobNotFoundException} * if the requested job is not found. */ public CompletableFuture<Long> getJobSubmissionTime(long jobId, boolean isLightJob) { if (isLightJob) { Object mc = lightMasterContexts.get(jobId); if (mc == null || mc == UNINITIALIZED_LIGHT_JOB_MARKER) { throw new JobNotFoundException(jobId); } return completedFuture(((LightMasterContext) mc).getStartTime()); } return callWithJob(jobId, mc -> mc.jobRecord().getCreationTime(), JobResult::getCreationTime, JobRecord::getCreationTime, null ); } public CompletableFuture<Void> resumeJob(long jobId) { return runWithJob(jobId, masterContext -> masterContext.jobContext().resumeJob(jobRepository::newExecutionId), jobResult -> { throw new IllegalStateException("Job already completed"); }, jobRecord -> { throw new RetryableHazelcastException("Job " + idToString(jobId) + " not yet discovered"); } ); } /** * Return a summary of all jobs */ public CompletableFuture<List<JobSummary>> getJobSummaryList() { return submitToCoordinatorThread(() -> { Map<Long, JobSummary> jobs = new HashMap<>(); if (isMaster()) { // running jobs jobRepository.getJobRecords().stream().map(this::getJobSummary).forEach(s -> jobs.put(s.getJobId(), s)); // completed jobs jobRepository.getJobResults().stream() .map(r -> new JobSummary( r.getJobId(), r.getJobNameOrId(), r.getJobStatus(), r.getCreationTime(), r.getCompletionTime(), r.getFailureText())) .forEach(s -> jobs.put(s.getJobId(), s)); } // light jobs lightMasterContexts.values().stream() .filter(lmc -> lmc != UNINITIALIZED_LIGHT_JOB_MARKER) .map(LightMasterContext.class::cast) .map(lmc -> new JobSummary( true, lmc.getJobId(), lmc.getJobId(), idToString(lmc.getJobId()), RUNNING, lmc.getStartTime())) .forEach(s -> jobs.put(s.getJobId(), s)); return jobs.values().stream().sorted(comparing(JobSummary::getSubmissionTime).reversed()).collect(toList()); }); } /** * Add the given member to shutting down members. This will prevent * submission of more executions until the member actually leaves the * cluster. The returned future will complete when all executions of which * the member is a participant terminate. * <p> * The method is idempotent, the {@link NotifyMemberShutdownOperation} * which calls it can be retried. */ @Nonnull public CompletableFuture<Void> addShuttingDownMember(UUID uuid) { CompletableFuture<Void> future = new CompletableFuture<>(); CompletableFuture<Void> oldFuture = membersShuttingDown.putIfAbsent(uuid, future); if (oldFuture != null) { return oldFuture; } if (removedMembers.containsKey(uuid)) { logFine(logger, "NotifyMemberShutdownOperation received for a member that was already " + "removed from the cluster: %s", uuid); return completedFuture(null); } logFine(logger, "Added a shutting-down member: %s", uuid); CompletableFuture[] futures = masterContexts.values().stream() .map(mc -> mc.jobContext().onParticipantGracefulShutdown(uuid)) .toArray(CompletableFuture[]::new); // Need to do this even if futures.length == 0, we need to perform the action in whenComplete CompletableFuture.allOf(futures) .whenComplete(withTryCatch(logger, (r, e) -> future.complete(null))); return future; } // only for testing public Map<Long, MasterContext> getMasterContexts() { return new HashMap<>(masterContexts); } // only for testing public Map<Long, Object> getLightMasterContexts() { return new HashMap<>(lightMasterContexts); } // only for testing public MasterContext getMasterContext(long jobId) { return masterContexts.get(jobId); } JetServiceBackend getJetServiceBackend() { return jetServiceBackend; } boolean shouldStartJobs() { if (!isMaster() || !nodeEngine.isRunning()) { return false; } ClusterState clusterState = nodeEngine.getClusterService().getClusterState(); if (isClusterEnteringPassiveState || clusterState == PASSIVE || clusterState == IN_TRANSITION) { logger.fine("Not starting jobs because cluster is in passive state or in transition."); return false; } // if there are any members in a shutdown process, don't start jobs if (!membersShuttingDown.isEmpty()) { LoggingUtil.logFine(logger, "Not starting jobs because members are shutting down: %s", membersShuttingDown.keySet()); return false; } Version clusterVersion = nodeEngine.getClusterService().getClusterVersion(); for (Member m : nodeEngine.getClusterService().getMembers()) { if (!clusterVersion.equals(m.getVersion().asVersion())) { logger.fine("Not starting non-light jobs because rolling upgrade is in progress"); return false; } } PartitionServiceState state = getInternalPartitionService().getPartitionReplicaStateChecker().getPartitionServiceState(); if (state != PartitionServiceState.SAFE) { logger.fine("Not starting jobs because partition replication is not in safe state, but in " + state); return false; } if (!getInternalPartitionService().getPartitionStateManager().isInitialized()) { logger.fine("Not starting jobs because partitions are not yet initialized."); return false; } return true; } private CompletableFuture<Void> runWithJob( long jobId, @Nonnull Consumer<MasterContext> masterContextHandler, @Nonnull Consumer<JobResult> jobResultHandler, @Nonnull Consumer<JobRecord> jobRecordHandler ) { return callWithJob(jobId, toNullFunction(masterContextHandler), toNullFunction(jobResultHandler), toNullFunction(jobRecordHandler), null ); } /** * Returns a function that passes its argument to the given {@code * consumer} and returns {@code null}. */ @Nonnull private <T, R> Function<T, R> toNullFunction(@Nonnull Consumer<T> consumer) { return val -> { consumer.accept(val); return null; }; } private <T> CompletableFuture<T> callWithJob( long jobId, @Nonnull Function<MasterContext, T> masterContextHandler, @Nonnull Function<JobResult, T> jobResultHandler, @Nonnull Function<JobRecord, T> jobRecordHandler, @Nullable Function<JobExecutionRecord, T> jobExecutionRecordHandler ) { assertIsMaster("Cannot do this task on non-master. jobId=" + idToString(jobId)); return submitToCoordinatorThread(() -> { // when job is finalized, actions happen in this order: // - JobResult and JobMetrics are created // - JobRecord and JobExecutionRecord are deleted // - masterContext is removed from the map // We check them in reverse order so that no race is possible. // // We check the JobResult after MasterContext for optimization because in most cases // there will either be MasterContext or JobResult. Neither of them is present only after // master failed and the new master didn't yet scan jobs. We check the JobResult // again at the end for correctness. // check masterContext first MasterContext mc = masterContexts.get(jobId); if (mc != null) { return masterContextHandler.apply(mc); } // early check of JobResult. JobResult jobResult = jobRepository.getJobResult(jobId); if (jobResult != null) { return jobResultHandler.apply(jobResult); } // the job might not be yet discovered by job record scanning JobExecutionRecord jobExRecord; if (jobExecutionRecordHandler != null && (jobExRecord = jobRepository.getJobExecutionRecord(jobId)) != null) { return jobExecutionRecordHandler.apply(jobExRecord); } JobRecord jobRecord; if ((jobRecord = jobRepository.getJobRecord(jobId)) != null) { return jobRecordHandler.apply(jobRecord); } // second check for JobResult, see comment at the top of the method jobResult = jobRepository.getJobResult(jobId); if (jobResult != null) { return jobResultHandler.apply(jobResult); } // job doesn't exist throw new JobNotFoundException(jobId); }); } void onMemberAdded(MemberImpl addedMember) { // the member can re-join with the same UUID in certain scenarios removedMembers.remove(addedMember.getUuid()); if (addedMember.isLiteMember()) { return; } updateQuorumValues(); scheduleScaleUp(config.getScaleUpDelayMillis()); } void onMemberRemoved(UUID uuid) { if (membersShuttingDown.remove(uuid) != null) { logFine(logger, "Removed a shutting-down member: %s, now shuttingDownMembers=%s", uuid, membersShuttingDown.keySet()); } else { removedMembers.put(uuid, System.nanoTime()); } // clean up old entries from removedMembers (the value is time when the member was removed) long removeThreshold = System.nanoTime() - HOURS.toNanos(1); removedMembers.entrySet().removeIf(en -> en.getValue() < removeThreshold); } boolean isQuorumPresent(int quorumSize) { return getDataMemberCount() >= quorumSize; } /** * Completes the job which is coordinated with the given master context object. */ @CheckReturnValue CompletableFuture<Void> completeJob(MasterContext masterContext, Throwable error, long completionTime) { return submitToCoordinatorThread(() -> { // the order of operations is important. List<RawJobMetrics> jobMetrics = masterContext.jobConfig().isStoreMetricsAfterJobCompletion() ? masterContext.jobContext().jobMetrics() : null; jobRepository.completeJob(masterContext, jobMetrics, error, completionTime); if (masterContexts.remove(masterContext.jobId(), masterContext)) { completeObservables(masterContext.jobRecord().getOwnedObservables(), error); logger.fine(masterContext.jobIdString() + " is completed"); (error == null ? jobCompletedSuccessfully : jobCompletedWithFailure).inc(); } else { MasterContext existing = masterContexts.get(masterContext.jobId()); if (existing != null) { logger.severe("Different master context found to complete " + masterContext.jobIdString() + ", master context execution " + idToString(existing.executionId())); } else { logger.severe("No master context found to complete " + masterContext.jobIdString()); } } unscheduleJobTimeout(masterContext.jobId()); }); } /** * Schedules a restart task that will be run in future for the given job */ void scheduleRestart(long jobId) { MasterContext masterContext = masterContexts.get(jobId); if (masterContext == null) { logger.severe("Master context for job " + idToString(jobId) + " not found to schedule restart"); return; } logger.fine("Scheduling restart on master for job " + masterContext.jobName()); nodeEngine.getExecutionService().schedule(COORDINATOR_EXECUTOR_NAME, () -> restartJob(jobId), RETRY_DELAY_IN_MILLIS, MILLISECONDS); } void scheduleSnapshot(MasterContext mc, long executionId) { long snapshotInterval = mc.jobConfig().getSnapshotIntervalMillis(); ExecutionService executionService = nodeEngine.getExecutionService(); if (logger.isFineEnabled()) { logger.fine(mc.jobIdString() + " snapshot is scheduled in " + snapshotInterval + "ms"); } executionService.schedule(COORDINATOR_EXECUTOR_NAME, () -> mc.snapshotContext().startScheduledSnapshot(executionId), snapshotInterval, MILLISECONDS); } /** * Restarts a job for a new execution if the cluster is stable. * Otherwise, it reschedules the restart task. */ void restartJob(long jobId) { MasterContext masterContext = masterContexts.get(jobId); if (masterContext == null) { logger.severe("Master context for job " + idToString(jobId) + " not found to restart"); return; } tryStartJob(masterContext); } private void checkOperationalState() { if (isClusterEnteringPassiveState) { throw new EnteringPassiveClusterStateException(); } } private void scheduleScaleUp(long delay) { int counter = scaleUpScheduledCount.incrementAndGet(); nodeEngine.getExecutionService().schedule(() -> scaleJobsUpNow(counter), delay, MILLISECONDS); } private void scaleJobsUpNow(int counter) { // if another scale-up was scheduled after this one, ignore this one if (scaleUpScheduledCount.get() != counter) { return; } // if we can't start jobs yet, we also won't tear them down if (!shouldStartJobs()) { scheduleScaleUp(RETRY_DELAY_IN_MILLIS); return; } submitToCoordinatorThread(() -> { boolean allSucceeded = true; int dataMembersCount = nodeEngine.getClusterService().getMembers(DATA_MEMBER_SELECTOR).size(); int partitionCount = nodeEngine.getPartitionService().getPartitionCount(); // If the number of partitions is lower than the data member count, some members won't have // any partitions assigned. Jet doesn't use such members. int dataMembersWithPartitionsCount = Math.min(dataMembersCount, partitionCount); for (MasterContext mc : masterContexts.values()) { allSucceeded &= mc.jobContext().maybeScaleUp(dataMembersWithPartitionsCount); } if (!allSucceeded) { scheduleScaleUp(RETRY_DELAY_IN_MILLIS); } }); } /** * Scans all job records and updates quorum size of a split-brain protection enabled * job with current cluster quorum size if the current cluster quorum size is larger */ private void updateQuorumValues() { if (!shouldCheckQuorumValues()) { return; } submitToCoordinatorThread(() -> { try { int currentQuorumSize = getQuorumSize(); for (JobRecord jobRecord : jobRepository.getJobRecords()) { try { if (!jobRecord.getConfig().isSplitBrainProtectionEnabled()) { continue; } MasterContext masterContext = masterContexts.get(jobRecord.getJobId()); // if MasterContext doesn't exist, update in the IMap directly, using a sync method if (masterContext == null) { jobRepository.updateJobQuorumSizeIfSmaller(jobRecord.getJobId(), currentQuorumSize); // check the master context again, it might have been just created and have picked // up the JobRecord before being updated masterContext = masterContexts.get(jobRecord.getJobId()); } if (masterContext != null) { masterContext.updateQuorumSize(currentQuorumSize); } } catch (Exception e) { logger.severe("Quorum of job " + idToString(jobRecord.getJobId()) + " could not be updated to " + currentQuorumSize, e); } } } catch (Exception e) { logger.severe("update quorum values task failed", e); } }); } private boolean shouldCheckQuorumValues() { return isMaster() && nodeEngine.isRunning() && getInternalPartitionService().getPartitionStateManager().isInitialized(); } private Object deserializeJobDefinition(long jobId, JobConfig jobConfig, Data jobDefinitionData) { JobClassLoaderService jobClassLoaderService = jetServiceBackend.getJobClassLoaderService(); ClassLoader classLoader = jobClassLoaderService.getOrCreateClassLoader(jobConfig, jobId, COORDINATOR); try { jobClassLoaderService.prepareProcessorClassLoaders(jobId); return deserializeWithCustomClassLoader(nodeEngine().getSerializationService(), classLoader, jobDefinitionData); } finally { jobClassLoaderService.clearProcessorClassLoaders(); } } private String dagToJson(DAG dag) { int coopThreadCount = config.getCooperativeThreadCount(); return dag.toJson(coopThreadCount).toString(); } private CompletableFuture<Void> startJobIfNotStartedOrCompleted( @Nonnull JobRecord jobRecord, @Nonnull JobExecutionRecord jobExecutionRecord, String reason ) { // the order of operations is important. long jobId = jobRecord.getJobId(); JobResult jobResult = jobRepository.getJobResult(jobId); if (jobResult != null) { logger.fine("Not starting job " + idToString(jobId) + ", already has result: " + jobResult); return jobResult.asCompletableFuture(); } MasterContext masterContext; MasterContext oldMasterContext; synchronized (lock) { checkOperationalState(); masterContext = createMasterContext(jobRecord, jobExecutionRecord); oldMasterContext = masterContexts.putIfAbsent(jobId, masterContext); } if (oldMasterContext != null) { return oldMasterContext.jobContext().jobCompletionFuture(); } // If job is not currently running, it might be that it just completed. // Since we've put the MasterContext into the masterContexts map, someone else could // have joined to the job in the meantime so we should notify its future. if (completeMasterContextIfJobAlreadyCompleted(masterContext)) { return masterContext.jobContext().jobCompletionFuture(); } if (jobExecutionRecord.isSuspended()) { logFinest(logger, "MasterContext for suspended %s is created", masterContext.jobIdString()); } else { logger.info("Starting job " + idToString(jobId) + ": " + reason); tryStartJob(masterContext); } return masterContext.jobContext().jobCompletionFuture(); } // If a job result is present, it completes the master context using the job result private boolean completeMasterContextIfJobAlreadyCompleted(MasterContext masterContext) { long jobId = masterContext.jobId(); JobResult jobResult = jobRepository.getJobResult(jobId); if (jobResult != null) { logger.fine("Completing master context for " + masterContext.jobIdString() + " since already completed with result: " + jobResult); masterContext.jobContext().setFinalResult(jobResult.getFailureAsThrowable()); return masterContexts.remove(jobId, masterContext); } if (!masterContext.jobConfig().isAutoScaling() && masterContext.jobExecutionRecord().executed()) { logger.info("Suspending or failing " + masterContext.jobIdString() + " since auto-restart is disabled and the job has been executed before"); masterContext.jobContext().finalizeJob(new TopologyChangedException()); return true; } return false; } private void tryStartJob(MasterContext masterContext) { masterContext.jobContext().tryStartJob(jobRepository::newExecutionId); if (masterContext.hasTimeout()) { long remainingTime = masterContext.remainingTime(Clock.currentTimeMillis()); scheduleJobTimeout(masterContext.jobId(), Math.max(1, remainingTime)); } } private int getQuorumSize() { return (getDataMemberCount() / 2) + 1; } private int getDataMemberCount() { ClusterService clusterService = nodeEngine.getClusterService(); return clusterService.getMembers(DATA_MEMBER_SELECTOR).size(); } private JobSummary getJobSummary(JobRecord record) { MasterContext ctx = masterContexts.get(record.getJobId()); long execId = ctx == null ? 0 : ctx.executionId(); JobStatus status; if (ctx == null) { JobExecutionRecord executionRecord = jobRepository.getJobExecutionRecord(record.getJobId()); status = executionRecord != null && executionRecord.isSuspended() ? JobStatus.SUSPENDED : JobStatus.NOT_RUNNING; } else { status = ctx.jobStatus(); } return new JobSummary(false, record.getJobId(), execId, record.getJobNameOrId(), status, record.getCreationTime()); } private InternalPartitionServiceImpl getInternalPartitionService() { Node node = nodeEngine.getNode(); return (InternalPartitionServiceImpl) node.getPartitionService(); } // runs periodically to restart jobs on coordinator failure and perform GC private void scanJobs() { long nextScanDelay = maxJobScanPeriodInMillis; try { // explicit check for master because we don't want to use shorter delay on non-master nodes // it will be checked again in shouldStartJobs() if (isMaster()) { if (shouldStartJobs()) { doScanJobs(); } else { // use a smaller delay when cluster is not in ready state nextScanDelay = MIN_JOB_SCAN_PERIOD_MILLIS; } } } catch (HazelcastInstanceNotActiveException ignored) { // ignore this exception } catch (Throwable e) { logger.severe("Scanning jobs failed", e); } ExecutionService executionService = nodeEngine.getExecutionService(); executionService.schedule(this::scanJobs, nextScanDelay, MILLISECONDS); } private void doScanJobs() { Collection<JobRecord> jobs = jobRepository.getJobRecords(); for (JobRecord jobRecord : jobs) { JobExecutionRecord jobExecutionRecord = ensureExecutionRecord(jobRecord.getJobId(), jobRepository.getJobExecutionRecord(jobRecord.getJobId())); startJobIfNotStartedOrCompleted(jobRecord, jobExecutionRecord, "discovered by scanning of JobRecords"); } jobRepository.cleanup(nodeEngine); if (!jobsScanned) { synchronized (lock) { jobsScanned = true; } } } private JobExecutionRecord ensureExecutionRecord(long jobId, JobExecutionRecord record) { return record != null ? record : new JobExecutionRecord(jobId, getQuorumSize()); } @SuppressWarnings("WeakerAccess") // used by jet-enterprise void assertIsMaster(String error) { if (!isMaster()) { throw new JetException(error + ". Master address: " + nodeEngine.getClusterService().getMasterAddress()); } } private boolean isMaster() { return nodeEngine.getClusterService().isMaster(); } @SuppressWarnings("unused") // used in jet-enterprise NodeEngineImpl nodeEngine() { return nodeEngine; } CompletableFuture<Void> submitToCoordinatorThread(Runnable action) { return submitToCoordinatorThread(() -> { action.run(); return null; }); } <T> CompletableFuture<T> submitToCoordinatorThread(Callable<T> action) { // if we are on our thread already, execute directly in a blocking way if (IS_JOB_COORDINATOR_THREAD.get()) { try { return completedFuture(action.call()); } catch (Throwable e) { // most callers ignore the failure on the returned future, let's log it at least logger.warning(null, e); return com.hazelcast.jet.impl.util.Util.exceptionallyCompletedFuture(e); } } Future<T> future = nodeEngine.getExecutionService().submit(COORDINATOR_EXECUTOR_NAME, () -> { assert !IS_JOB_COORDINATOR_THREAD.get() : "flag already raised"; IS_JOB_COORDINATOR_THREAD.set(true); try { return action.call(); } catch (Throwable e) { // most callers ignore the failure on the returned future, let's log it at least logger.warning(null, e); throw e; } finally { IS_JOB_COORDINATOR_THREAD.set(false); } }); return nodeEngine.getExecutionService().asCompletableFuture(future); } void assertOnCoordinatorThread() { assert IS_JOB_COORDINATOR_THREAD.get() : "not on coordinator thread"; } private void completeObservables(Set<String> observables, Throwable error) { for (String observable : observables) { try { String ringbufferName = ObservableImpl.ringbufferName(observable); Ringbuffer<Object> ringbuffer = nodeEngine.getHazelcastInstance().getRingbuffer(ringbufferName); Object completion = error == null ? DoneItem.DONE_ITEM : WrappedThrowable.of(error); ringbuffer.addAsync(completion, OverflowPolicy.OVERWRITE); } catch (Exception e) { logger.severe("Failed to complete observable '" + observable + "': " + e, e); } } } /** * From the given list of execution IDs returns those which are unknown to * this coordinator. */ public long[] findUnknownExecutions(long[] executionIds) { return Arrays.stream(executionIds).filter(key -> { Object lmc = lightMasterContexts.get(key); return lmc == null || lmc instanceof LightMasterContext && ((LightMasterContext) lmc).isCancelled(); }).toArray(); } private void scheduleJobTimeout(final long jobId, final long timeout) { if (timeout <= 0) { return; } scheduledJobTimeouts.computeIfAbsent(jobId, id -> scheduleJobTimeoutTask(id, timeout)); } private void unscheduleJobTimeout(final long jobId) { final ScheduledFuture<?> timeoutFuture = scheduledJobTimeouts.remove(jobId); if (timeoutFuture != null) { timeoutFuture.cancel(true); } } private ScheduledFuture<?> scheduleJobTimeoutTask(final long jobId, final long timeout) { return this.nodeEngine().getExecutionService().schedule(() -> { final MasterContext mc = masterContexts.get(jobId); final LightMasterContext lightMc = (LightMasterContext) lightMasterContexts.get(jobId); try { if (mc != null && isMaster() && !mc.jobStatus().isTerminal()) { terminateJob(jobId, CANCEL_FORCEFUL); } else if (lightMc != null && !lightMc.isCancelled()) { lightMc.requestTermination(); } } finally { scheduledJobTimeouts.remove(jobId); } }, timeout, MILLISECONDS); } boolean isMemberShuttingDown(UUID uuid) { return membersShuttingDown.containsKey(uuid); } }
apache-2.0
theskyinflames/bpulse-go-client
vendor/github.com/youtube/vitess/go/vt/zktopo/convert.go
2481
package zktopo import ( "encoding/json" "path" "strings" "github.com/golang/protobuf/proto" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" vschemapb "github.com/youtube/vitess/go/vt/proto/vschema" ) // This file contains utility functions to maintain backward compatibility // with old-style non-Backend Zookeeper topologies. The old // implementations (before 2016-08-17) used to deal with explicit data // types. We converted them to a generic []byte and path // interface. But the zookeeper implementation was not compatible with // this. // dataType is an enum for possible data types, used for backward // compatibility. type dataType int // Constants for type conversion const ( // newType is used to indicate a topology object type of // anything that is added after the topo.Backend refactor, // i.e. anything that doesn't require conversion between old // style topologies and the new style ones. The list of enum // values after this contain all types that exist at the // moment (2016-08-17) and doesn't need to be expanded when // something new is saved in the topology because it will be // saved in the new style, not in the old one. newType dataType = iota srvKeyspaceType srvVSchemaType ) // rawDataFromNodeValue convert the data of the given type into an []byte. // It is mindful of the backward compatibility, i.e. for newer objects // it doesn't do anything, but for old object types that were stored in JSON // format in converts them to proto3 binary encoding. func rawDataFromNodeValue(what dataType, data []byte) ([]byte, error) { var p proto.Message switch what { case srvKeyspaceType: p = &topodatapb.SrvKeyspace{} case srvVSchemaType: p = &vschemapb.SrvVSchema{} default: return data, nil } if err := json.Unmarshal(data, p); err != nil { return nil, err } return proto.Marshal(p) } // oldTypeAndFilePath returns the data type and old file path for a given path. func oldTypeAndFilePath(cell, filePath string) (dataType, string) { parts := strings.Split(filePath, "/") // SrvKeyspace: local cell, keyspaces/<keyspace>/SrvKeyspace if len(parts) == 3 && parts[0] == "keyspaces" && parts[2] == "SrvKeyspace" { return srvKeyspaceType, zkPathForSrvKeyspace(cell, parts[1]) } // SrvVSchema: local cell, SrvVSchema if len(parts) == 1 && parts[0] == "SrvVSchema" { return srvVSchemaType, zkPathForSrvVSchema(cell) } // General case. return newType, path.Join("/zk", cell, "vt", filePath) }
apache-2.0
tcpcloud/contrail-controller
src/vnsw/agent/test/test_vmport_cfg.cc
59044
/* * Copyright (c) 2013 Juniper Networks, Inc. All rights reserved. */ #include "base/os.h" #include <boost/assign/list_of.hpp> #include <cfg/cfg_init.h> #include <cfg/cfg_interface.h> #include <oper/operdb_init.h> #include <controller/controller_init.h> #include <controller/controller_ifmap.h> #include <pkt/pkt_init.h> #include <services/services_init.h> #include <vrouter/ksync/ksync_init.h> #include <cmn/agent_cmn.h> #include <base/task.h> #include <io/event_manager.h> #include <base/util.h> #include <oper/vn.h> #include <oper/vm.h> #include <oper/vm_interface.h> #include <oper/agent_sandesh.h> #include <oper/interface_common.h> #include <oper/vxlan.h> #include "vr_types.h" #include "testing/gunit.h" #include "test_cmn_util.h" #include "xmpp/test/xmpp_test_util.h" using namespace std; using namespace boost::assign; void RouterIdDepInit(Agent *agent) { } static void ValidateSandeshResponse(Sandesh *sandesh, vector<int> &result) { //TBD //Validate the response by the expectation } void DoInterfaceSandesh(std::string name) { ItfReq *itf_req = new ItfReq(); std::vector<int> result = list_of(1); Sandesh::set_response_callback(boost::bind(ValidateSandeshResponse, _1, result)); if (name != "") { itf_req->set_name(name); } itf_req->HandleRequest(); client->WaitForIdle(); itf_req->Release(); client->WaitForIdle(); } AgentIntfSandesh *CreateAgentIntfSandesh(const char *name) { return new AgentIntfSandesh("", "", "vnet1", "", "", "", "", "", "", "", "", ""); } class CfgTest : public ::testing::Test { public: virtual void SetUp() { agent_ = Agent::GetInstance(); } virtual void TearDown() { EXPECT_EQ(0U, Agent::GetInstance()->acl_table()->Size()); } Agent *agent_; }; TEST_F(CfgTest, AddDelVmPortNoVn_1) { struct PortInfo input[] = { {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1}, }; client->Reset(); IntfCfgAdd(input, 0); EXPECT_TRUE(client->PortNotifyWait(1)); EXPECT_TRUE(VmPortInactive(input, 0)); EXPECT_EQ(4U, Agent::GetInstance()->interface_table()->Size()); EXPECT_EQ(1U, Agent::GetInstance()->interface_config_table()->Size()); client->Reset(); IntfCfgDel(input, 0); client->WaitForIdle(); EXPECT_TRUE(client->PortNotifyWait(1)); EXPECT_FALSE(VmPortFind(input, 0)); EXPECT_EQ(3U, Agent::GetInstance()->interface_table()->Size()); EXPECT_EQ(0U, Agent::GetInstance()->interface_config_table()->Size()); } TEST_F(CfgTest, AddDelExport) { client->Reset(); CfgIntKey *key = new CfgIntKey(MakeUuid(1)); CfgIntData *data = new CfgIntData(); boost::system::error_code ec; Ip4Address ip = Ip4Address::from_string("1.1.1.1", ec); data->Init(MakeUuid(1), MakeUuid(1), MakeUuid(kProjectUuid), "vnet1", ip, Ip6Address(), "00:00:00:01:01:01", "", VmInterface::kInvalidVlanId, VmInterface::kInvalidVlanId, CfgIntEntry::CfgIntVMPort, 0); DBRequest req; req.oper = DBRequest::DB_ENTRY_ADD_CHANGE; req.key.reset(key); req.data.reset(data); Agent::GetInstance()->interface_config_table()->Enqueue(&req); CfgIntKey *key1 = new CfgIntKey(MakeUuid(1)); CfgIntData *data1 = new CfgIntData(); ip = Ip4Address::from_string("1.1.1.1", ec); data1->Init(MakeUuid(1), MakeUuid(1), MakeUuid(kProjectUuid), "vnet1", ip, Ip6Address(), "00:00:00:01:01:01", "", VmInterface::kInvalidVlanId, VmInterface::kInvalidVlanId, CfgIntEntry::CfgIntVMPort, 0); req.key.reset(key1); req.data.reset(data1); req.oper = DBRequest::DB_ENTRY_DELETE; Agent::GetInstance()->interface_config_table()->Enqueue(&req); usleep(1000); EXPECT_EQ(0U, Agent::GetInstance()->interface_config_table()->Size()); } TEST_F(CfgTest, AddDelVmPortDepOnVmVn_1) { struct PortInfo input[] = { {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1}, }; // Nova Port add message - Should be inactive since VM and VN not present client->Reset(); IntfCfgAdd(input, 0); EXPECT_TRUE(client->PortNotifyWait(1)); EXPECT_TRUE(VmPortInactive(input, 0)); EXPECT_EQ(4U, Agent::GetInstance()->interface_table()->Size()); EXPECT_EQ(1U, Agent::GetInstance()->interface_config_table()->Size()); // Config VM Add - Port inactive since VN not present AddVm("vm1", 1); EXPECT_TRUE(client->VmNotifyWait(1)); EXPECT_TRUE(VmFind(1)); EXPECT_TRUE(VmPortInactive(input, 0)); EXPECT_EQ(1U, Agent::GetInstance()->vm_table()->Size()); AddVrf("vrf1"); client->WaitForIdle(); EXPECT_TRUE(client->VrfNotifyWait(1)); EXPECT_TRUE(VrfFind("vrf1")); // Config VN Add - Port inactive since interface oper-db not aware of // VM and VN added AddVn("vn1", 1); EXPECT_TRUE(client->VnNotifyWait(1)); EXPECT_TRUE(VnFind(1)); EXPECT_TRUE(VmPortInactive(input, 0)); EXPECT_EQ(1U, Agent::GetInstance()->vn_table()->Size()); // Config Port add - Interface oper-db still inactive since no link between // VN and VRF client->Reset(); AddPort(input[0].name, input[0].intf_id); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 0)); // Add VN and VRF link. Port in-active since not linked to VM and VN client->Reset(); AddLink("virtual-network", "vn1", "routing-instance", "vrf1"); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 0)); // Add VM and Port link. Port in-active since port not linked to VN client->Reset(); AddLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); //EXPECT_TRUE(client->PortNotifyWait(1)); EXPECT_TRUE(VmPortInactive(input, 0)); // Add Port to VN link - Port is active client->Reset(); AddLink("virtual-network", "vn1", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); EXPECT_TRUE(VnFind(1)); EXPECT_TRUE(VmPortInactive(input, 0)); AddVmPortVrf("vnet1", "", 0); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 0)); client->Reset(); AddInstanceIp("instance0", input[0].vm_id, input[0].addr); AddLink("virtual-machine-interface", input[0].name, "instance-ip", "instance0"); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 0)); AddLink("virtual-machine-interface-routing-instance", "vnet1", "routing-instance", "vrf1"); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 0)); AddLink("virtual-machine-interface-routing-instance", "vnet1", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); EXPECT_TRUE(VmPortActive(input, 0)); // Delete Port to VN link. Port is inactive client->Reset(); DelLink("virtual-network", "vn1", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); EXPECT_TRUE(VmFind(1)); EXPECT_TRUE(VmPortInactive(input, 0)); // Delete virtual-machine-interface to vrf link attribute DelLink("virtual-machine-interface-routing-instance", "vnet1", "routing-instance", "vrf1"); DelLink("virtual-machine-interface-routing-instance", "vnet1", "virtual-machine-interface", "vnet1"); DelLink("instance-ip", "instance0", "virtual-machine-interface", "vnet1"); DelNode("virtual-machine-interface-routing-instance", "vnet1"); client->WaitForIdle(); // Delete config port entry. Port still present but inactive client->Reset(); DelLink("virtual-network", "vn1", "routing-instance", "vrf1"); DelLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet1"); DelNode("virtual-machine-interface", "vnet1"); DelNode("virtual-machine", "vm1"); DelNode("virtual-network", "vn1"); client->WaitForIdle(); EXPECT_TRUE(VmPortFind(input, 0)); EXPECT_TRUE(VmPortInactive(input, 0)); EXPECT_FALSE(VmFind(1)); DelNode("routing-instance", "vrf1"); DelInstanceIp("instance0"); client->WaitForIdle(); EXPECT_FALSE(VrfFind("vrf1")); // Delete Nova Port entry. client->Reset(); IntfCfgDel(input, 0); client->WaitForIdle(); EXPECT_TRUE(client->PortNotifyWait(1)); EXPECT_FALSE(VmPortFind(input, 0)); EXPECT_EQ(3U, Agent::GetInstance()->interface_table()->Size()); EXPECT_EQ(0U, Agent::GetInstance()->vm_table()->Size()); EXPECT_EQ(0U, Agent::GetInstance()->interface_config_table()->Size()); EXPECT_EQ(0U, Agent::GetInstance()->vn_table()->Size()); EXPECT_FALSE(VnFind(1)); } TEST_F(CfgTest, AddDelVmPortDepOnVmVn_2) { struct PortInfo input[] = { {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1}, }; // Config VM Add - Port inactive since VN not present client->Reset(); AddVm("vm1", 1); EXPECT_TRUE(client->VmNotifyWait(1)); EXPECT_TRUE(VmFind(1)); EXPECT_EQ(1U, Agent::GetInstance()->vm_table()->Size()); // Nova Port add message - Should be inactive since VN not present client->Reset(); IntfCfgAdd(input, 0); EXPECT_TRUE(client->PortNotifyWait(1)); EXPECT_TRUE(VmPortInactive(input, 0)); EXPECT_EQ(4U, Agent::GetInstance()->interface_table()->Size()); EXPECT_EQ(1U, Agent::GetInstance()->interface_config_table()->Size()); // Config VN Add - Port inactive since interface oper-db not aware of // VM and VN added AddVn("vn1", 1); EXPECT_TRUE(client->VnNotifyWait(1)); EXPECT_TRUE(VmPortInactive(input, 0)); EXPECT_TRUE(VnFind(1)); EXPECT_EQ(1U, Agent::GetInstance()->vn_table()->Size()); // Add link between VN and VRF. Interface still inactive client->Reset(); AddVrf("vrf2"); AddLink("virtual-network", "vn1", "routing-instance", "vrf2"); client->WaitForIdle(); EXPECT_TRUE(VrfFind("vrf2")); // Config Port add - Interface still inactive client->Reset(); AddPort(input[0].name, input[0].intf_id); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 0)); // Add Port to VM link - Port is inactive client->Reset(); AddLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 0)); // Add vm-port interface to vrf link AddVmPortVrf("vnet1", "", 0); AddLink("virtual-machine-interface-routing-instance", "vnet1", "routing-instance", "vrf2"); AddLink("virtual-machine-interface-routing-instance", "vnet1", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 0)); AddInstanceIp("instance0", input[0].vm_id, input[0].addr); AddLink("virtual-machine-interface", input[0].name, "instance-ip", "instance0"); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 0)); // Add Port to VN link - Port is active client->Reset(); AddLink("virtual-network", "vn1", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); EXPECT_TRUE(VnFind(1)); EXPECT_TRUE(VmPortActive(input, 0)); // Delete virtual-machine-interface to vrf link attribute DelLink("virtual-machine-interface-routing-instance", "vnet1", "routing-instance", "vrf2"); DelLink("virtual-machine-interface-routing-instance", "vnet1", "virtual-machine-interface", "vnet1"); DelNode("virtual-machine-interface-routing-instance", "vnet1"); DelLink("instance-ip", "instance0", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); // Delete Nova Port entry. client->Reset(); IntfCfgDel(input, 0); EXPECT_TRUE(client->PortNotifyWait(1)); EXPECT_FALSE(VmPortFind(input, 0)); EXPECT_EQ(3U, Agent::GetInstance()->interface_table()->Size()); EXPECT_EQ(0U, Agent::GetInstance()->interface_config_table()->Size()); client->Reset(); DelLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); DelLink("virtual-network", "vn1", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); DelNode("virtual-machine-interface", "vnet1"); client->WaitForIdle(); DelNode("virtual-machine", "vm1"); client->WaitForIdle(); DelLink("virtual-network", "vn1", "routing-instance", "vrf2"); client->WaitForIdle(); DelNode("virtual-network", "vn1"); client->WaitForIdle(); EXPECT_FALSE(VnFind(1)); EXPECT_FALSE(VmFind(1)); EXPECT_FALSE(VmPortFind(input, 0)); DelNode("routing-instance", "vrf2"); DelInstanceIp("instance0"); client->WaitForIdle(); EXPECT_FALSE(VrfFind("vrf2")); } TEST_F(CfgTest, AddDelVmPortDepOnVmVn_3) { struct PortInfo input[] = { {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1}, }; // Nova Port add message - Should be inactive since VM and VN not present client->Reset(); IntfCfgAdd(input, 0); EXPECT_TRUE(client->PortNotifyWait(1)); EXPECT_TRUE(VmPortInactive(input, 0)); EXPECT_EQ(4U, Agent::GetInstance()->interface_table()->Size()); EXPECT_EQ(1U, Agent::GetInstance()->interface_config_table()->Size()); // Config VM Add - Port inactive since VN not present AddVm("vm1", 1); EXPECT_TRUE(client->VmNotifyWait(1)); EXPECT_TRUE(VmFind(1)); EXPECT_TRUE(VmPortInactive(input, 0)); EXPECT_EQ(1U, Agent::GetInstance()->vm_table()->Size()); AddVrf("vrf1"); client->WaitForIdle(); EXPECT_TRUE(client->VrfNotifyWait(1)); EXPECT_TRUE(VrfFind("vrf1")); // Config VN Add - Port inactive since interface oper-db not aware of // VM and VN added AddVn("vn1", 1); EXPECT_TRUE(client->VnNotifyWait(1)); EXPECT_TRUE(VnFind(1)); EXPECT_TRUE(VmPortInactive(input, 0)); EXPECT_EQ(1U, Agent::GetInstance()->vn_table()->Size()); // Config Port add - Interface oper-db still inactive since no link between // VN and VRF client->Reset(); AddInstanceIp("instance0", input[0].vm_id, input[0].addr); AddLink("virtual-machine-interface", input[0].name, "instance-ip", "instance0"); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 0)); client->Reset(); AddPort(input[0].name, input[0].intf_id); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 0)); // Add vm-port interface to vrf link AddVmPortVrf("vnet1", "", 0); AddLink("virtual-machine-interface-routing-instance", "vnet1", "routing-instance", "vrf1"); AddLink("virtual-machine-interface-routing-instance", "vnet1", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 0)); // Add VN and VRF link. Port in-active since not linked to VM and VN client->Reset(); AddLink("virtual-network", "vn1", "routing-instance", "vrf1"); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 0)); // Add VM and Port link. Port in-active since port not linked to VN client->Reset(); AddLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); //EXPECT_TRUE(client->PortNotifyWait(1)); EXPECT_TRUE(VmPortInactive(input, 0)); //Add instance ip configuration AddInstanceIp("instance0", input[0].vm_id, input[0].addr); AddLink("virtual-machine-interface", input[0].name, "instance-ip", "instance0"); client->WaitForIdle(); //EXPECT_TRUE(client->PortNotifyWait(1)); EXPECT_TRUE(VmPortInactive(input, 0)); // Add Port to VN link - Port is active client->Reset(); AddLink("virtual-network", "vn1", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); EXPECT_TRUE(VnFind(1)); EXPECT_TRUE(VmPortActive(input, 0)); // Delete virtual-machine-interface to vrf link attribute DelLink("virtual-machine-interface-routing-instance", "vnet1", "routing-instance", "vrf1"); DelLink("virtual-machine-interface-routing-instance", "vnet1", "virtual-machine-interface", "vnet1"); DelLink( "virtual-machine-interface", "vnet1", "instance-ip", "instance0"); DelNode("virtual-machine-interface-routing-instance", "vnet1"); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 0)); client->WaitForIdle(); // Delete VM and its associated links. INSTANCE_MSG is still not deleted // Vmport should be inactive DelLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet1"); DelLink("virtual-network", "vn1", "virtual-machine-interface", "vnet1"); DelNode("virtual-machine", "vm1"); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 0)); DelLink("virtual-network", "vn1", "routing-instance", "vrf1"); DelNode("routing-instance", "vrf1"); DelNode("virtual-network", "vn1"); DelNode("virtual-machine-interface", "vnet1"); DelInstanceIp("instance0"); client->WaitForIdle(); IntfCfgDel(input, 0); client->WaitForIdle(); } // VN has ACL set before VM Port is created TEST_F(CfgTest, VmPortPolicy_1) { struct PortInfo input[] = { {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1}, {"vnet2", 2, "1.1.1.2", "00:00:00:02:02:02", 1, 1}, }; client->Reset(); AddVm("vm1", 1); client->WaitForIdle(); AddAcl("acl1", 1); client->WaitForIdle(); AddVrf("vrf3"); client->WaitForIdle(); AddVn("vn1", 1); client->WaitForIdle(); EXPECT_TRUE(client->AclNotifyWait(1)); EXPECT_TRUE(client->VmNotifyWait(1)); EXPECT_TRUE(client->VrfNotifyWait(1)); EXPECT_TRUE(client->VnNotifyWait(1)); EXPECT_EQ(1U, Agent::GetInstance()->vn_table()->Size()); EXPECT_EQ(1U, Agent::GetInstance()->vm_table()->Size()); EXPECT_EQ(1U, Agent::GetInstance()->acl_table()->Size()); EXPECT_TRUE(VrfFind("vrf3")); // Add vm-port interface to vrf link AddVmPortVrf("vmvrf1", "", 0); AddVmPortVrf("vmvrf2", "", 0); client->WaitForIdle(); AddPort(input[0].name, input[0].intf_id); AddPort(input[1].name, input[1].intf_id); AddLink("virtual-network", "vn1", "routing-instance", "vrf3"); AddLink("virtual-network", "vn1", "virtual-machine-interface", "vnet1"); AddLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet1"); AddLink("virtual-network", "vn1", "virtual-machine-interface", "vnet2"); AddLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet2"); AddLink("virtual-machine-interface-routing-instance", "vmvrf1", "routing-instance", "vrf3"); AddLink("virtual-machine-interface-routing-instance", "vmvrf1", "virtual-machine-interface", "vnet1"); AddLink("virtual-machine-interface-routing-instance", "vmvrf2", "routing-instance", "vrf3"); AddLink("virtual-machine-interface-routing-instance", "vmvrf2", "virtual-machine-interface", "vnet2"); AddInstanceIp("instance0", input[0].vm_id, input[0].addr); AddInstanceIp("instance1", input[0].vm_id, input[1].addr); AddLink("virtual-machine-interface", input[0].name, "instance-ip", "instance0"); AddLink("virtual-machine-interface", input[1].name, "instance-ip", "instance1"); client->WaitForIdle(); client->Reset(); IntfCfgAdd(input, 0); IntfCfgAdd(input, 1); client->WaitForIdle(); EXPECT_TRUE(VmPortActive(input, 0)); EXPECT_TRUE(VmPortActive(input, 1)); EXPECT_FALSE(VmPortPolicyEnable(input, 0)); EXPECT_FALSE(VmPortPolicyEnable(input, 1)); AddLink("virtual-network", "vn1", "access-control-list", "acl1"); client->WaitForIdle(); EXPECT_TRUE(VmPortPolicyEnable(input, 0)); EXPECT_TRUE(VmPortPolicyEnable(input, 1)); client->Reset(); DelLink("virtual-network", "vn1", "access-control-list", "acl1"); client->WaitForIdle(); EXPECT_TRUE(client->AclNotifyWait(0)); EXPECT_TRUE(client->VnNotifyWait(1)); EXPECT_TRUE(VmPortPolicyDisable(input, 0)); EXPECT_TRUE(VmPortPolicyDisable(input, 1)); client->Reset(); DelNode("access-control-list", "acl1"); client->WaitForIdle(); EXPECT_TRUE(client->AclNotifyWait(1)); EXPECT_EQ(0U, Agent::GetInstance()->acl_table()->Size()); // Del VN to VRF link. Port should become inactive client->Reset(); DelLink("virtual-network", "vn1", "routing-instance", "vrf3"); client->WaitForIdle(); EXPECT_TRUE(VmPortActive(input, 0)); EXPECT_TRUE(VmPortActive(input, 1)); // Delete virtual-machine-interface to vrf link attribute DelLink("virtual-machine-interface-routing-instance", "vmvrf1", "routing-instance", "vrf3"); DelLink("virtual-machine-interface-routing-instance", "vmvrf1", "virtual-machine-interface", "vnet1"); DelNode("virtual-machine-interface-routing-instance", "vmvrf1"); DelLink("virtual-machine-interface-routing-instance", "vmvrf2", "routing-instance", "vrf3"); DelLink("virtual-machine-interface-routing-instance", "vmvrf2", "virtual-machine-interface", "vnet2"); DelNode("virtual-machine-interface-routing-instance", "vmvrf2"); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 0)); EXPECT_TRUE(VmPortInactive(input, 1)); DelLink("virtual-network", "vn1", "virtual-machine-interface", "vnet1"); DelLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet1"); DelLink("virtual-network", "vn1", "virtual-machine-interface", "vnet2"); DelLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet2"); DelLink("virtual-machine-interface", input[0].name, "instance-ip", "instance0"); DelLink("virtual-machine-interface", input[1].name, "instance-ip", "instance1"); // Delete config vm entry - no-op for oper-db. Port is active client->Reset(); DelNode("virtual-machine", "vm1"); client->WaitForIdle(); // VM not deleted. Interface still refers to it EXPECT_FALSE(VmFind(1)); client->Reset(); DelNode("virtual-machine-interface", "vnet1"); DelNode("virtual-machine-interface", "vnet2"); EXPECT_TRUE(client->PortNotifyWait(2)); //After deleting vmport interface config, verify config name is set to "" const Interface *intf = VmPortGet(1); const VmInterface *vm_intf = static_cast<const VmInterface *>(intf); EXPECT_TRUE((vm_intf->cfg_name() == "")); intf = VmPortGet(2); vm_intf = static_cast<const VmInterface *>(intf); EXPECT_TRUE((vm_intf->cfg_name() == "")); // Delete Nova Port entry. client->Reset(); IntfCfgDel(input, 0); IntfCfgDel(input, 1); EXPECT_TRUE(client->PortDelNotifyWait(2)); EXPECT_FALSE(VmFind(1)); EXPECT_FALSE(VmPortFind(input, 0)); EXPECT_EQ(3U, Agent::GetInstance()->interface_table()->Size()); EXPECT_EQ(0U, Agent::GetInstance()->vm_table()->Size()); EXPECT_EQ(1U, Agent::GetInstance()->vn_table()->Size()); EXPECT_EQ(0U, Agent::GetInstance()->vm_table()->Size()); DelNode("virtual-network", "vn1"); client->WaitForIdle(); EXPECT_FALSE(VnFind(1)); DelNode("routing-instance", "vrf3"); DelInstanceIp("instance0"); DelInstanceIp("instance1"); client->WaitForIdle(); EXPECT_FALSE(VrfFind("vrf3")); } // ACL added after VM Port is created TEST_F(CfgTest, VmPortPolicy_2) { struct PortInfo input[] = { {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1}, {"vnet2", 2, "1.1.1.2", "00:00:00:02:02:02", 1, 1}, }; client->Reset(); AddVm("vm1", 1); EXPECT_TRUE(client->VmNotifyWait(1)); EXPECT_TRUE(VmFind(1)); AddVn("vn1", 1); EXPECT_TRUE(client->VnNotifyWait(1)); EXPECT_EQ(1U, Agent::GetInstance()->vn_table()->Size()); client->Reset(); AddAcl("acl1", 1); EXPECT_TRUE(client->AclNotifyWait(1)); client->Reset(); AddPort(input[0].name, input[0].intf_id); AddPort(input[1].name, input[1].intf_id); client->Reset(); IntfCfgAdd(input, 0); IntfCfgAdd(input, 1); EXPECT_TRUE(client->PortNotifyWait(2)); // Port inactive since VRF is not yet present EXPECT_TRUE(VmPortInactive(input, 0)); EXPECT_TRUE(VmPortInactive(input, 1)); EXPECT_TRUE(VmPortPolicyDisable(input, 0)); EXPECT_TRUE(VmPortPolicyDisable(input, 1)); EXPECT_EQ(5U, Agent::GetInstance()->interface_table()->Size()); EXPECT_EQ(1U, Agent::GetInstance()->vm_table()->Size()); EXPECT_EQ(1U, Agent::GetInstance()->vn_table()->Size()); EXPECT_EQ(2U, Agent::GetInstance()->interface_config_table()->Size()); AddVrf("vrf4"); client->WaitForIdle(); EXPECT_TRUE(VrfFind("vrf4")); // Add vm-port interface to vrf link AddVmPortVrf("vmvrf1", "", 0); AddVmPortVrf("vmvrf2", "", 0); AddLink("virtual-machine-interface-routing-instance", "vmvrf1", "routing-instance", "vrf4"); AddLink("virtual-machine-interface-routing-instance", "vmvrf1", "virtual-machine-interface", "vnet1"); AddLink("virtual-machine-interface-routing-instance", "vmvrf2", "routing-instance", "vrf4"); AddLink("virtual-machine-interface-routing-instance", "vmvrf2", "virtual-machine-interface", "vnet2"); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 0)); EXPECT_TRUE(VmPortInactive(input, 1)); AddLink("virtual-network", "vn1", "routing-instance", "vrf4"); AddLink("virtual-network", "vn1", "virtual-machine-interface", "vnet1"); AddLink("virtual-network", "vn1", "virtual-machine-interface", "vnet2"); AddLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet1"); AddLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet2"); AddInstanceIp("instance0", input[0].vm_id, input[0].addr); AddInstanceIp("instance1", input[0].vm_id, input[1].addr); AddLink("virtual-machine-interface", input[0].name, "instance-ip", "instance0"); AddLink("virtual-machine-interface", input[1].name, "instance-ip", "instance1"); client->WaitForIdle(); EXPECT_TRUE(VmPortActive(input, 0)); EXPECT_TRUE(VmPortActive(input, 1)); client->Reset(); AddLink("virtual-network", "vn1", "access-control-list", "acl1"); client->WaitForIdle(); EXPECT_TRUE(VmPortPolicyEnable(input, 0)); EXPECT_TRUE(VmPortPolicyEnable(input, 1)); client->Reset(); DelLink("virtual-network", "vn1", "access-control-list", "acl1"); client->WaitForIdle(); EXPECT_TRUE(client->AclNotifyWait(0)); EXPECT_TRUE(client->VnNotifyWait(1)); EXPECT_TRUE(VmPortPolicyDisable(input, 0)); EXPECT_TRUE(VmPortPolicyDisable(input, 1)); client->Reset(); DelNode("access-control-list", "acl1"); client->WaitForIdle(); EXPECT_TRUE(client->AclNotifyWait(1)); EXPECT_EQ(0U, Agent::GetInstance()->acl_table()->Size()); // Delete virtual-machine-interface to vrf link attribute DelLink("virtual-machine-interface-routing-instance", "vmvrf1", "routing-instance", "vrf4"); DelLink("virtual-machine-interface-routing-instance", "vmvrf1", "virtual-machine-interface", "vnet1"); DelNode("virtual-machine-interface-routing-instance", "vmvrf1"); DelLink("virtual-machine-interface-routing-instance", "vmvrf2", "routing-instance", "vrf4"); DelLink("virtual-machine-interface-routing-instance", "vmvrf2", "virtual-machine-interface", "vnet2"); DelNode("virtual-machine-interface-routing-instance", "vmvrf2"); client->WaitForIdle(); DelLink("virtual-network", "vn1", "routing-instance", "vrf4"); DelLink("virtual-network", "vn1", "virtual-machine-interface", "vnet1"); DelLink("virtual-network", "vn1", "virtual-machine-interface", "vnet2"); DelLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet1"); DelLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet2"); DelLink("virtual-machine-interface", input[0].name, "instance-ip", "instance0"); DelLink("virtual-machine-interface", input[1].name, "instance-ip", "instance1"); // Delete config vm entry - no-op for oper-db. Port is active client->Reset(); DelNode("virtual-machine", "vm1"); client->WaitForIdle(); EXPECT_TRUE(VnFind(1)); EXPECT_FALSE(VmFind(1)); EXPECT_TRUE(VmPortFind(input, 0)); EXPECT_EQ(5U, Agent::GetInstance()->interface_table()->Size()); EXPECT_EQ(0U, Agent::GetInstance()->vm_table()->Size()); EXPECT_EQ(1U, Agent::GetInstance()->vn_table()->Size()); EXPECT_EQ(2U, Agent::GetInstance()->interface_config_table()->Size()); DelPort(input[0].name); DelPort(input[1].name); client->Reset(); // Delete Nova Port entry. client->Reset(); IntfCfgDel(input, 0); IntfCfgDel(input, 1); EXPECT_TRUE(client->PortDelNotifyWait(2)); EXPECT_FALSE(VmFind(1)); EXPECT_FALSE(VmPortFind(input, 0)); EXPECT_EQ(3U, Agent::GetInstance()->interface_table()->Size()); EXPECT_EQ(0U, Agent::GetInstance()->vm_table()->Size()); EXPECT_EQ(1U, Agent::GetInstance()->vn_table()->Size()); EXPECT_EQ(0U, Agent::GetInstance()->vm_table()->Size()); // Del VN to VRF link. Port should become inactive client->Reset(); DelNode("virtual-network", "vn1"); DelInstanceIp("instance0"); DelInstanceIp("instance1"); client->WaitForIdle(); EXPECT_FALSE(VnFind(1)); DelNode("routing-instance", "vrf4"); client->WaitForIdle(); EXPECT_FALSE(VrfFind("vrf4")); } TEST_F(CfgTest, VnDepOnVrfAcl_1) { struct PortInfo input[] = { {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1}, {"vnet2", 2, "1.1.1.2", "00:00:00:02:02:02", 1, 1}, }; client->Reset(); AddVm("vm1", 1); EXPECT_TRUE(client->VmNotifyWait(1)); EXPECT_TRUE(VmFind(1)); client->Reset(); AddVrf("vrf5"); EXPECT_TRUE(client->VrfNotifyWait(1)); EXPECT_TRUE(VrfFind("vrf5")); AddVn("vn1", 1); EXPECT_TRUE(client->VnNotifyWait(1)); EXPECT_EQ(1U, Agent::GetInstance()->vn_table()->Size()); client->Reset(); AddAcl("acl1", 1); EXPECT_TRUE(client->AclNotifyWait(1)); AddLink("virtual-network", "vn1", "routing-instance", "vrf5"); client->WaitForIdle(); VnEntry *vn = VnGet(1); EXPECT_TRUE(vn->GetVrf() != NULL); EXPECT_TRUE(vn->GetAcl() == NULL); AddLink("virtual-network", "vn1", "access-control-list", "acl1"); client->WaitForIdle(); EXPECT_TRUE(vn->GetVrf() != NULL); EXPECT_TRUE(vn->GetAcl() != NULL); AddPort(input[0].name, input[0].intf_id); AddPort(input[1].name, input[1].intf_id); client->Reset(); client->Reset(); IntfCfgAdd(input, 0); IntfCfgAdd(input, 1); EXPECT_TRUE(client->PortNotifyWait(2)); // Add vm-port interface to vrf link AddVmPortVrf("vnet1", "", 0); AddLink("virtual-machine-interface-routing-instance", "vnet1", "routing-instance", "vrf5"); AddLink("virtual-machine-interface-routing-instance", "vnet1", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 0)); // Add vm-port interface to vrf link AddVmPortVrf("vnet2", "", 0); AddLink("virtual-machine-interface-routing-instance", "vnet2", "routing-instance", "vrf5"); AddLink("virtual-machine-interface-routing-instance", "vnet2", "virtual-machine-interface", "vnet2"); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 1)); // Port Active since VRF and VM already added AddLink("virtual-network", "vn1", "virtual-machine-interface", "vnet1"); AddLink("virtual-network", "vn1", "virtual-machine-interface", "vnet2"); AddLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet1"); AddLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet2"); AddInstanceIp("instance0", input[0].vm_id, input[0].addr); AddInstanceIp("instance1", input[0].vm_id, input[1].addr); AddLink("virtual-machine-interface", input[0].name, "instance-ip", "instance0"); AddLink("virtual-machine-interface", input[1].name, "instance-ip", "instance1"); client->WaitForIdle(); EXPECT_TRUE(VmPortActive(input, 0)); EXPECT_TRUE(VmPortActive(input, 1)); EXPECT_TRUE(VmPortPolicyEnable(input, 0)); EXPECT_TRUE(VmPortPolicyEnable(input, 1)); EXPECT_EQ(5U, Agent::GetInstance()->interface_table()->Size()); EXPECT_EQ(1U, Agent::GetInstance()->vm_table()->Size()); EXPECT_EQ(1U, Agent::GetInstance()->vn_table()->Size()); EXPECT_EQ(2U, Agent::GetInstance()->interface_config_table()->Size()); client->Reset(); AddLink("virtual-network", "vn1", "access-control-list", "acl1"); client->WaitForIdle(); EXPECT_TRUE(VmPortActive(input, 0)); EXPECT_TRUE(VmPortActive(input, 1)); EXPECT_TRUE(VmPortPolicyEnable(input, 0)); EXPECT_TRUE(VmPortPolicyEnable(input, 1)); // Delete virtual-machine-interface to vrf link attribute DelLink("virtual-machine-interface-routing-instance", "vnet1", "routing-instance", "vrf5"); DelLink("virtual-machine-interface-routing-instance", "vnet1", "virtual-machine-interface", "vnet1"); DelNode("virtual-machine-interface-routing-instance", "vnet1"); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 0)); // Delete virtual-machine-interface to vrf link attribute DelLink("virtual-machine-interface-routing-instance", "vnet2", "routing-instance", "vrf5"); DelLink("virtual-machine-interface-routing-instance", "vnet2", "virtual-machine-interface", "vnet2"); DelNode("virtual-machine-interface-routing-instance", "vnet2"); client->WaitForIdle(); EXPECT_TRUE(VmPortInactive(input, 1)); client->Reset(); DelLink("virtual-network", "vn1", "access-control-list", "acl1"); client->WaitForIdle(); EXPECT_TRUE(client->VnNotifyWait(1)); EXPECT_TRUE(VmPortPolicyDisable(input, 0)); EXPECT_TRUE(VmPortPolicyDisable(input, 1)); DelLink("virtual-network", "vn1", "virtual-machine-interface", "vnet1"); DelLink("virtual-network", "vn1", "virtual-machine-interface", "vnet2"); DelLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet1"); DelLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet2"); DelLink("virtual-machine-interface", input[0].name, "instance-ip", "instance0"); DelLink("virtual-machine-interface", input[1].name, "instance-ip", "instance1"); client->WaitForIdle(); DelPort(input[0].name); DelPort(input[1].name); client->Reset(); client->Reset(); DelNode("access-control-list", "acl1"); client->WaitForIdle(); EXPECT_TRUE(client->AclNotifyWait(1)); EXPECT_EQ(0U, Agent::GetInstance()->acl_table()->Size()); // Delete config vm entry - no-op for oper-db. Port is active client->Reset(); DelNode("virtual-machine", "vm1"); client->WaitForIdle(); EXPECT_TRUE(VnFind(1)); EXPECT_FALSE(VmFind(1)); EXPECT_TRUE(VmPortFind(input, 0)); EXPECT_EQ(5U, Agent::GetInstance()->interface_table()->Size()); EXPECT_EQ(0U, Agent::GetInstance()->vm_table()->Size()); EXPECT_EQ(1U, Agent::GetInstance()->vn_table()->Size()); EXPECT_EQ(2U, Agent::GetInstance()->interface_config_table()->Size()); // Delete Nova Port entry. client->Reset(); IntfCfgDel(input, 0); IntfCfgDel(input, 1); EXPECT_TRUE(client->PortNotifyWait(2)); EXPECT_FALSE(VmFind(1)); EXPECT_FALSE(VmPortFind(input, 0)); WAIT_FOR(100, 1000, (3U == Agent::GetInstance()->interface_table()->Size())); EXPECT_EQ(0U, Agent::GetInstance()->vm_table()->Size()); EXPECT_EQ(1U, Agent::GetInstance()->vn_table()->Size()); EXPECT_EQ(0U, Agent::GetInstance()->vm_table()->Size()); // Del VN to VRF link. Port should become inactive client->Reset(); DelLink("virtual-network", "vn1", "routing-instance", "vrf5"); DelNode("virtual-network", "vn1"); DelInstanceIp("instance0"); DelInstanceIp("instance1"); client->WaitForIdle(); EXPECT_FALSE(VnFind(1)); DelNode("routing-instance", "vrf5"); client->WaitForIdle(); EXPECT_FALSE(VrfFind("vrf5")); } //TBD //Reduce the waitforidle to improve on timing of UT TEST_F(CfgTest, FloatingIp_1) { struct PortInfo input[] = { {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1}, }; client->WaitForIdle(); client->Reset(); AddVm("vm1", 1); client->WaitForIdle(); EXPECT_TRUE(client->VmNotifyWait(1)); EXPECT_TRUE(VmFind(1)); client->Reset(); AddVrf("vrf6"); client->WaitForIdle(); EXPECT_TRUE(client->VrfNotifyWait(1)); EXPECT_TRUE(VrfFind("vrf6")); AddVn("vn1", 1); client->WaitForIdle(); EXPECT_TRUE(client->VnNotifyWait(1)); EXPECT_EQ(1U, Agent::GetInstance()->vn_table()->Size()); AddLink("virtual-network", "vn1", "routing-instance", "vrf6"); client->WaitForIdle(); client->Reset(); IntfCfgAdd(input, 0); client->WaitForIdle(); EXPECT_TRUE(client->PortNotifyWait(1)); AddPort(input[0].name, input[0].intf_id); client->WaitForIdle(); // Create floating-ip on default-project:vn2 client->Reset(); AddVn("default-project:vn2", 2); client->WaitForIdle(); EXPECT_TRUE(client->VnNotifyWait(1)); AddVrf("default-project:vn2:vn2"); AddVrf("vrf8"); client->WaitForIdle(); EXPECT_TRUE(client->VrfNotifyWait(2)); EXPECT_TRUE(VrfFind("default-project:vn2:vn2")); EXPECT_TRUE(VrfFind("vrf8")); AddFloatingIpPool("fip-pool1", 1); AddFloatingIp("fip1", 1, "1.1.1.1"); AddFloatingIp("fip3", 3, "2.2.2.5"); AddFloatingIp("fip4", 4, "2.2.2.1"); client->WaitForIdle(); AddLink("virtual-network", "default-project:vn2", "routing-instance", "default-project:vn2:vn2"); AddLink("floating-ip-pool", "fip-pool1", "virtual-network", "default-project:vn2"); AddLink("floating-ip", "fip1", "floating-ip-pool", "fip-pool1"); AddLink("floating-ip", "fip3", "floating-ip-pool", "fip-pool1"); client->WaitForIdle(); AddLink("floating-ip", "fip4", "floating-ip-pool", "fip-pool1"); AddLink("virtual-machine-interface", "vnet1", "floating-ip", "fip1"); AddLink("virtual-machine-interface", "vnet1", "floating-ip", "fip3"); AddLink("virtual-machine-interface", "vnet1", "floating-ip", "fip4"); client->WaitForIdle(); LOG(DEBUG, "Adding Floating-ip fip2"); AddFloatingIp("fip2", 2, "2.2.2.2"); client->WaitForIdle(); // Port Active since VRF and VM already added client->Reset(); AddLink("virtual-network", "vn1", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); AddLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); AddInstanceIp("instance0", input[0].vm_id, input[0].addr); AddLink("virtual-machine-interface", input[0].name, "instance-ip", "instance0"); client->WaitForIdle(); // Add vm-port interface to vrf link AddVmPortVrf("vnvrf1", "", 0); AddLink("virtual-machine-interface-routing-instance", "vnvrf1", "routing-instance", "vrf6"); AddLink("virtual-machine-interface-routing-instance", "vnvrf1", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); EXPECT_TRUE(VmPortActive(input, 0)); EXPECT_TRUE(VmPortFloatingIpCount(1, 3)); LOG(DEBUG, "Link fip2 to fip-pool1"); AddLink("floating-ip", "fip2", "floating-ip-pool", "fip-pool1"); client->WaitForIdle(); AddLink("virtual-machine-interface", "vnet1", "floating-ip", "fip2"); DelLink("virtual-machine-interface", "vnet1", "floating-ip", "fip3"); DelLink("floating-ip", "fip3", "floating-ip-pool", "fip-pool1"); client->WaitForIdle(); DelNode("floating-ip", "fip3"); client->WaitForIdle(); DelLink("virtual-machine-interface", "vnet1", "floating-ip", "fip4"); DelLink("floating-ip", "fip4", "floating-ip-pool", "fip-pool1"); client->WaitForIdle(); DelNode("floating-ip", "fip4"); client->WaitForIdle(); EXPECT_TRUE(VmPortFloatingIpCount(1, 2)); AddLink("virtual-network", "default-project:vn2", "routing-instance", "vrf6"); client->WaitForIdle(); AddLink("virtual-network", "default-project:vn2", "routing-instance", "vrf8"); client->WaitForIdle(); DelLink("virtual-network", "vn1", "routing-instance", "vrf6"); client->WaitForIdle(); DelLink("virtual-network", "default-project:vn2", "routing-instance", "vrf6"); client->WaitForIdle(); DelLink("virtual-network", "default-project:vn2", "routing-instance", "vrf8"); client->WaitForIdle(); DelLink("virtual-network", "default-project:vn2", "routing-instance", "default-project:vn2:vn2"); client->WaitForIdle(); DelLink("virtual-machine-interface", "vnet1", "floating-ip", "fip1"); client->WaitForIdle(); DelLink("virtual-machine-interface", "vnet1", "floating-ip", "fip2"); client->WaitForIdle(); DelLink("floating-ip", "fip1", "floating-ip-pool", "fip-pool1"); client->WaitForIdle(); DelLink("floating-ip", "fip2", "floating-ip-pool", "fip-pool1"); client->WaitForIdle(); DelLink("virtual-network", "default-project:vn2", "floating-ip-pool", "fip-pool1"); client->WaitForIdle(); DelLink("virtual-network", "vn1", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); DelLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); DelLink("virtual-machine-interface", input[0].name, "instance-ip", "instance1"); client->WaitForIdle(); // Delete virtual-machine-interface to vrf link attribute DelLink("virtual-machine-interface-routing-instance", "vnvrf1", "routing-instance", "vrf6"); DelLink("virtual-machine-interface-routing-instance", "vnvrf1", "virtual-machine-interface", "vnet1"); DelNode("virtual-machine-interface-routing-instance", "vnvrf1"); client->WaitForIdle(); DelNode("floating-ip", "fip1"); client->WaitForIdle(); DelNode("floating-ip", "fip2"); client->WaitForIdle(); EXPECT_TRUE(VmPortFloatingIpCount(1, 0)); DelNode("floating-ip-pool", "fip-pool1"); client->WaitForIdle(); DelNode("routing-instance", "vrf6"); client->WaitForIdle(); EXPECT_FALSE(VrfFind("vrf6")); DelNode("routing-instance", "default-project:vn2:vn2"); client->WaitForIdle(); EXPECT_FALSE(VrfFind("default-project:vn2:vn2")); DelNode("routing-instance", "vrf8"); client->WaitForIdle(); EXPECT_FALSE(VrfFind("vrf8")); DelNode("virtual-network", "vn1"); client->WaitForIdle(); EXPECT_FALSE(VnFind(1)); DelNode("virtual-network", "default-project:vn2"); client->WaitForIdle(); EXPECT_FALSE(VnFind(2)); DelNode("virtual-machine", "vm1"); DelLink("virtual-machine-interface", input[0].name, "instance-ip", "instance0"); DelInstanceIp("instance0"); client->WaitForIdle(); EXPECT_FALSE(VmFind(1)); IntfCfgDel(input, 0); client->WaitForIdle(); EXPECT_FALSE(VmPortFind(input, 0)); #if 0 DelLink("virtual-network", "vn1", "virtual-machine-interface", "vnet1"); DelLink("virtual-machine", "vm1", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); LOG(DEBUG, "Cleanup implementation pending..."); // Delete config vm entry - no-op for oper-db. Port is active client->Reset(); DelNode("virtual-machine", "vm1"); client->WaitForIdle(); EXPECT_TRUE(VnFind(1)); EXPECT_FALSE(VmFind(1)); EXPECT_TRUE(VmPortFind(input, 0)); EXPECT_EQ(4U, Agent::GetInstance()->interface_table()->Size()); EXPECT_EQ(0U, Agent::GetInstance()->vm_table()->Size()); EXPECT_EQ(1U, Agent::GetInstance()->vn_table()->Size()); EXPECT_EQ(2U, Agent::GetInstance()->interface_config_table()->Size()); // Delete Nova Port entry. client->Reset(); IntfCfgDel(input, 0); IntfCfgDel(input, 1); EXPECT_TRUE(client->PortNotifyWait(2)); EXPECT_FALSE(VmFind(1)); EXPECT_FALSE(VmPortFind(input, 0)); EXPECT_EQ(2U, Agent::GetInstance()->interface_table()->Size()); EXPECT_EQ(0U, Agent::GetInstance()->vm_table()->Size()); EXPECT_EQ(1U, Agent::GetInstance()->vn_table()->Size()); EXPECT_EQ(0U, Agent::GetInstance()->vm_table()->Size()); // Del VN to VRF link. Port should become inactive client->Reset(); DelLink("virtual-network", "vn1", "routing-instance", "vrf5"); DelNode("virtual-network", "vn1"); client->WaitForIdle(); EXPECT_FALSE(VnFind(1)); #endif } TEST_F(CfgTest, Basic_1) { string eth_intf = "eth10"; string vrf_name = "__non_existent_vrf__"; //char buff[4096]; //int len = 0; struct PortInfo input[] = { {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 5, 5}, }; PhysicalInterfaceKey key(eth_intf); PhysicalInterface *phy_intf = NULL; client->Reset(); PhysicalInterface::CreateReq(Agent::GetInstance()->interface_table(), eth_intf, Agent::GetInstance()->fabric_vrf_name(), PhysicalInterface::FABRIC, PhysicalInterface::ETHERNET, false, nil_uuid(), Ip4Address(0), Interface::TRANSPORT_ETHERNET); client->WaitForIdle(); phy_intf = static_cast<PhysicalInterface *> (agent_->interface_table()->FindActiveEntry(&key)); EXPECT_TRUE(phy_intf->persistent() == false); EXPECT_TRUE(phy_intf->subtype() == PhysicalInterface::FABRIC); InetInterface::CreateReq(Agent::GetInstance()->interface_table(), "vhost10", InetInterface::VHOST, Agent::GetInstance()->fabric_vrf_name(), Ip4Address(0), 0, Ip4Address(0), eth_intf, "", Interface::TRANSPORT_ETHERNET); client->WaitForIdle(); AddVn("default-project:vn5", 5); client->WaitForIdle(); EXPECT_TRUE(client->VnNotifyWait(1)); AddVm("vm5", 5); client->WaitForIdle(); EXPECT_TRUE(client->VmNotifyWait(1)); AddVrf("default-project:vn5:vn5"); client->WaitForIdle(); EXPECT_TRUE(client->VrfNotifyWait(1)); EXPECT_TRUE(VrfFind("default-project:vn5:vn5")); AddFloatingIpPool("fip-pool1", 1); AddFloatingIp("fip1", 1, "10.10.10.1"); AddFloatingIp("fip2", 2, "2.2.2.2"); AddFloatingIp("fip3", 3, "30.30.30.1"); client->WaitForIdle(); IntfCfgAdd(input, 0); client->WaitForIdle(); AddPort(input[0].name, input[0].intf_id); client->WaitForIdle(); AddLink("virtual-network", "default-project:vn5", "routing-instance", "default-project:vn5:vn5"); client->WaitForIdle(); AddLink("floating-ip-pool", "fip-pool1", "virtual-network", "default-project:vn5"); client->WaitForIdle(); AddLink("floating-ip", "fip1", "floating-ip-pool", "fip-pool1"); client->WaitForIdle(); AddLink("floating-ip", "fip2", "floating-ip-pool", "fip-pool1"); client->WaitForIdle(); AddLink("floating-ip", "fip3", "floating-ip-pool", "fip-pool1"); client->WaitForIdle(); AddLink("virtual-machine-interface", "vnet1", "floating-ip", "fip1"); client->WaitForIdle(); AddLink("virtual-machine-interface", "vnet1", "floating-ip", "fip2"); client->WaitForIdle(); AddLink("virtual-machine-interface", "vnet1", "floating-ip", "fip3"); client->WaitForIdle(); AddLink("virtual-network", "default-project:vn5", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); AddLink("virtual-machine", "vm5", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); AddInstanceIp("instance0", input[0].vm_id, input[0].addr); AddLink("virtual-machine-interface", input[0].name, "instance-ip", "instance0"); client->WaitForIdle(); // Add vm-port interface to vrf link AddVmPortVrf("vmvrf1", "", 0); AddLink("virtual-machine-interface-routing-instance", "vmvrf1", "routing-instance", "default-project:vn5:vn5"); AddLink("virtual-machine-interface-routing-instance", "vmvrf1", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); EXPECT_TRUE(VmPortActive(input, 0)); client->WaitForIdle(); std::vector<int> result = list_of(1); Sandesh::set_response_callback(boost::bind(ValidateSandeshResponse, _1, result)); AgentSandeshPtr sand_1(CreateAgentIntfSandesh("vnet1")); sand_1->DoSandesh(sand_1); client->WaitForIdle(); AgentSandeshPtr sand_2(CreateAgentIntfSandesh("eth10")); sand_2->DoSandesh(sand_2); client->WaitForIdle(); AgentSandeshPtr sand_3(CreateAgentIntfSandesh("pkt0")); sand_3->DoSandesh(sand_3); client->WaitForIdle(); AgentSandeshPtr sand_4(CreateAgentIntfSandesh("vhost10")); sand_4->DoSandesh(sand_4); client->WaitForIdle(); AgentSandeshPtr sand_5(CreateAgentIntfSandesh("vhost10")); sand_5->DoSandesh(sand_5, 0, 1); client->WaitForIdle(); InetInterface::DeleteReq(Agent::GetInstance()->interface_table(), "vhost10"); client->WaitForIdle(); PhysicalInterface::DeleteReq(Agent::GetInstance()->interface_table(), eth_intf); client->WaitForIdle(); client->Reset(); DelLink("virtual-network", "default-project:vn5", "routing-instance", "default-project:vn5:vn5"); client->WaitForIdle(); DelLink("floating-ip-pool", "fip-pool1", "virtual-network", "default-project:vn5"); client->WaitForIdle(); DelLink("floating-ip", "fip1", "floating-ip-pool", "fip-pool1"); client->WaitForIdle(); DelLink("floating-ip", "fip2", "floating-ip-pool", "fip-pool1"); client->WaitForIdle(); DelLink("floating-ip", "fip3", "floating-ip-pool", "fip-pool1"); client->WaitForIdle(); DelNode("floating-ip-pool", "fip-pool1"); DelLink("virtual-machine-interface", "vnet1", "floating-ip", "fip1"); client->WaitForIdle(); DelLink("virtual-machine-interface", "vnet1", "floating-ip", "fip2"); client->WaitForIdle(); DelLink("virtual-machine-interface", "vnet1", "floating-ip", "fip3"); client->WaitForIdle(); DelLink("virtual-network", "default-project:vn5", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); DelLink("virtual-machine", "vm5", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); DelLink("instance-ip", "instance0", "virtual-machine-interface", "vnet1"); client->WaitForIdle(); // Delete virtual-machine-interface to vrf link attribute DelLink("virtual-machine-interface-routing-instance", "vmvrf1", "routing-instance", "default-project:vn5:vn5"); DelLink("virtual-machine-interface-routing-instance", "vmvrf1", "virtual-machine-interface", "vnet1"); DelNode("virtual-machine-interface-routing-instance", "vmvrf1"); client->WaitForIdle(); client->Reset(); IntfCfgDel(input, 0); DelPort(input[0].name); client->WaitForIdle(); client->Reset(); DelNode("floating-ip", "fip1"); DelNode("floating-ip", "fip2"); DelNode("floating-ip", "fip3"); client->WaitForIdle(); DelNode("virtual-machine", "vm5"); client->WaitForIdle(); DelNode("routing-instance", "default-project:vn5:vn5"); DelInstanceIp("instance0"); client->WaitForIdle(); DelNode("virtual-network", "default-project:vn5"); client->WaitForIdle(); WAIT_FOR(1000, 1000, (0 == Agent::GetInstance()->vm_table()->Size())); WAIT_FOR(1000, 1000, (VnFind(5) == false)); WAIT_FOR(1000, 1000, (VmFind(5) == false)); } TEST_F(CfgTest, Basic_2) { struct PortInfo input[] = { {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1} }; CreateVmportEnv(input, 1); client->WaitForIdle(); EXPECT_TRUE(VmPortActive(input, 0)); VmInterfaceKey key(AgentKey::ADD_DEL_CHANGE, MakeUuid(1), ""); VmInterface *intf = static_cast<VmInterface *> (Agent::GetInstance()->interface_table()->FindActiveEntry(&key)); EXPECT_TRUE(intf != NULL); if (intf == NULL) { return; } InetUnicastAgentRouteTable *table = Agent::GetInstance()->fabric_inet4_unicast_table(); InetUnicastRouteEntry *rt = static_cast<InetUnicastRouteEntry *> (table->FindRoute(intf->mdata_ip_addr())); EXPECT_TRUE(rt != NULL); if (rt == NULL) { return; } const NextHop *nh = rt->GetActiveNextHop(); EXPECT_TRUE(nh != NULL); if (nh == NULL) { return; } EXPECT_TRUE(nh->PolicyEnabled()); Ip4Address addr = Ip4Address::from_string("1.1.1.1"); table = static_cast<InetUnicastAgentRouteTable *> (Agent::GetInstance()->vrf_table()->GetInet4UnicastRouteTable("vrf1")); rt = table->FindRoute(addr); EXPECT_TRUE(rt != NULL); if (rt == NULL) { return; } nh = rt->GetActiveNextHop(); EXPECT_TRUE(nh != NULL); if (nh == NULL) { return; } EXPECT_FALSE(nh->PolicyEnabled()); DeleteVmportEnv(input, 1, true); client->WaitForIdle(); EXPECT_FALSE(VmPortFind(1)); } TEST_F(CfgTest, SecurityGroup_1) { struct PortInfo input[] = { {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1} }; CreateVmportEnv(input, 1); client->WaitForIdle(); EXPECT_TRUE(VmPortActive(input, 0)); AddSg("sg1", 1); AddAcl("acl1", 1); AddLink("security-group", "sg1", "access-control-list", "acl1"); client->WaitForIdle(); AddLink("virtual-machine-interface", "vnet1", "security-group", "sg1"); client->WaitForIdle(); VmInterfaceKey key(AgentKey::ADD_DEL_CHANGE, MakeUuid(1), ""); VmInterface *intf = static_cast<VmInterface *> (Agent::GetInstance()->interface_table()->FindActiveEntry(&key)); EXPECT_TRUE(intf != NULL); if (intf == NULL) { return; } EXPECT_TRUE(intf->sg_list().list_.size() == 1); DoInterfaceSandesh("vnet1"); Ip4Address addr(Ip4Address::from_string("1.1.1.1")); InetUnicastAgentRouteTable *table = static_cast<InetUnicastAgentRouteTable *> (Agent::GetInstance()->vrf_table()->GetInet4UnicastRouteTable("vrf1")); InetUnicastRouteEntry *rt = table->FindRoute(addr); EXPECT_TRUE(rt != NULL); if (rt == NULL) { return; } const AgentPath *path = rt->GetActivePath(); EXPECT_EQ(path->sg_list().size(), 1); EXPECT_TRUE(path->vxlan_id() == VxLanTable::kInvalidvxlan_id); EXPECT_TRUE(path->tunnel_bmap() == TunnelType::MplsType()); DoInterfaceSandesh("vnet1"); DelLink("virtual-network", "vn1", "access-control-list", "acl1"); DelLink("virtual-machine-interface", "vnet1", "access-control-list", "acl1"); DelLink("virtual-machine-interface", "vnet1", "security-group", "sg1"); DelLink("security-group", "sg1", "access-control-list", "acl1"); client->WaitForIdle(); DelNode("access-control-list", "acl1"); client->WaitForIdle(); DeleteVmportEnv(input, 1, true); DelNode("security-group", "sg1"); client->WaitForIdle(); EXPECT_FALSE(VmPortFind(1)); } TEST_F(CfgTest, SecurityGroup_ignore_invalid_sgid_1) { struct PortInfo input[] = { {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1} }; CreateVmportEnv(input, 1); client->WaitForIdle(); EXPECT_TRUE(VmPortActive(input, 0)); AddSg("sg1", 1, 0); AddAcl("acl1", 1); AddLink("security-group", "sg1", "access-control-list", "acl1"); client->WaitForIdle(); AddLink("virtual-machine-interface", "vnet1", "security-group", "sg1"); client->WaitForIdle(); //Query for SG SgKey *key = new SgKey(MakeUuid(1)); const SgEntry *sg_entry = static_cast<const SgEntry *>(Agent::GetInstance()->sg_table()-> FindActiveEntry(key)); EXPECT_TRUE(sg_entry == NULL); //Modify SGID AddSg("sg1", 1, 2); client->WaitForIdle(); sg_entry = static_cast<const SgEntry *>(Agent::GetInstance()->sg_table()-> FindActiveEntry(key)); EXPECT_TRUE(sg_entry != NULL); EXPECT_TRUE(sg_entry->GetSgId() == 2); AddSg("sg1", 1, 3); client->WaitForIdle(); sg_entry = static_cast<const SgEntry *>(Agent::GetInstance()->sg_table()-> FindActiveEntry(key)); EXPECT_TRUE(sg_entry != NULL); EXPECT_TRUE(sg_entry->GetSgId() == 3); DelLink("virtual-network", "vn1", "access-control-list", "acl1"); DelLink("virtual-machine-interface", "vnet1", "access-control-list", "acl1"); DelLink("virtual-machine-interface", "vnet1", "security-group", "sg1"); DelLink("security-group", "sg1", "access-control-list", "acl1"); client->WaitForIdle(); DelNode("access-control-list", "acl1"); client->WaitForIdle(); DeleteVmportEnv(input, 1, true); DelNode("security-group", "sg1"); client->WaitForIdle(); delete key; EXPECT_FALSE(VmPortFind(1)); } // Test invalid sgid with interface update TEST_F(CfgTest, SecurityGroup_ignore_invalid_sgid_2) { struct PortInfo input[] = { {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1} }; CreateVmportEnv(input, 1); client->WaitForIdle(); EXPECT_TRUE(VmPortActive(input, 0)); AddSg("sg1", 1, 0); AddAcl("acl1", 1); AddLink("security-group", "sg1", "access-control-list", "acl1"); client->WaitForIdle(); AddLink("virtual-machine-interface", "vnet1", "security-group", "sg1"); client->WaitForIdle(); VmInterfaceKey key(AgentKey::ADD_DEL_CHANGE, MakeUuid(1), ""); VmInterface *intf = static_cast<VmInterface *> (Agent::GetInstance()->interface_table()->FindActiveEntry(&key)); EXPECT_TRUE(intf != NULL); if (intf == NULL) { return; } EXPECT_TRUE(intf->sg_list().list_.size() == 0); // Add with proper sg id AddSg("sg1", 1, 1); client->WaitForIdle(); EXPECT_TRUE(intf->sg_list().list_.size() == 1); VmInterface::SecurityGroupEntrySet::const_iterator it = intf->sg_list().list_.begin(); EXPECT_TRUE(it->sg_.get() != NULL); EXPECT_TRUE(it->sg_->GetSgId() == 1); DelLink("virtual-network", "vn1", "access-control-list", "acl1"); DelLink("virtual-machine-interface", "vnet1", "access-control-list", "acl1"); DelLink("virtual-machine-interface", "vnet1", "security-group", "sg1"); DelLink("security-group", "sg1", "access-control-list", "acl1"); client->WaitForIdle(); DelNode("access-control-list", "acl1"); client->WaitForIdle(); DeleteVmportEnv(input, 1, true); DelNode("security-group", "sg1"); client->WaitForIdle(); EXPECT_FALSE(VmPortFind(1)); } int main(int argc, char **argv) { GETUSERARGS(); client = TestInit(init_file, ksync_init); int ret = RUN_ALL_TESTS(); TestShutdown(); delete client; return ret; }
apache-2.0
OHDSI/ETL-CDMBuilder
source/org.ohdsi.cdm.framework.etl/org.ohdsi.cdm.framework.etl.cprdhes/ETL/CPRDHES/Lookups/icd10.sql
1082
{base}, Standard as ( SELECT distinct SOURCE_CODE, TARGET_CONCEPT_ID, TARGET_DOMAIN_ID, SOURCE_VALID_START_DATE as VALID_START_DATE, SOURCE_VALID_END_DATE as VALID_END_DATE, SOURCE_VOCABULARY_ID FROM Source_to_Standard WHERE lower(SOURCE_VOCABULARY_ID) IN ('icd10') ), Source as ( SELECT distinct SOURCE_CODE, TARGET_CONCEPT_ID, TARGET_DOMAIN_ID FROM Source_to_Source WHERE lower(SOURCE_VOCABULARY_ID) IN ('icd10') ), S_S as ( select SOURCE_CODE from Standard union select SOURCE_CODE from Source ) select distinct S_S.SOURCE_CODE, Standard.TARGET_CONCEPT_ID, Standard.TARGET_DOMAIN_ID, Standard.VALID_START_DATE, Standard.VALID_END_DATE, Standard.SOURCE_VOCABULARY_ID, Source.TARGET_CONCEPT_ID as SOURCE_TARGET_CONCEPT_ID, cast('1900/1/1' as date) as SOURCE_validStartDate, cast('2100/1/1' as date) as SOURCE_validEndDate, ingredient_level.ingredient_concept_id from S_S left join Standard on Standard.SOURCE_CODE = S_S.SOURCE_CODE left join Source on Source.SOURCE_CODE = S_S.SOURCE_CODE left join ingredient_level on ingredient_level.concept_id = Standard.TARGET_CONCEPT_ID
apache-2.0
wilebeast/FireFox-OS
B2G/gecko/dom/devicestorage/test/test_enumerate.html
2907
<!-- Any copyright is dedicated to the Public Domain. http://creativecommons.org/publicdomain/zero/1.0/ --> <!DOCTYPE HTML> <html> <!-- https://bugzilla.mozilla.org/show_bug.cgi?id=717103 --> <head> <title>Test for the device storage API </title> <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> <script type="text/javascript" src="devicestorage_common.js"></script> <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> </head> <body> <a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=717103">Mozilla Bug 717103</a> <p id="display"></p> <div id="content" style="display: none"> </div> <pre id="test"> <script class="testbody" type="text/javascript"> devicestorage_setup(); function enumerateSuccess(e) { if (e.target.result == null) { ok(files.length == 0, "when the enumeration is done, we shouldn't have any files in this array") dump("We still have length = " + files.length + "\n"); devicestorage_cleanup(); return; } var filename = e.target.result.name; if (filename[0] == "/") { // We got /storgaeName/prefix/filename // Remove the storageName (this shows up on FirefoxOS) filename = filename.substring(1); // Remove leading slash var slashIndex = filename.indexOf("/"); if (slashIndex >= 0) { filename = filename.substring(slashIndex + 1); // Remove storageName } } if (filename.startsWith(prefix)) { filename = filename.substring(prefix.length + 1); // Remove prefix } var index = files.indexOf(filename); files.remove(index); ok(index > -1, "filename should be in the enumeration : " + e.target.result.name); // clean up var cleanup = storage.delete(e.target.result.name); cleanup.onsuccess = function(e) {} // todo - can i remove this? e.target.continue(); } function handleError(e) { ok(false, "handleError was called : " + e.target.error.name); devicestorage_cleanup(); } function addSuccess(e) { addedSoFar = addedSoFar + 1; if (addedSoFar == files.length) { var cursor = storage.enumerate(prefix); cursor.onsuccess = enumerateSuccess; cursor.onerror = handleError; } } function addError(e) { ok(false, "addError was called : " + e.target.error.name); devicestorage_cleanup(); } var storage = navigator.getDeviceStorage("pictures"); ok(navigator.getDeviceStorage, "Should have getDeviceStorage"); var prefix = "devicestorage/" + randomFilename(12) + ".png" var files = [ "a.PNG", "b.pnG", "c.png", "d/a.png", "d/b.png", "d/c.png", "d/d.png", "The/quick/brown/fox/jumps/over/the/lazy/dog.png"] var addedSoFar = 0; for (var i=0; i<files.length; i++) { request = storage.addNamed(createRandomBlob('image/png'), prefix + '/' + files[i]); ok(request, "Should have a non-null request"); request.onsuccess = addSuccess; request.onerror = addError; } </script> </pre> </body> </html>
apache-2.0
cityzendata/warp10-platform
warp10/src/main/java/io/warp10/WarpManager.java
1972
// // Copyright 2019 SenX S.A.S. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package io.warp10; import java.util.HashMap; import java.util.Map; import java.util.Properties; import io.warp10.continuum.Configuration; /** * Class used to control various aspects of the Warp 10 platform */ public class WarpManager { private static final String MANAGER_SECRET; public static final String UPDATE_DISABLED = "update.disabled"; public static final String META_DISABLED = "meta.disabled"; public static final String DELETE_DISABLED = "delete.disabled"; private static final Map<String,Object> attributes = new HashMap<String,Object>(); static { attributes.put(UPDATE_DISABLED, WarpConfig.getProperty(Configuration.WARP_UPDATE_DISABLED)); attributes.put(META_DISABLED, WarpConfig.getProperty(Configuration.WARP_META_DISABLED)); attributes.put(DELETE_DISABLED, WarpConfig.getProperty(Configuration.WARP_DELETE_DISABLED)); MANAGER_SECRET = WarpConfig.getProperty(Configuration.WARP10_MANAGER_SECRET); } public static Object getAttribute(String attr) { return attributes.get(attr); } public static synchronized Object setAttribute(String attr, Object value) { return attributes.put(attr, value); } public static boolean checkSecret(String secret) { if (null == MANAGER_SECRET) { return false; } else { return MANAGER_SECRET.equals(secret); } } }
apache-2.0
alxndrsn/enketo-core
src/widget/note/notewidget.js
2293
/** * @preserve Copyright 2012 Martijn van de Rijdt & Modilabs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ define( [ 'enketo-js/Widget', 'jquery', 'enketo-js/plugins' ], function( Widget, $ ) { 'use strict'; var pluginName = 'notewidget'; /** * Enhances notes * * @constructor * @param {Element} element [description] * @param {(boolean|{touch: boolean, repeat: boolean})} options options * @param {*=} e event */ function Notewidget( element, options ) { this.namespace = pluginName; Widget.call( this, element, options ); this._init(); } //copy the prototype functions from the Widget super class Notewidget.prototype = Object.create( Widget.prototype ); //ensure the constructor is the new one Notewidget.prototype.constructor = Notewidget; Notewidget.prototype._init = function() { var $el = $( this.element ); $el.find( '.question-label' ).markdownToHtml() .end().find( '[readonly]' ).addClass( 'ignore' ); if ( $el.is( '.note' ) && !$el.next().is( '.note' ) ) { $el.addClass( 'last-of-class' ); } }; Notewidget.prototype.destroy = function( element ) {}; $.fn[ pluginName ] = function( options, event ) { return this.each( function() { var $this = $( this ), data = $this.data( pluginName ); options = options || {}; if ( !data && typeof options === 'object' ) { $this.data( pluginName, ( data = new Notewidget( this, options, event ) ) ); } else if ( data && typeof options === 'string' ) { data[ options ]( this ); } } ); }; return pluginName; } );
apache-2.0
FOSSology-SPDX/fossology-spdx
utils/Makefile
2158
# FOSSology+SPDX Makefile - utils/ # Copyright (C) 2013 University of Nebraska at Omaha. # Create and install: # fossyspdx-postinstall TOP=.. VARS=$(TOP)/Makefile.conf include $(VARS) CONFPATH=$(SYSCONFDIR) all: fossyspdx-postinstall defconf/fossologyspdx.conf # include the preprocessing stuff include $(TOP)/Makefile.process # generate the postinstall script fossyspdx-postinstall: fossyspdx-postinstall-process chmod +x fossyspdx-postinstall # generate fossologyspdx.conf defconf/fossologyspdx.conf: defconf/fossologyspdx.conf-process install: all $(INSTALL_PROGRAM) fossyspdx-postinstall $(DESTDIR)$(LIBEXECDIR)/fossyspdx-postinstall $(INSTALL_PROGRAM) fossyspdxinit.php $(DESTDIR)$(LIBEXECDIR)/fossyspdxinit.php echo "Making configuration directories"; \ if [ ! -e $(DESTDIR)$(CONFPATH) ] ; then \ mkdir -p $(DESTDIR)$(CONFPATH); \ fi if [ ! -e $(DESTDIR)$(CONFPATH)/conf ] ; then \ mkdir -p $(DESTDIR)$(CONFPATH)/conf; \ fi echo "Installing configuration files..." if [ ! -f $(DESTDIR)$(CONFPATH)/fossologyspdx.conf -o "$(OVERWRITE)" ] ; then \ echo "NOTE: using default version for $(DESTDIR)$(CONFPATH)/fossologyspdx.conf"; \ $(INSTALL) -m 666 defconf/fossologyspdx.conf $(DESTDIR)$(CONFPATH)/fossologyspdx.conf; \ else \ echo "WARNING: $(DESTDIR)$(CONFPATH)/fossologyspdx.conf already exists."; \ echo " Not overwriting, consider checking it by hand or use the OVERWRITE option."; \ fi uninstall: rm -f $(DESTDIR)$(LIBEXECDIR)/fossyspdx-postinstall rm -f $(DESTDIR)$(LIBEXECDIR)/fossyspdxinit.php @if [ -d $(DESTDIR)$(LIBEXECDIR) ]; then \ if [ "`ls -A $(DESTDIR)$(LIBEXECDIR)`" ]; then \ echo "WARNING: $(DESTDIR)$(LIBEXECDIR) not empty, not removing";\ else \ rmdir $(DESTDIR)$(LIBEXECDIR); \ fi; \ fi @echo "Configuration files will not be removed:" @echo " $(DESTDIR)$(CONFPATH)/fossologyspdx.conf" @echo "Remove by hand if you desire." clean: rm -f fossyspdx-postinstall defconf/fossologyspdx.conf test: all @echo "NOTICE: no tests available for install" coverage: @echo "NOTICE: no coverage available for install" .PHONY: all install uninstall clean test coverage
apache-2.0
flofreud/aws-sdk-java
aws-java-sdk-lambda/src/main/java/com/amazonaws/services/lambda/model/transform/GetFunctionConfigurationRequestMarshaller.java
3688
/* * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights * Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.services.lambda.model.transform; import static com.amazonaws.util.StringUtils.UTF8; import static com.amazonaws.util.StringUtils.COMMA_SEPARATOR; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.OutputStreamWriter; import java.io.StringWriter; import java.io.Writer; import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.List; import java.util.regex.Pattern; import com.amazonaws.AmazonClientException; import com.amazonaws.Request; import com.amazonaws.DefaultRequest; import com.amazonaws.http.HttpMethodName; import com.amazonaws.services.lambda.model.*; import com.amazonaws.transform.Marshaller; import com.amazonaws.util.BinaryUtils; import com.amazonaws.util.StringUtils; import com.amazonaws.util.IdempotentUtils; import com.amazonaws.util.StringInputStream; import com.amazonaws.util.SdkHttpUtils; import com.amazonaws.protocol.json.*; /** * GetFunctionConfigurationRequest Marshaller */ public class GetFunctionConfigurationRequestMarshaller implements Marshaller<Request<GetFunctionConfigurationRequest>, GetFunctionConfigurationRequest> { private static final String DEFAULT_CONTENT_TYPE = "application/x-amz-json-1.1"; private final SdkJsonProtocolFactory protocolFactory; public GetFunctionConfigurationRequestMarshaller( SdkJsonProtocolFactory protocolFactory) { this.protocolFactory = protocolFactory; } public Request<GetFunctionConfigurationRequest> marshall( GetFunctionConfigurationRequest getFunctionConfigurationRequest) { if (getFunctionConfigurationRequest == null) { throw new AmazonClientException( "Invalid argument passed to marshall(...)"); } Request<GetFunctionConfigurationRequest> request = new DefaultRequest<GetFunctionConfigurationRequest>( getFunctionConfigurationRequest, "AWSLambda"); request.setHttpMethod(HttpMethodName.GET); String uriResourcePath = "/2015-03-31/functions/{FunctionName}/configuration"; uriResourcePath = uriResourcePath .replace( "{FunctionName}", (getFunctionConfigurationRequest.getFunctionName() != null) ? SdkHttpUtils.urlEncode( StringUtils .fromString(getFunctionConfigurationRequest .getFunctionName()), false) : ""); request.setResourcePath(uriResourcePath); if (getFunctionConfigurationRequest.getQualifier() != null) { request.addParameter("Qualifier", StringUtils .fromString(getFunctionConfigurationRequest.getQualifier())); } request.setContent(new ByteArrayInputStream(new byte[0])); if (!request.getHeaders().containsKey("Content-Type")) { request.addHeader("Content-Type", DEFAULT_CONTENT_TYPE); } return request; } }
apache-2.0
kigsmtua/estatio
estatioapp/fixture/src/main/java/org/estatio/fixture/party/PersonAbstract.java
3768
/* * * Copyright 2012-2014 Eurocommercial Properties NV * * * Licensed under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.estatio.fixture.party; import javax.inject.Inject; import org.isisaddons.module.security.dom.tenancy.ApplicationTenancies; import org.isisaddons.module.security.dom.tenancy.ApplicationTenancy; import org.estatio.dom.communicationchannel.CommunicationChannelContributions; import org.estatio.dom.communicationchannel.CommunicationChannelType; import org.estatio.dom.geography.Countries; import org.estatio.dom.geography.States; import org.estatio.dom.party.Parties; import org.estatio.dom.party.Party; import org.estatio.dom.party.PersonGenderType; import org.estatio.dom.party.Persons; import org.estatio.dom.party.relationship.PartyRelationships; import org.estatio.fixture.EstatioFixtureScript; public abstract class PersonAbstract extends EstatioFixtureScript { @Override protected abstract void execute(ExecutionContext executionContext); protected Party createPerson( final String atPath, final String reference, final String initials, final String firstName, final String lastName, final PersonGenderType gender, final ExecutionContext executionContext) { ApplicationTenancy applicationTenancy = applicationTenancies.findTenancyByPath(atPath); Party party = persons.newPerson(reference, initials, firstName, lastName, gender, applicationTenancy); return executionContext.addResult(this, party.getReference(), party); } protected Party createPerson( final String atPath, final String reference, final String initials, final String firstName, final String lastName, final PersonGenderType gender, final String phoneNumber, final String emailAddress, final String fromPartyStr, final String relationshipType, final ExecutionContext executionContext) { ApplicationTenancy applicationTenancy = applicationTenancies.findTenancyByPath(atPath); // new person Party party = persons.newPerson(reference, initials, firstName, lastName, gender, applicationTenancy); communicationChannelContributedActions.newEmail(party, CommunicationChannelType.EMAIL_ADDRESS, emailAddress); communicationChannelContributedActions.newPhoneOrFax(party, CommunicationChannelType.PHONE_NUMBER, phoneNumber); // associate person Party from = parties.findPartyByReference(fromPartyStr); partyRelationships.newRelationship(from, party, relationshipType, null); return executionContext.addResult(this, party.getReference(), party); } // ////////////////////////////////////// @Inject protected Countries countries; @Inject protected States states; @Inject protected Parties parties; @Inject protected Persons persons; @Inject protected CommunicationChannelContributions communicationChannelContributedActions; @Inject protected PartyRelationships partyRelationships; @Inject protected ApplicationTenancies applicationTenancies; }
apache-2.0
rpbouman/spuc
src/html/spuc.html
3120
<!DOCTYPE html> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Dashboards</title> <script type="text/javascript"> var mantle_win; if (parent) mantle_win = parent; if (window.opener) { if (window.opener.parent) mantle_win = window.opener.parent; else mantle_win = window.opener; } var active_theme = mantle_win.active_theme; var core_theme_tree = mantle_win.core_theme_tree; var module_theme_tree = mantle_win.module_theme_tree; var CONTEXT_PATH = mantle_win.CONTEXT_PATH; </script> <script type="text/javascript" src="../../../../../js/themeResources.js"></script> <script type="text/javascript" src="../../../../../js/require.js"></script> <link rel="stylesheet" type="text/css" href="../css/tooltip.css" /> </head> <body class="pentaho-page-background"> <script type="text/javascript" src="../js/utilities.js"></script> <script type="text/javascript" src="../js/dom.js"></script> <script type="text/javascript" src="../js/event.js"></script> <script type="text/javascript" src="../js/observable.js"></script> <script type="text/javascript" src="../js/dnd.js"></script> <script type="text/javascript" src="../js/displayed.js"></script> <script type="text/javascript" src="../js/disabled.js"></script> <script type="text/javascript" src="../js/toolbar.js"></script> <script type="text/javascript" src="../js/treenode.js"></script> <script type="text/javascript" src="../js/treelistener.js"></script> <script type="text/javascript" src="../js/treeselection.js"></script> <script type="text/javascript" src="../js/highlighter.js"></script> <script type="text/javascript" src="../js/contentarea.js"></script> <script type="text/javascript" src="../js/contentpane.js"></script> <script type="text/javascript" src="../js/splitpane.js"></script> <script type="text/javascript" src="../js/tabpane.js"></script> <script type="text/javascript" src="../js/datagrid.js"></script> <script type="text/javascript" src="../js/datagridcellediting.js"></script> <script type="text/javascript" src="../js/parseXml.js"></script> <script type="text/javascript" src="../js/DataDocument.js"></script> <script type="text/javascript" src="../js/PdiDataDocument.js"></script> <script type="text/javascript" src="../js/PdiTransformation.js"></script> <script type="text/javascript" src="../js/PdiTransformationEditor.js"></script> <script type="text/javascript" src="../js/PdiEditorSelector.js"></script> <script type="text/javascript" src="../js/PdiEditorSelection.js"></script> <script type="text/javascript" src="../js/PdiJob.js"></script> <script type="text/javascript" src="../js/PdiJobEditor.js"></script> <script type="text/javascript" src="../js/Phile.js"></script> <script type="text/javascript" src="../js/Spuc.js"></script> <script type="text/javascript" src="../js/Kettle.js"></script> <script type="text/javascript" src="../js/app.js"></script> </body> </html>
apache-2.0
wh1t3projects/Pure_PHP_Framework
docs/quickstart/Page07-CreatingABasicModule.md
3453
##### Creating a basic module Modules can be very useful by providing you with more features that can be used within web pages and can even provide a full web app that will work on top of your website. You can get more information about modules in the [modules](../modules/Home) section of the documentation.<br/> Here, we will only show you the basics on how to create a module. There is absolutely no limit on what you can with them, so you can go very deep and build a powerful web app.<br/> One important thing to remember is that modules are loaded during the Framework **boot process**. Therefore, it is preferable to **only** declare functions, load libraries (if needed) and hook to [events](../functions/event_handler/kernel_event_register) during the initialization. Then, you can use these events to run your code.<br/> Modules folder are named using the syntax *organization_moduleName*. For example, if we want to create a module named *helloWorld*, then our module's folder would be *wh1t3projects_helloWorld*.<br/> The reason to specify the organization's name as part of the module's name is the same than with our function that we created earlier: to prevent a 'collision'. Only in this case, it's to prevent having a 'collision' if another organization or developer create a module with the same name and you would like to use both. Obviously, that would be impossible, since one's files would replace the other's.<br/> Now create a folder inside the **modules** directory and create a file named `init.php` inside it. Open the new file for editing.<br/> Here you can write any PHP code you want, but like previously stated, it is best to only create functions here and hook to events to run our code. A module can also only provide functions (never hooking to an event) and therefore, act only as a library for other modules or even web pages.<br/> **NOTE**: In the event you wrote code that works by using another module as a library, or if you need features from it that are automated, you might want to first check if that module is installed by using the [kernel_checkIfModuleIsValid](../functions/kernel/kernel_checkIfModuleIsValid) function prior to doing anything. However, this guide doesn't cover its usage.<br/> Add the usual `<?php` and `?>` at the beginning and ending of the file respectively, so that your module's **code** will be processed and not be sent as **text** to the browser.<br/> Now you can get a bit original. Create a function with any name you want, but again, to prevent a 'collision' you need to prefix it with your **module's** name (and not the organization) followed by the underscore, like `helloWorld_hello`.<br/> Write some code that will output something to the browser (instead of returning data), like `echo 'Hello World!';`. This will make it easier to see our module is working when calling it.<br/> Now add a call, to your new function, inside a web page (make sure to put it inside a `div` with a `container` or you may not see your text!). Save the files and test it via your browser.<br/> It works! Now, if you want, you can even call the `theme_column` function from within your new function and make it say anything you want. Make sure that you call your new function **only when rendering a page** in that case or you will get an error because the theme's functions are not loaded and, therefore, not declared. [Previous](./Page06-UsingAThemeFunction) | [Next](./Page08-ThatsIt)
apache-2.0
Pants/stamp-java-obfuscator
src/main/java/wtf/pants/stamp/annotations/StampPreserve.java
364
package wtf.pants.stamp.annotations; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; /** * @author Pants * * Currently only works for methods */ @Retention(RetentionPolicy.CLASS) @Target({ElementType.METHOD}) public @interface StampPreserve { }
apache-2.0
EnMasseProject/enmasse
pkg/consolegraphql/watchers/resource_watcher_addressspaceschema.go
7437
/* * Copyright 2019, EnMasse authors. * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). */ // Code generated by go generate; DO NOT EDIT. package watchers import ( "fmt" tp "github.com/enmasseproject/enmasse/pkg/apis/enmasse/v1beta1" cp "github.com/enmasseproject/enmasse/pkg/client/clientset/versioned/typed/enmasse/v1beta1" "github.com/enmasseproject/enmasse/pkg/consolegraphql/cache" "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/rest" "log" "math/rand" "reflect" "time" ) type AddressSpaceSchemaWatcher struct { Namespace string cache.Cache ClientInterface cp.EnmasseV1beta1Interface watching chan struct{} watchingStarted bool stopchan chan struct{} stoppedchan chan struct{} create func(*tp.AddressSpaceSchema) interface{} update func(*tp.AddressSpaceSchema, interface{}) bool restartCounter int32 resyncInterval *time.Duration } func NewAddressSpaceSchemaWatcher(c cache.Cache, resyncInterval *time.Duration, options ...WatcherOption) (ResourceWatcher, error) { kw := &AddressSpaceSchemaWatcher{ Namespace: v1.NamespaceAll, Cache: c, watching: make(chan struct{}), stopchan: make(chan struct{}), stoppedchan: make(chan struct{}), resyncInterval: resyncInterval, create: func(v *tp.AddressSpaceSchema) interface{} { return v }, update: func(v *tp.AddressSpaceSchema, e interface{}) bool { if !reflect.DeepEqual(v, e) { *e.(*tp.AddressSpaceSchema) = *v return true } else { return false } }, } for _, option := range options { option(kw) } if kw.ClientInterface == nil { return nil, fmt.Errorf("Client must be configured using the AddressSpaceSchemaWatcherConfig or AddressSpaceSchemaWatcherClient") } return kw, nil } func AddressSpaceSchemaWatcherFactory(create func(*tp.AddressSpaceSchema) interface{}, update func(*tp.AddressSpaceSchema, interface{}) bool) WatcherOption { return func(watcher ResourceWatcher) error { w := watcher.(*AddressSpaceSchemaWatcher) w.create = create w.update = update return nil } } func AddressSpaceSchemaWatcherConfig(config *rest.Config) WatcherOption { return func(watcher ResourceWatcher) error { w := watcher.(*AddressSpaceSchemaWatcher) var cl interface{} cl, _ = cp.NewForConfig(config) client, ok := cl.(cp.EnmasseV1beta1Interface) if !ok { return fmt.Errorf("unexpected type %T", cl) } w.ClientInterface = client return nil } } // Used to inject the fake client set for testing purposes func AddressSpaceSchemaWatcherClient(client cp.EnmasseV1beta1Interface) WatcherOption { return func(watcher ResourceWatcher) error { w := watcher.(*AddressSpaceSchemaWatcher) w.ClientInterface = client return nil } } func (kw *AddressSpaceSchemaWatcher) Watch() error { go func() { defer close(kw.stoppedchan) defer func() { if !kw.watchingStarted { close(kw.watching) } }() resource := kw.ClientInterface.AddressSpaceSchemas() log.Printf("AddressSpaceSchema - Watching") running := true for running { err := kw.doWatch(resource) if err != nil { log.Printf("AddressSpaceSchema - Restarting watch - %v", err) atomicInc(&kw.restartCounter) } else { running = false } } log.Printf("AddressSpaceSchema - Watching stopped") }() return nil } func (kw *AddressSpaceSchemaWatcher) AwaitWatching() { <-kw.watching } func (kw *AddressSpaceSchemaWatcher) Shutdown() { close(kw.stopchan) <-kw.stoppedchan } func (kw *AddressSpaceSchemaWatcher) GetRestartCount() int32 { return atomicGet(&kw.restartCounter) } func (kw *AddressSpaceSchemaWatcher) doWatch(resource cp.AddressSpaceSchemaInterface) error { resourceList, err := resource.List(v1.ListOptions{}) if err != nil { return err } keyCreator, err := kw.Cache.GetKeyCreator(cache.PrimaryObjectIndex) if err != nil { return err } curr := make(map[string]interface{}, 0) _, err = kw.Cache.Get(cache.PrimaryObjectIndex, "AddressSpaceSchema/", func(obj interface{}) (bool, bool, error) { gen, key, err := keyCreator(obj) if err != nil { return false, false, err } else if !gen { return false, false, fmt.Errorf("failed to generate key for existing object %+v", obj) } curr[key] = obj return false, true, nil }) var added = 0 var updated = 0 var unchanged = 0 for _, res := range resourceList.Items { copy := res.DeepCopy() kw.updateGroupVersionKind(copy) candidate := kw.create(copy) gen, key, err := keyCreator(candidate) if err != nil { return err } else if !gen { return fmt.Errorf("failed to generate key for new object %+v", copy) } if existing, ok := curr[key]; ok { err = kw.Cache.Update(func(target interface{}) (interface{}, error) { if kw.update(copy, target) { updated++ return target, nil } else { unchanged++ return nil, nil } }, existing) if err != nil { return err } delete(curr, key) } else { err = kw.Cache.Add(candidate) if err != nil { return err } added++ } } // Now remove any stale for _, stale := range curr { err = kw.Cache.Delete(stale) if err != nil { return err } } var stale = len(curr) log.Printf("AddressSpaceSchema - Cache initialised population added %d, updated %d, unchanged %d, stale %d", added, updated, unchanged, stale) watchOptions := v1.ListOptions{ ResourceVersion: resourceList.ResourceVersion, } if kw.resyncInterval != nil { ts := int64(kw.resyncInterval.Seconds() * (rand.Float64() + 1.0)) watchOptions.TimeoutSeconds = &ts } resourceWatch, err := resource.Watch(watchOptions) if err != nil { return err } defer resourceWatch.Stop() if !kw.watchingStarted { close(kw.watching) kw.watchingStarted = true } ch := resourceWatch.ResultChan() for { select { case event, chok := <-ch: if !chok { return fmt.Errorf("watch ended due to channel error") } else if event.Type == watch.Error { return fmt.Errorf("watch ended in error") } var err error log.Printf("AddressSpaceSchema - Received event type %s", event.Type) res, ok := event.Object.(*tp.AddressSpaceSchema) if !ok { err = fmt.Errorf("Watch error - object of unexpected type, %T, received", event.Object) } else { copy := res.DeepCopy() kw.updateGroupVersionKind(copy) switch event.Type { case watch.Added: err = kw.Cache.Add(kw.create(copy)) case watch.Modified: updatingKey := kw.create(copy) err = kw.Cache.Update(func(target interface{}) (interface{}, error) { if kw.update(copy, target) { return target, nil } else { return nil, nil } }, updatingKey) case watch.Deleted: err = kw.Cache.Delete(kw.create(copy)) } } if err != nil { return err } case <-kw.stopchan: log.Printf("AddressSpaceSchema - Shutdown received") return nil } } } // KubernetesRBACAccessController relies on the GVK information to be set on objects. // List provides GVK (https://github.com/kubernetes/kubernetes/pull/63972) but Watch does not not so we set it ourselves. func (kw *AddressSpaceSchemaWatcher) updateGroupVersionKind(o *tp.AddressSpaceSchema) { if o.TypeMeta.Kind == "" || o.TypeMeta.APIVersion == "" { o.TypeMeta.SetGroupVersionKind(tp.SchemeGroupVersion.WithKind("AddressSpaceSchema")) } }
apache-2.0
shawkins/teiid-documents
reference/Date_Time_Functions.html
103
<html> <head> <meta http-equiv="refresh" content="0; URL=r_date-time-functions.html" /> </head> </html>
apache-2.0
clementparizot/ET_Redux
src/main/java/org/earthtime/Tripoli/dataViews/fitFunctionPresentationViews/SessionFitFunctionsPresentationView.java
2640
/* * SessionFitFunctionsPresentationView.java * * Copyright 2006-2015 James F. Bowring and www.Earth-Time.org * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.earthtime.Tripoli.dataViews.fitFunctionPresentationViews; import java.awt.Cursor; import java.awt.Graphics2D; import java.awt.Rectangle; import javax.swing.JLayeredPane; import org.earthtime.Tripoli.dataModels.DataModelFitFunctionInterface; import org.earthtime.Tripoli.dataModels.sessionModels.AbstractSessionForStandardDataModel; import org.earthtime.Tripoli.dataViews.simpleViews.FitFunctionDataInterface; import org.earthtime.dataDictionaries.DataPresentationModeEnum; /** * * @author James F. Bowring */ public class SessionFitFunctionsPresentationView extends AbstractFitFunctionPresentationView { private final DataModelFitFunctionInterface sessionForStandardDataModel; /** * * * @param sampleSessionDataView * @param sessionForStandardDataModel * @param targetDataModelView * @param dataPresentationMode * @param bounds */ public SessionFitFunctionsPresentationView( // JLayeredPane sampleSessionDataView, // DataModelFitFunctionInterface sessionForStandardDataModel,// FitFunctionDataInterface targetDataModelView, // DataPresentationModeEnum dataPresentationMode, // Rectangle bounds) { super(targetDataModelView, bounds); setCursor(Cursor.getDefaultCursor()); this.sampleSessionDataView = sampleSessionDataView; this.sessionForStandardDataModel = sessionForStandardDataModel; this.dataPresentationMode = dataPresentationMode; this.standardValue = ((AbstractSessionForStandardDataModel) sessionForStandardDataModel).getStandardValue(); } /** * * @param g2d */ @Override public void paint(Graphics2D g2d) { paintInit(g2d); } /** * */ @Override public void preparePanel() { removeAll(); createFitFunctionPanes(sessionForStandardDataModel, true); } }
apache-2.0
swipely/amazon-ecs-agent
agent/taskresource/fsxwindowsfileserver/fsxwindowsfileserverstatus.go
2717
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package fsxwindowsfileserver import ( "errors" "strings" resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" ) // FSxWindowsFileServerVolumeStatus defines resource statuses for fsxwindowsfileserver resource type FSxWindowsFileServerVolumeStatus resourcestatus.ResourceStatus const ( // FSxWindowsFileServerVolumeStatusNone is the zero state of a task resource FSxWindowsFileServerVolumeStatusNone FSxWindowsFileServerVolumeStatus = iota // FSxWindowsFileServerVolumeCreated represents a task resource which has been created FSxWindowsFileServerVolumeCreated // FSxWindowsFileServerVolumeRemoved represents a task resource which has been cleaned up FSxWindowsFileServerVolumeRemoved ) var FSxWindowsFileServerVolumeStatusMap = map[string]FSxWindowsFileServerVolumeStatus{ "NONE": FSxWindowsFileServerVolumeStatusNone, "CREATED": FSxWindowsFileServerVolumeCreated, "REMOVED": FSxWindowsFileServerVolumeRemoved, } // StatusString returns a human readable string representation of this object func (fs FSxWindowsFileServerVolumeStatus) String() string { for k, v := range FSxWindowsFileServerVolumeStatusMap { if v == fs { return k } } return "NONE" } // MarshalJSON overrides the logic for JSON-encoding the ResourceStatus type func (fs *FSxWindowsFileServerVolumeStatus) MarshalJSON() ([]byte, error) { if fs == nil { return nil, nil } return []byte(`"` + fs.String() + `"`), nil } // UnmarshalJSON overrides the logic for parsing the JSON-encoded ResourceStatus data func (fs *FSxWindowsFileServerVolumeStatus) UnmarshalJSON(b []byte) error { if strings.ToLower(string(b)) == "null" { *fs = FSxWindowsFileServerVolumeStatusNone return nil } if b[0] != '"' || b[len(b)-1] != '"' { *fs = FSxWindowsFileServerVolumeStatusNone return errors.New("resource status unmarshal: status must be a string or null; Got " + string(b)) } strStatus := b[1 : len(b)-1] stat, ok := FSxWindowsFileServerVolumeStatusMap[string(strStatus)] if !ok { *fs = FSxWindowsFileServerVolumeStatusNone return errors.New("resource status unmarshal: unrecognized status") } *fs = stat return nil }
apache-2.0
dlna/DeveloperToolsForUPnP
Tools/DeviceBuilder/Util/ILibParsers.h
24999
/* * INTEL CONFIDENTIAL * Copyright (c) 2002, 2003 Intel Corporation. All rights reserved. * * The source code contained or described herein and all documents * related to the source code ("Material") are owned by Intel * Corporation or its suppliers or licensors. Title to the * Material remains with Intel Corporation or its suppliers and * licensors. The Material contains trade secrets and proprietary * and confidential information of Intel or its suppliers and * licensors. The Material is protected by worldwide copyright and * trade secret laws and treaty provisions. No part of the Material * may be used, copied, reproduced, modified, published, uploaded, * posted, transmitted, distributed, or disclosed in any way without * Intel's prior express written permission. * No license under any patent, copyright, trade secret or other * intellectual property right is granted to or conferred upon you * by disclosure or delivery of the Materials, either expressly, by * implication, inducement, estoppel or otherwise. Any license * under such intellectual property rights must be express and * approved by Intel in writing. * * $Workfile: ILibParsers.h * $Revision: #1.0.1804.21376 * $Author: Intel Corporation, Intel Device Builder * $Date: Thursday, December 09, 2004 * * * */ /*! \file ILibParsers.h \brief MicroStack APIs for various functions and tasks */ #ifndef __ILibParsers__ #define __ILibParsers__ /*! \defgroup ILibParsers ILibParser Modules \{ \} */ /*! \def MAX_HEADER_LENGTH Specifies the maximum allowed length for an HTTP Header */ #define MAX_HEADER_LENGTH 800 #ifdef MEMORY_CHECK #include <assert.h> #define MEMCHECK(x) x #else #define MEMCHECK(x) #endif #ifdef _WIN32_WCE #define REQUIRES_MEMORY_ALIGNMENT #define errno 0 #define ERANGE 1 #define time(x) GetTickCount() #endif #ifndef WIN32 #define REQUIRES_MEMORY_ALIGNMENT #endif #if defined(WIN32) || defined (_WIN32_WCE) #ifndef MICROSTACK_NO_STDAFX #include "stdafx.h" #endif #else #include <sys/types.h> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #include <sys/time.h> #include <netdb.h> #include <sys/ioctl.h> #include <net/if.h> #include <sys/utsname.h> #include <netinet/in.h> #include <unistd.h> #include <errno.h> #include <semaphore.h> #include <malloc.h> #include <fcntl.h> #include <signal.h> #endif #include <stdlib.h> #include <stdio.h> #include <stddef.h> #include <string.h> #include <math.h> #ifdef WIN32 #include <windows.h> #include <winioctl.h> #include <winbase.h> #endif #ifndef _WIN32_WCE #include <time.h> #endif #if defined(WIN32) || defined(_WIN32_WCE) #define sem_t HANDLE #define sem_init(x,y,z) *x=CreateSemaphore(NULL,z,FD_SETSIZE,NULL) #define sem_destroy(x) (CloseHandle(*x)==0?1:0) #define sem_wait(x) WaitForSingleObject(*x,INFINITE) #define sem_trywait(x) ((WaitForSingleObject(*x,0)==WAIT_OBJECT_0)?0:1) #define sem_post(x) ReleaseSemaphore(*x,1,NULL) #define strncasecmp(x,y,z) _strnicmp(x,y,z) #define strcasecmp(x,y) _stricmp(x,y) #define gettimeofday(x,y) (x)->tv_sec = GetTickCount()/1000;(x)->tv_usec = 1000*(GetTickCount()%1000) #ifndef stricmp #define stricmp(x,y) _stricmp(x,y) #endif #ifndef strnicmp #define strnicmp(x,y,z) _strnicmp(x,y,z) #endif #ifndef strcmpi #define strcmpi(x,y) _stricmp(x,y) #endif #endif /*! \def UPnPMIN(a,b) Returns the minimum of \a a and \a b. */ #define UPnPMIN(a,b) (((a)<(b))?(a):(b)) /*! \def ILibIsChainBeingDestroyed(Chain) Determines if the specified chain is in the process of being disposed. */ #define ILibIsChainBeingDestroyed(Chain) (*((int*)Chain)) typedef enum { ILibServerScope_All=0, ILibServerScope_LocalLoopback=1, ILibServerScope_LocalSegment=2 }ILibServerScope; typedef void(*ILibChain_PreSelect)(void* object,fd_set *readset, fd_set *writeset, fd_set *errorset, int* blocktime); typedef void(*ILibChain_PostSelect)(void* object,int slct, fd_set *readset, fd_set *writeset, fd_set *errorset); typedef void(*ILibChain_Destroy)(void* object); /*! \defgroup DataStructures Data Structures \ingroup ILibParsers \{ \} */ /*! \struct parser_result_field ILibParsers.h \brief Data Elements of \a parser_result \para This structure represents individual tokens, resulting from a call to \a ILibParseString and \a ILibParseStringAdv */ struct parser_result_field { /*! \var data \brief Pointer to string */ char* data; /*! \var datalength \brief Length of \a data */ int datalength; /*! \var NextResult \brief Pointer to next token */ struct parser_result_field *NextResult; }; /*! \struct parser_result ILibParsers.h \brief String Parsing Result Index \para This is returned from a successfull call to either \a ILibParseString or \a ILibParseStringAdv. */ struct parser_result { /*! \var FirstResult \brief Pointer to the first token */ struct parser_result_field *FirstResult; /*! \var LastResult \brief Pointer to the last token */ struct parser_result_field *LastResult; /*! \var NumResults \brief The total number of tokens */ int NumResults; }; /*! \struct packetheader_field_node ILibParsers.h \brief HTTP Headers \para This structure represents an individual header element. A list of these is referenced from a \packetheader_field_node. */ struct packetheader_field_node { /*! \var Field \brief Header Name */ char* Field; /*! \var FieldLength \brief Length of \a Field */ int FieldLength; /*! \var FieldData \brief Header Value */ char* FieldData; /*! \var FieldDataLength \brief Length of \a FieldData */ int FieldDataLength; /*! \var UserAllocStrings \brief Boolean indicating if the above strings are non-static memory */ int UserAllocStrings; /*! \var NextField \brief Pointer to the Next Header entry. NULL if this is the last one */ struct packetheader_field_node* NextField; }; /*! \struct packetheader ILibParsers.h \brief Structure representing a packet formatted according to HTTP encoding rules \para This can be created manually by calling \a ILibCreateEmptyPacket(), or automatically via a call to \a ILibParsePacketHeader(...) */ struct packetheader { /*! \var Directive \brief HTTP Method \para eg: \b GET /index.html HTTP/1.1 */ char* Directive; /*! \var DirectiveLength \brief Length of \a Directive */ int DirectiveLength; /*! \var DirectiveObj \brief HTTP Method Path \para eg: GET \b /index.html HTTP/1.1 */ char* DirectiveObj; /*! \var DirectiveObjLength \brief Length of \a DirectiveObj */ void *Reserved; int DirectiveObjLength; /*! \var StatusCode \brief HTTP Response Code \para eg: HTTP/1.1 \b 200 OK */ int StatusCode; /* \var StatusData \brief Status Meta Data \para eg: HTTP/1.1 200 \b OK */ char* StatusData; /*! \var StatusDataLength \brief Length of \a StatusData */ int StatusDataLength; /*! \var Version \brief HTTP Version \para eg: 1.1 */ char* Version; /*! \var VersionLength \brief Length of \a Version */ int VersionLength; /*! \var Body \brief Pointer to HTTP Body */ char* Body; /*! \var BodyLength \brief Length of \a Body */ int BodyLength; /*! \var UserAllocStrings \brief Boolean indicating if Directive/Obj are non-static \para This only needs to be set, if you manually populate \a Directive and \a DirectiveObj.<br> It is \b recommended that you use \a ILibSetDirective */ int UserAllocStrings; // Set flag if you allocate memory pointed to by Directive/Obj /*! \var UserAllocVersion \brief Boolean indicating if Version string is non-static \para This only needs to be set, if you manually populate \a Version.<br> It is \b recommended that you use \a ILibSetVersion */ int UserAllocVersion; // Set flag if you allocate memory pointed to by Version int ClonedPacket; /*! \var FirstField \brief Pointer to the first Header field */ struct packetheader_field_node* FirstField; /*! \var LastField \brief Pointer to the last Header field */ struct packetheader_field_node* LastField; /*! \var Source \brief The origin of this packet \para This is only populated if you obtain this structure from either \a ILibWebServer or \a ILibWebClient. */ struct sockaddr_in *Source; /*! \var ReceivingAddress \brief IP address that this packet was received on \para This is only populated if you obtain this structure from either \a ILibWebServer or \a ILibWebClient. */ int ReceivingAddress; void *HeaderTable; }; /*! \struct ILibXMLNode \brief An XML Tree \para This is obtained via a call to \a ILibParseXML. It is \b highly \b recommended that you call \a ILibProcessXMLNodeList, so that the node pointers at the end of this structure will be populated. That will greatly simplify node traversal.<br><br> In order for namespaces to be resolved, you must call \a ILibXML_BuildNamespaceLookupTable(...) with root-most node that you would like to resolve namespaces for. It is recommended that you always use the root node, obtained from the initial call to \a ILibParseXML.<br><br> For most intents and purposes, you only need to work with the "StartElements" */ struct ILibXMLNode { /*! \var Name \brief Local Name of the current element */ char* Name; // Element Name /*! \var NameLength \brief Length of \a Name */ int NameLength; /*! \var NSTag \brief Namespace Prefix of the current element \para This can be resolved using a call to \a ILibXML_LookupNamespace(...) */ char* NSTag; // Element Prefix /*! \var NSLength \brief Length of \a NSTag */ int NSLength; /*! \var StartTag \brief boolean indicating if the current element is a start element */ int StartTag; // Non zero if this is a StartElement /*! \var EmptyTag \brief boolean indicating if this element is an empty element */ int EmptyTag; // Non zero if this is an EmptyElement void *Reserved; // DO NOT TOUCH void *Reserved2; // DO NOT TOUCH /*! \var Next \brief Pointer to the child of the current node */ struct ILibXMLNode *Next; // Next Node /*! \var Parent \brief Pointer to the Parent of the current node */ struct ILibXMLNode *Parent; // Parent Node /*! \var Peer \brief Pointer to the sibling of the current node */ struct ILibXMLNode *Peer; // Sibling Node struct ILibXMLNode *ClosingTag; // Pointer to closing node of this element struct ILibXMLNode *StartingTag; // Pointer to start node of this element }; /*! \struct ILibXMLAttribute \brief A list of XML Attributes for a specified XML node \para This can be obtained via a call to \a ILibGetXMLAttributes(...) */ struct ILibXMLAttribute { /*! \var Name \brief Local name of Attribute */ char* Name; // Attribute Name /*! \var NameLength \brief Length of \a Name */ int NameLength; /*! \var Prefix \brief Namespace Prefix of this attribute \para This can be resolved by calling \a ILibXML_LookupNamespace(...) and passing in \a Parent as the current node */ char* Prefix; // Attribute Namespace Prefix /*! \var PrefixLength \brief Lenth of \a Prefix */ int PrefixLength; /*! \var Parent \brief Pointer to the XML Node that contains this attribute */ struct ILibXMLNode *Parent; // The XML Node this attribute belongs to /*! \var Value \brief Attribute Value */ char* Value; // Attribute Value /*! \var ValueLength \brief Length of \a Value */ int ValueLength; /*! \var Next \brief Pointer to the next attribute */ struct ILibXMLAttribute *Next; // Next Attribute }; /*! \fn ILibFindEntryInTable(char *Entry, char **Table) \brief Find the index in \a Table that contains \a Entry. */ int ILibFindEntryInTable(char *Entry, char **Table); char *ILibReadFileFromDisk(char *FileName); int ILibReadFileFromDiskEx(char **Target, char *FileName); void ILibWriteStringToDisk(char *FileName, char *data); void ILibWriteStringToDiskEx(char *FileName, char *data, int dataLen); void ILibDeleteFileFromDisk(char *FileName); /*! \defgroup StackGroup Stack \ingroup DataStructures Stack Methods \{ */ void ILibCreateStack(void **TheStack); void ILibPushStack(void **TheStack, void *data); void *ILibPopStack(void **TheStack); void *ILibPeekStack(void **TheStack); void ILibClearStack(void **TheStack); /*! \} */ /*! \defgroup QueueGroup Queue \ingroup DataStructures Queue Methods \{ */ void *ILibQueue_Create(); void ILibQueue_Destroy(void *q); int ILibQueue_IsEmpty(void *q); void ILibQueue_EnQueue(void *q, void *data); void *ILibQueue_DeQueue(void *q); void *ILibQueue_PeekQueue(void *q); void ILibQueue_Lock(void *q); void ILibQueue_UnLock(void *q); /* \} */ /*! \defgroup XML XML Parsing Methods \ingroup ILibParsers MicroStack supplied XML Parsing Methods \par \b Note: None of the XML Parsing Methods copy or allocate memory The lifetime of any/all strings is bound to the underlying string that was parsed using ILibParseXML. If you wish to keep any of these strings for longer then the lifetime of the underlying string, you must copy the string. \{ */ // // Parses an XML string. Returns a tree of ILibXMLNode elements. // struct ILibXMLNode *ILibParseXML(char *buffer, int offset, int length); // // Preprocesses the tree of ILibXMLNode elements returned by ILibParseXML. // This method populates all the node pointers in each node for easy traversal. // In addition, this method will also determine if the given XML document was well formed. // Returns 0 if processing succeeded. Specific Error Codes are returned otherwise. // int ILibProcessXMLNodeList(struct ILibXMLNode *nodeList); // // Initalizes a namespace lookup table for a given parent node. // If you want to resolve namespaces, you must call this method exactly once // void ILibXML_BuildNamespaceLookupTable(struct ILibXMLNode *node); // // Resolves a namespace prefix. // char* ILibXML_LookupNamespace(struct ILibXMLNode *currentLocation, char *prefix, int prefixLength); // // Fetches all the data for an element. Returns the length of the populated data // int ILibReadInnerXML(struct ILibXMLNode *node, char **RetVal); // // Returns the attributes of an XML element. Returned as a linked list of ILibXMLAttribute. // struct ILibXMLAttribute *ILibGetXMLAttributes(struct ILibXMLNode *node); void ILibDestructXMLNodeList(struct ILibXMLNode *node); void ILibDestructXMLAttributeList(struct ILibXMLAttribute *attribute); // // Escapes an XML string. // indata must be pre-allocated. // int ILibXmlEscape(char* outdata, const char* indata); // // Returns the required size string necessary to escape this XML string // int ILibXmlEscapeLength(const char* data); // // Unescapes an XML string. // Since Unescaped strings are always shorter than escaped strings, // the resultant string will overwrite the supplied string, to save memory // int ILibInPlaceXmlUnEscape(char* data); /*! \} */ /*! \defgroup ChainGroup Chain Methods \ingroup ILibParsers \brief Chaining Methods \{ */ void *ILibCreateChain(); void ILibAddToChain(void *chain, void *object); void ILibStartChain(void *chain); void ILibStopChain(void *chain); void ILibForceUnBlockChain(void *Chain); /* \} */ /*! \defgroup LinkedListGroup Linked List \ingroup DataStructures \{ */ // // Initializes a new Linked List data structre // void* ILibLinkedList_Create(); // // Returns the Head node of a linked list data structure // void* ILibLinkedList_GetNode_Head(void *LinkedList); // Returns Node // // Returns the Tail node of a linked list data structure // void* ILibLinkedList_GetNode_Tail(void *LinkedList); // Returns Node // // Returns the Next node of a linked list data structure // void* ILibLinkedList_GetNextNode(void *LinkedList_Node); // Returns Node // // Returns the Previous node of a linked list data structure // void* ILibLinkedList_GetPreviousNode(void *LinkedList_Node);// Returns Node // // Returns the number of nodes contained in a linked list data structure // long ILibLinkedList_GetCount(void *LinkedList); // // Returns a shallow copy of a linked list data structure. That is, the structure // is copied, but none of the data contents are copied. The pointer values are just copied. // void* ILibLinkedList_ShallowCopy(void *LinkedList); // // Returns the data pointer of a linked list element // void *ILibLinkedList_GetDataFromNode(void *LinkedList_Node); // // Creates a new element, and inserts it before the given node // void ILibLinkedList_InsertBefore(void *LinkedList_Node, void *data); // // Creates a new element, and inserts it after the given node // void ILibLinkedList_InsertAfter(void *LinkedList_Node, void *data); // // Removes the given node from a linked list data structure // void* ILibLinkedList_Remove(void *LinkedList_Node); // // Given a data pointer, will traverse the linked list data structure, deleting // elements that point to this data pointer. // int ILibLinkedList_Remove_ByData(void *LinkedList, void *data); // // Creates a new element, and inserts it at the top of the linked list. // void ILibLinkedList_AddHead(void *LinkedList, void *data); // // Creates a new element, and appends it to the end of the linked list // void ILibLinkedList_AddTail(void *LinkedList, void *data); void ILibLinkedList_Lock(void *LinkedList); void ILibLinkedList_UnLock(void *LinkedList); void ILibLinkedList_Destroy(void *LinkedList); /*! \} */ /*! \defgroup HashTreeGroup Hash Table \ingroup DataStructures \b Note: Duplicate key entries will be overwritten. \{ */ // // Initialises a new Hash Table (tree) data structure // void* ILibInitHashTree(); void* ILibInitHashTree_CaseInSensitive(); void ILibDestroyHashTree(void *tree); // // Returns non-zero if the key entry is found in the table // int ILibHasEntry(void *hashtree, char* key, int keylength); // // Add a new entry into the hashtable. If the key is already used, it will be overwriten. // void ILibAddEntry(void* hashtree, char* key, int keylength, void *value); void ILibAddEntryEx(void* hashtree, char* key, int keylength, void *value, int valueEx); void* ILibGetEntry(void *hashtree, char* key, int keylength); void ILibGetEntryEx(void *hashtree, char* key, int keylength, void *value, int *valueEx); void ILibDeleteEntry(void *hashtree, char* key, int keylength); // // Returns an Enumerator to browse all the entries of the Hashtable // void *ILibHashTree_GetEnumerator(void *tree); void ILibHashTree_DestroyEnumerator(void *tree_enumerator); // // Advance the Enumerator to the next element. // Returns non-zero if there are no more entries to enumerate // int ILibHashTree_MoveNext(void *tree_enumerator); // // Obtains the value of a Hashtable Entry. // void ILibHashTree_GetValue(void *tree_enumerator, char **key, int *keyLength, void **data); void ILibHashTree_GetValueEx(void *tree_enumerator, char **key, int *keyLength, void **data, int *dataEx); void ILibHashTree_Lock(void *hashtree); void ILibHashTree_UnLock(void *hashtree); /*! \} */ /*! \defgroup LifeTimeMonitor LifeTimeMonitor \ingroup ILibParsers \brief Timed Callback Service \para These callbacks will always be triggered on the thread that calls ILibStartChain(). \{ */ // // Adds an event trigger to be called after the specified time elapses, with the // specified data object // #define ILibLifeTime_Add(LifetimeMonitorObject, data, seconds, Callback, Destroy) ILibLifeTime_AddEx(LifetimeMonitorObject, data, seconds*1000, Callback, Destroy) void ILibLifeTime_AddEx(void *LifetimeMonitorObject,void *data, int milliseconds, void* Callback, void* Destroy); // // Removes all event triggers that contain the specified data object. // void ILibLifeTime_Remove(void *LifeTimeToken, void *data); // // Removes all events triggers // void ILibLifeTime_Flush(void *LifeTimeToken); void *ILibCreateLifeTime(void *Chain); /* \} */ /*! \defgroup StringParsing String Parsing \ingroup ILibParsers \{ */ // // Trims preceding and proceding whitespaces from a string // int ILibTrimString(char **theString, int length); // // Parses the given string using the specified multichar delimiter. // Returns a parser_result object, which points to a linked list // of parser_result_field objects. // struct parser_result* ILibParseString(char* buffer, int offset, int length, char* Delimiter, int DelimiterLength); // // Same as ILibParseString, except this method ignore all delimiters that are contains within // quotation marks // struct parser_result* ILibParseStringAdv(char* buffer, int offset, int length, char* Delimiter, int DelimiterLength); // // Releases resources used by string parser // void ILibDestructParserResults(struct parser_result *result); // // Parses a URI into IP Address, Port Number, and Path components // Note: IP and Path must be freed. // void ILibParseUri(char* URI, char** IP, unsigned short* Port, char** Path); // // Parses a string into a Long or unsigned Long. // Returns non-zero on error condition // int ILibGetLong(char *TestValue, int TestValueLength, long* NumericValue); int ILibGetULong(const char *TestValue, const int TestValueLength, unsigned long* NumericValue); int ILibFragmentText(char *text, int textLength, char *delimiter, int delimiterLength, int tokenLength, char **RetVal); int ILibFragmentTextLength(char *text, int textLength, char *delimiter, int delimiterLength, int tokenLength); /* Base64 handling methods */ int ILibBase64Encode(unsigned char* input, const int inputlen, unsigned char** output); int ILibBase64Decode(unsigned char* input, const int inputlen, unsigned char** output); /* Compression Handling Methods */ char* ILibDecompressString(unsigned char* CurrentCompressed, const int bufferLength, const int DecompressedLength); /* \} */ /*! \defgroup PacketParsing Packet Parsing \ingroup ILibParsers \{ */ /* Packet Methods */ // // Allocates an empty HTTP Packet // struct packetheader *ILibCreateEmptyPacket(); // // Add a header into the packet. (String is copied) // void ILibAddHeaderLine(struct packetheader *packet, char* FieldName, int FieldNameLength, char* FieldData, int FieldDataLength); void ILibDeleteHeaderLine(struct packetheader *packet, char* FieldName, int FieldNameLength); // // Fetches the header value from the packet. (String is NOT copied) // Returns NULL if the header field does not exist // char* ILibGetHeaderLine(struct packetheader *packet, char* FieldName, int FieldNameLength); // // Sets the HTTP version: 1.0, 1.1, etc. (string is copied) // void ILibSetVersion(struct packetheader *packet, char* Version, int VersionLength); // // Set the status code and data line. The status data is copied. // void ILibSetStatusCode(struct packetheader *packet, int StatusCode, char* StatusData, int StatusDataLength); // // Sets the method and path. (strings are copied) // void ILibSetDirective(struct packetheader *packet, char* Directive, int DirectiveLength, char* DirectiveObj, int DirectiveObjLength); // // Releases all resources consumed by this packet structure // void ILibDestructPacket(struct packetheader *packet); // // Parses a string into an packet structure. // None of the strings are copied, so the lifetime of all the values are bound // to the lifetime of the underlying string that is parsed. // struct packetheader* ILibParsePacketHeader(char* buffer, int offset, int length); // // Returns the packetized string and it's length. (must be freed) // int ILibGetRawPacket(struct packetheader *packet,char **buffer); // // Performs a deep copy of a packet structure // struct packetheader* ILibClonePacket(struct packetheader *packet); // // Escapes a string according to HTTP escaping rules. // indata must be pre-allocated // int ILibHTTPEscape(char* outdata, const char* indata); // // Returns the size of string required to escape this string, // according to HTTP escaping rules // int ILibHTTPEscapeLength(const char* data); // // Unescapes the escaped string sequence // Since escaped string sequences are always longer than unescaped // string sequences, the resultant string is overwritten onto the supplied string // int ILibInPlaceHTTPUnEscape(char* data); /* \} */ /*! \defgroup NetworkHelper Network Helper \ingroup ILibParsers \{ */ // // Obtain an array of IP Addresses available on the local machine. // int ILibGetLocalIPAddressList(int** pp_int); #if defined(WINSOCK2) int ILibGetLocalIPAddressNetMask(int address); unsigned short ILibGetDGramSocket(int local, HANDLE *TheSocket); unsigned short ILibGetStreamSocket(int local, unsigned short PortNumber,HANDLE *TheSocket); #elif defined(WINSOCK1) || defined(_WIN32_WCE) unsigned short ILibGetDGramSocket(int local, SOCKET *TheSocket); unsigned short ILibGetStreamSocket(int local, unsigned short PortNumber,SOCKET *TheSocket); #else unsigned short ILibGetDGramSocket(int local, int *TheSocket); unsigned short ILibGetStreamSocket(int local, unsigned short PortNumber,int *TheSocket); #endif /* \} */ void* dbg_malloc(int sz); void dbg_free(void* ptr); int dbg_GetCount(); /* \} */ // End of ILibParser Group #endif
apache-2.0
vladoatanasov/couchbase-lite-android
src/androidTest/java/com/couchbase/lite/DatabaseTest.java
8892
package com.couchbase.lite; import com.couchbase.lite.internal.RevisionInternal; import com.couchbase.lite.mockserver.MockDispatcher; import com.couchbase.lite.mockserver.MockHelper; import com.couchbase.lite.replicator.Replication; import com.couchbase.lite.support.FileDirUtils; import com.couchbase.lite.support.RevisionUtils; import com.couchbase.lite.util.Log; import com.squareup.okhttp.mockwebserver.MockWebServer; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; public class DatabaseTest extends LiteTestCase { public void testPruneRevsToMaxDepthViaCompact() throws Exception { Map<String, Object> properties = new HashMap<String, Object>(); properties.put("testName", "testDatabaseCompaction"); properties.put("tag", 1337); Document doc = createDocumentWithProperties(database, properties); SavedRevision rev = doc.getCurrentRevision(); database.setMaxRevTreeDepth(1); for (int i = 0; i < 10; i++) { Map<String, Object> properties2 = new HashMap<String, Object>(properties); properties2.put("tag", i); rev = rev.createRevision(properties2); } database.compact(); Document fetchedDoc = database.getDocument(doc.getId()); List<SavedRevision> revisions = fetchedDoc.getRevisionHistory(); assertEquals(1, revisions.size()); } /** * When making inserts in a transaction, the change notifications should * be batched into a single change notification (rather than a change notification * for each insert) */ public void testChangeListenerNotificationBatching() throws Exception { final int numDocs = 50; final AtomicInteger atomicInteger = new AtomicInteger(0); final CountDownLatch countDownLatch = new CountDownLatch(1); database.addChangeListener(new Database.ChangeListener() { @Override public void changed(Database.ChangeEvent event) { atomicInteger.incrementAndGet(); } }); database.runInTransaction(new TransactionalTask() { @Override public boolean run() { createDocuments(database, numDocs); countDownLatch.countDown(); return true; } }); boolean success = countDownLatch.await(30, TimeUnit.SECONDS); assertTrue(success); assertEquals(1, atomicInteger.get()); } /** * When making inserts outside of a transaction, there should be a change notification * for each insert (no batching) */ public void testChangeListenerNotification() throws Exception { final int numDocs = 50; final AtomicInteger atomicInteger = new AtomicInteger(0); database.addChangeListener(new Database.ChangeListener() { @Override public void changed(Database.ChangeEvent event) { atomicInteger.incrementAndGet(); } }); createDocuments(database, numDocs); assertEquals(numDocs, atomicInteger.get()); } /** * Change listeners should only be called once no matter how many times they're added. */ public void testAddChangeListenerIsIdempotent() throws Exception { final AtomicInteger count = new AtomicInteger(0); Database.ChangeListener listener = new Database.ChangeListener() { @Override public void changed(Database.ChangeEvent event) { count.incrementAndGet(); } }; database.addChangeListener(listener); database.addChangeListener(listener); createDocuments(database, 1); assertEquals(1, count.intValue()); } public void testGetActiveReplications() throws Exception { // create mock sync gateway that will serve as a pull target and return random docs int numMockDocsToServe = 0; MockDispatcher dispatcher = new MockDispatcher(); MockWebServer server = MockHelper.getPreloadedPullTargetMockCouchDB(dispatcher, numMockDocsToServe, 1); dispatcher.setServerType(MockDispatcher.ServerType.COUCHDB); server.setDispatcher(dispatcher); server.play(); final Replication replication = database.createPullReplication(server.getUrl("/db")); assertEquals(0, database.getAllReplications().size()); assertEquals(0, database.getActiveReplications().size()); final CountDownLatch replicationRunning = new CountDownLatch(1); replication.addChangeListener(new ReplicationActiveObserver(replicationRunning)); replication.start(); boolean success = replicationRunning.await(30, TimeUnit.SECONDS); assertTrue(success); assertEquals(1, database.getAllReplications().size()); assertEquals(1, database.getActiveReplications().size()); final CountDownLatch replicationDoneSignal = new CountDownLatch(1); replication.addChangeListener(new ReplicationFinishedObserver(replicationDoneSignal)); success = replicationDoneSignal.await(60, TimeUnit.SECONDS); assertTrue(success); // workaround race condition. Since our replication change listener will get triggered // _before_ the internal change listener that updates the activeReplications map, we // need to pause briefly to let the internal change listener to update activeReplications. Thread.sleep(500); assertEquals(1, database.getAllReplications().size()); assertEquals(0, database.getActiveReplications().size()); server.shutdown(); } public void testGetDatabaseNameFromPath() throws Exception { assertEquals("baz", FileDirUtils.getDatabaseNameFromPath("foo/bar/baz.cblite")); } public void testEncodeDocumentJSON() throws Exception { Map<String, Object> props = new HashMap<String, Object>(); props.put("_local_seq", ""); RevisionInternal revisionInternal = new RevisionInternal(props); byte[] encoded = RevisionUtils.asCanonicalJSON(revisionInternal); assertNotNull(encoded); } /** * in Database_Tests.m * - (void) test075_UpdateDocInTransaction */ public void testUpdateDocInTransaction() throws InterruptedException { // Test for #256, "Conflict error when updating a document multiple times in transaction block" // https://github.com/couchbase/couchbase-lite-ios/issues/256 Map<String, Object> properties = new HashMap<String, Object>(); properties.put("testName", "testUpdateDocInTransaction"); properties.put("count", 1); final Document doc = createDocumentWithProperties(database, properties); final CountDownLatch latch = new CountDownLatch(1); database.addChangeListener(new Database.ChangeListener() { @Override public void changed(Database.ChangeEvent event) { Log.i(TAG, "-- changed() --"); latch.countDown(); } }); assertTrue(database.runInTransaction(new TransactionalTask() { @Override public boolean run() { // Update doc. The currentRevision should update, but no notification be posted (yet). Map<String, Object> props1 = new HashMap<String, Object>(); props1.putAll(doc.getProperties()); props1.put("count", 2); SavedRevision rev1 = null; try { rev1 = doc.putProperties(props1); } catch (CouchbaseLiteException e) { Log.e(Log.TAG_DATABASE, e.toString()); return false; } assertNotNull(rev1); assertEquals(doc.getCurrentRevision(), rev1); assertEquals(1, latch.getCount()); // Update doc again; this should succeed, in the same manner. Map<String, Object> props2 = new HashMap<String, Object>(); props2.putAll(doc.getProperties()); props2.put("count", 3); SavedRevision rev2 = null; try { rev2 = doc.putProperties(props2); } catch (CouchbaseLiteException e) { Log.e(Log.TAG_DATABASE, e.toString()); return false; } assertNotNull(rev2); assertEquals(doc.getCurrentRevision(), rev2); assertEquals(1, latch.getCount()); return true; } })); assertTrue(latch.await(0, TimeUnit.SECONDS)); } }
apache-2.0
wuzhuobin/Vessel_Project
codes/Algorithm/LumenSegmentationFilter/LumenSegmentationFilter.h
2029
#ifndef __LUMENSEGMENTATION_H #define __LUMENSEGMENTATION_H #include <vtkPolyDataAlgorithm.h> #include <vtkImageData.h> #include <vtkSmartPointer.h> #include <vtkPolyData.h> #include <vtkPolyDataCollection.h> #include <vtkOrientedGlyphContourRepresentation.h> class LumenSegmentationFilter : public vtkPolyDataAlgorithm { public: vtkTypeMacro(LumenSegmentationFilter, vtkPolyDataAlgorithm); void PrintSelf(ostream& os, vtkIndent indent); static LumenSegmentationFilter *New(); //void SetInputData(vtkImageData* input); //vtkPolyData* GetOutput(); void SetVOI(int* VOI); void SetVOI(int extent0, int extent1, int extent2, int extent3, int extent4, int extent5); void SetSlice(int slice); /** * Set GenerateValuse of the vtkPolyDataContourFilter * placing 3 values in an array and set as the input of void SetGenerateValues(double* generateValues); * @param generateValues0 1st generateValue of contourFilter * @param generateValues1 2nd * @param generateValues2 3rd */ void SetGenerateValues(double generateValues0, double generateValues1, double generateValues2); void SetGenerateValues(double* generateValues); void SetVesselWallContourRepresentation(vtkContourRepresentation* vesselWallContourRepresentation); static void ReorderPolyData(vtkPolyData* lumenWallPolyData); protected: LumenSegmentationFilter(); ~LumenSegmentationFilter() {}; virtual int RequestData(vtkInformation* request, vtkInformationVector** inputVector, vtkInformationVector* outputVector); virtual int FillInputPortInformation(int port, vtkInformation *info); vtkContourRepresentation* vesselWallContourRepresentation = nullptr; //vtkSmartPointer<vtkPolyData> m_contour = NULL; //vtkSmartPointer<vtkImageData> input = NULL; //vtkSmartPointer<vtkImageData> extract = NULL; double generateValues[3] = { 0 }; int VOI[6] = { 0 }; private: LumenSegmentationFilter(const LumenSegmentationFilter&); // Not implemented. void operator=(const LumenSegmentationFilter&); // Not implemented. }; #endif
apache-2.0
Sependa/SalesforceSDKCore-Taptera
SalesforceSDKCoreTests/SFSmartStoreTestCase.h
2005
/* Copyright (c) 2013, salesforce.com, inc. All rights reserved. Redistribution and use of this software in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of salesforce.com, inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission of salesforce.com, inc. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #import <SenTestingKit/SenTestingKit.h> #import "SFSmartStore.h" @interface SFSmartStoreTestCase : SenTestCase - (void) assertSameJSONWithExpected:(id)expected actual:(id) actual message:(NSString*) message; - (void) assertSameJSONArrayWithExpected:(NSArray*)expected actual:(NSArray*) actual message:(NSString*) message; - (void) assertSameJSONMapWithExpected:(NSDictionary*)expected actual:(NSDictionary*) actual message:(NSString*) message; @end
apache-2.0
PRIDE-Archive/psm-index-service
src/main/java/uk/ac/ebi/pride/psmindex/search/model/Psm.java
2617
package uk.ac.ebi.pride.psmindex.search.model; import org.apache.solr.client.solrj.beans.Field; import uk.ac.ebi.pride.archive.dataprovider.identification.ModificationProvider; import uk.ac.ebi.pride.archive.dataprovider.identification.PeptideSequenceProvider; import uk.ac.ebi.pride.archive.dataprovider.param.CvParamProvider; import uk.ac.ebi.pride.indexutils.helpers.CvParamHelper; import uk.ac.ebi.pride.indexutils.helpers.ModificationHelper; import java.util.ArrayList; import java.util.List; public class Psm implements PeptideSequenceProvider { @Field(PsmFields.ID) private String id; @Field(PsmFields.REPORTED_ID) private String reportedId; @Field(PsmFields.PEPTIDE_SEQUENCE) private String peptideSequence; @Field(PsmFields.PROTEIN_ACCESSION) private String proteinAccession; @Field(PsmFields.PROJECT_ACCESSION) private String projectAccession; @Field(PsmFields.ASSAY_ACCESSION) private String assayAccession; @Field(PsmFields.MOD_NAMES) private List<String> modificationNames; public String getId() { return id; } public void setId(String id) { this.id = id; } public String getReportedId() { return reportedId; } public void setReportedId(String reportedId) { this.reportedId = reportedId; } public String getPeptideSequence() { return peptideSequence; } public void setPeptideSequence(String peptideSequence) { this.peptideSequence = peptideSequence; } public String getProteinAccession() { return proteinAccession; } public void setProteinAccession(String proteinAccession) { this.proteinAccession = proteinAccession; } public String getProjectAccession() { return projectAccession; } public void setProjectAccession(String projectAccession) { this.projectAccession = projectAccession; } public String getAssayAccession() { return assayAccession; } public void setAssayAccession(String assayAccession) { this.assayAccession = assayAccession; } public Iterable<String> getModificationNames() { return modificationNames; } public void setModificationNames(List<ModificationProvider> modifications) { this.modificationNames = new ArrayList<>(); if (modifications!=null && modifications.size()>0) { for (ModificationProvider modification : modifications) { addModificationNames(modification); } } } public void addModificationNames(ModificationProvider modification) { if (modificationNames==null) { modificationNames = new ArrayList<>(); } modificationNames.add(modification.getName()); } }
apache-2.0
hakanu/iftar
_posts_/vakit/HOLLANDA/DE_KRIM_(ZWOLLE)/2017-02-01-DE_KRIM_(ZWOLLE).markdown
587
--- layout: vakit_dashboard title: DE KRIM (ZWOLLE), HOLLANDA için iftar, namaz vakitleri ve hava durumu - ilçe/eyalet seç permalink: /HOLLANDA/DE KRIM (ZWOLLE) --- ## DE KRIM (ZWOLLE) (HOLLANDA) için iftar, namaz vakitleri ve hava durumu görmek için bir ilçe/eyalet seç Aşağıdaki listeden bir şehir ya da semt seçin * [ (DE_KRIM_(ZWOLLE), HOLLANDA) için iftar ve namaz vakitleri](/HOLLANDA/DE_KRIM_(ZWOLLE)/) <script type="text/javascript"> var GLOBAL_COUNTRY = 'HOLLANDA'; var GLOBAL_CITY = 'DE KRIM (ZWOLLE)'; var GLOBAL_STATE = 'DE KRIM (ZWOLLE)'; </script>
apache-2.0
zhaobob/libyami
common/surfacepool.cpp
2169
/* * Copyright (C) 2015 Intel Corporation. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "surfacepool.h" #include "common/log.h" namespace YamiMediaCodec{ SharedPtr<SurfacePool> SurfacePool::create(const SharedPtr<SurfaceAllocator>& alloc, uint32_t fourcc, uint32_t width, uint32_t height, uint32_t size) { SharedPtr<SurfacePool> pool(new SurfacePool); if (YAMI_SUCCESS != pool->init(alloc, fourcc, width, height, size)) pool.reset(); return pool; } SurfacePool::SurfacePool() { memset(&m_params, 0, sizeof(m_params)); } YamiStatus SurfacePool::init(const SharedPtr<SurfaceAllocator>& alloc, uint32_t fourcc, uint32_t width, uint32_t height, uint32_t size) { m_params.fourcc = fourcc; m_params.width = width; m_params.height = height; m_params.size = size; YamiStatus status = alloc->alloc(alloc.get(), &m_params); if (status != YAMI_SUCCESS) return status; //prepare surfaces for pool std::deque<SurfacePtr> surfaces; for (uint32_t i = 0; i < m_params.size; i++) { SurfacePtr s(new VaapiSurface(m_params.surfaces[i], width, height, fourcc)); surfaces.push_back(s); } m_pool = VideoPool<VaapiSurface>::create(surfaces); if (!m_pool) { ERROR("failed to create Surface Pool"); return YAMI_OUT_MEMORY; } m_alloc = alloc; return YAMI_SUCCESS; } SurfacePool::~SurfacePool() { if (m_alloc) { m_alloc->free(m_alloc.get(), &m_params); } } SurfacePtr SurfacePool::alloc() { return m_pool->alloc(); } } //YamiMediaCodec
apache-2.0
aircjm/JavaSeStudy
day24/code/day24_Thread/src/cn/itcast_05/SetThread.java
748
package cn.itcast_05; public class SetThread implements Runnable { private Student s; private int x = 0; public SetThread(Student s) { this.s = s; } @Override public void run() { while (true) { synchronized (s) { //判断有没有 if(s.flag){ try { s.wait(); //t1等着,释放锁 } catch (InterruptedException e) { e.printStackTrace(); } } if (x % 2 == 0) { s.name = "林青霞"; s.age = 27; } else { s.name = "刘意"; s.age = 30; } x++; //x=1 //修改标记 s.flag = true; //唤醒线程 s.notify(); //唤醒t2,唤醒并不表示你立马可以执行,必须还得抢CPU的执行权。 } //t1有,或者t2有 } } }
apache-2.0
google/blockly
scripts/package/node/index.js
339
/** * @license * Copyright 2019 Google LLC * SPDX-License-Identifier: Apache-2.0 */ /** * @fileoverview Blockly module for Node. It includes Blockly core, * built-in blocks, all the generators and the English locale. */ /* eslint-disable */ 'use strict'; // Include the EN Locale by default. Blockly.setLocale(En);
apache-2.0
psoreide/bnd
biz.aQute.resolve/test/biz/aQute/resolve/GenericResolveContextResolveTest.java
5184
package biz.aQute.resolve; import static test.lib.Utils.createRepo; import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Set; import org.osgi.framework.namespace.IdentityNamespace; import org.osgi.resource.Capability; import org.osgi.resource.Requirement; import org.osgi.resource.Resource; import org.osgi.service.repository.Repository; import org.osgi.service.resolver.Resolver; import aQute.bnd.build.model.EE; import aQute.bnd.osgi.resource.CapReqBuilder; import aQute.bnd.version.Version; import aQute.bnd.version.VersionRange; import aQute.lib.io.IO; import junit.framework.TestCase; @SuppressWarnings("restriction") public class GenericResolveContextResolveTest extends TestCase { ResolverLogger logger = new ResolverLogger(0, System.out); private String getTestName() { return getClass().getName() + "/" + getName(); } /** * Simple basic resolve. We use a small index with gogo + framework and then * try to see if we can resolve the runtime from the shell requirement. * * @throws Exception */ public void testSimpleResolve() throws Exception { Repository repository = createRepo(IO.getFile("testdata/repo3.index.xml"), getTestName()); GenericResolveContext grc = new GenericResolveContext(logger); grc.setLevel(2); grc.addRepository(repository); grc.addFramework("org.apache.felix.framework", null); grc.addEE(EE.JavaSE_1_7); grc.addRequireBundle("org.apache.felix.gogo.shell", new VersionRange("[0,1]")); grc.done(); try (ResolverLogger logger = new ResolverLogger(4)) { Resolver resolver = new BndResolver(new ResolverLogger(4)); Set<Resource> resources = resolver.resolve(grc) .keySet(); assertNotNull(getResource(resources, "org.apache.felix.gogo.runtime", "0.10")); } } /** * Check default directive * * @throws Exception */ public void testResolveRequirementNoDirective() throws Exception { Repository repository = createRepo(IO.getFile("testdata/repo6/index.xml"), getTestName()); GenericResolveContext grc = new GenericResolveContext(logger); grc.setLevel(2); grc.addRepository(repository); Requirement logservice = new CapReqBuilder("osgi.service") .addDirective("filter", "(objectClass=org.osgi.service.log.LogService)") .buildSyntheticRequirement(); List<Capability> providers = grc.findProviders(logservice); assertEquals(2, providers.size()); assertNames(providers, "test.a", "test.b"); } /** * Check expressly set directive * * @throws Exception */ public void testResolveRequirementResolveDirective() throws Exception { Repository repository = createRepo(IO.getFile("testdata/repo6/index.xml"), getTestName()); GenericResolveContext grc = new GenericResolveContext(logger); grc.addRepository(repository); Requirement logservice = new CapReqBuilder("osgi.service") .addDirective("filter", "(objectClass=org.osgi.service.log.LogService)") .addDirective("effective", "resolve") .buildSyntheticRequirement(); List<Capability> providers = grc.findProviders(logservice); assertEquals(2, providers.size()); assertNames(providers, "test.a", "test.b"); } public void testResolveRequirementActiveDirective() throws Exception { Repository repository = createRepo(IO.getFile("testdata/repo6/index.xml"), getTestName()); GenericResolveContext grc = new GenericResolveContext(logger); grc.addRepository(repository); Requirement logservice = new CapReqBuilder("osgi.service") .addDirective("filter", "(objectClass=org.osgi.service.log.LogService)") .addDirective("effective", "active") .buildSyntheticRequirement(); List<Capability> providers = grc.findProviders(logservice); assertEquals(3, providers.size()); assertNames(providers, "test.a", "test.b", "test.c"); } private static Resource getResource(Set<Resource> resources, String bsn, String versionString) { for (Resource resource : resources) { List<Capability> identities = resource.getCapabilities(IdentityNamespace.IDENTITY_NAMESPACE); if (identities.size() == 1) { Capability idCap = identities.get(0); Object id = idCap.getAttributes() .get(IdentityNamespace.IDENTITY_NAMESPACE); Object version = idCap.getAttributes() .get(IdentityNamespace.CAPABILITY_VERSION_ATTRIBUTE); if (bsn.equals(id)) { if (versionString == null) { return resource; } Version requested = Version.parseVersion(versionString); Version current; if (version instanceof Version) { current = (Version) version; } else { current = Version.parseVersion("" + version); } if (requested.equals(current)) { return resource; } } } } return null; } void assertNames(List<Capability> providers, String... ids) { Set<String> resourceNames = new HashSet<>(); for (Capability cap : providers) { resourceNames.add(cap.getResource() .getCapabilities(IdentityNamespace.IDENTITY_NAMESPACE) .get(0) .getAttributes() .get(IdentityNamespace.IDENTITY_NAMESPACE) .toString()); } Set<String> expectedResourceNames = new HashSet<>(Arrays.asList(ids)); assertEquals(expectedResourceNames, resourceNames); } }
apache-2.0
ImpactDevelopment/ClientAPI
src/main/java/clientapi/gui/widget/data/DefaultWidgetAlignment.java
1076
/* * Copyright 2018 ImpactDevelopment * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package clientapi.gui.widget.data; /** * Default implementations of {@link WidgetAlignment} * * @see WidgetAlignment * * @author Brady * @since 5/28/2017 */ public enum DefaultWidgetAlignment implements WidgetAlignment { LEFT(-1.0F), CENTERED(-0.5F), RIGHT(0.0F); private float value; DefaultWidgetAlignment(float value) { this.value = value; } @Override public final float getValue() { return this.value; } }
apache-2.0
fabric8-ui/fabric8-ui
packages/planner/src/app/components_ngrx/assignee/assignee.component.ts
502
import { Component, Input } from '@angular/core'; import { User } from 'ngx-login-client'; @Component({ selector: 'f8-assignee', templateUrl: './assignee.component.html', styleUrls: ['./assignee.component.less'], }) export class AssigneesComponent { private assignees: User[] = []; @Input() truncateAfter: number; @Input() showFullName: boolean; @Input('assignees') set assigneeInput(val) { this.assignees = val; } @Input() overlapAvatar: boolean = false; constructor() {} }
apache-2.0
GwtMaterialDesign/gwt-material
gwt-material/src/main/java/gwt/material/design/client/ui/html/Code.java
1196
package gwt.material.design.client.ui.html; /* * #%L * GwtBootstrap3 * %% * Copyright (C) 2013 GwtBootstrap3 * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ import com.google.gwt.dom.client.Document; import gwt.material.design.client.base.AbstractTextWidget; import gwt.material.design.client.base.helper.CodeHelper; /** * @author Ben Dol */ public class Code extends AbstractTextWidget { public Code() { super(Document.get().createElement("code")); } public Code(final String text) { this(); setHTML(text); } @Override public void setHTML(String html) { this.getElement().setInnerHTML(html); } }
apache-2.0
jay-hodgson/SynapseWebClient
src/main/webapp/js/goog/ui/editor/toolbarcontroller.js
9007
// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Copyright 2008 Google Inc. All Rights Reserved. /** * @fileoverview A class for managing the editor toolbar. * * @see ../../demos/editor/editor.html */ goog.provide('goog.ui.editor.ToolbarController'); goog.require('goog.editor.Field.EventType'); goog.require('goog.events.EventHandler'); goog.require('goog.events.EventTarget'); goog.require('goog.ui.Component.EventType'); /** * A class for managing the editor toolbar. Acts as a bridge between * a {@link goog.editor.Field} and a {@link goog.ui.Toolbar}. * * The {@code toolbar} argument must be an instance of {@link goog.ui.Toolbar} * or a subclass. This class doesn't care how the toolbar was created. As * long as one or more controls hosted in the toolbar have IDs that match * built-in {@link goog.editor.Command}s, they will function as expected. It is * the caller's responsibility to ensure that the toolbar is already rendered * or that it decorates an existing element. * * * @param {!goog.editor.Field} field Editable field to be controlled by the * toolbar. * @param {!goog.ui.Toolbar} toolbar Toolbar to control the editable field. * @constructor * @extends {goog.events.EventTarget} */ goog.ui.editor.ToolbarController = function(field, toolbar) { goog.events.EventTarget.call(this); /** * Event handler to listen for field events and user actions. * @type {!goog.events.EventHandler} * @private */ this.handler_ = new goog.events.EventHandler(this); /** * The field instance controlled by the toolbar. * @type {!goog.editor.Field} * @private */ this.field_ = field; /** * The toolbar that controls the field. * @type {!goog.ui.Toolbar} * @private */ this.toolbar_ = toolbar; /** * Editing commands whose state is to be queried when updating the toolbar. * @type {!Array.<string>} * @private */ this.queryCommands_ = []; // Iterate over all buttons, and find those which correspond to // queryable commands. Add them to the list of commands to query on // each COMMAND_VALUE_CHANGE event. this.toolbar_.forEachChild(function(button) { if (button.queryable) { this.queryCommands_.push(this.getComponentId(button.getId())); } }, this); // Make sure the toolbar doesn't steal keyboard focus. this.toolbar_.setFocusable(false); // Hook up handlers that update the toolbar in response to field events, // and to execute editor commands in response to toolbar events. this.handler_. listen(this.field_, goog.editor.Field.EventType.COMMAND_VALUE_CHANGE, this.updateToolbar). listen(this.toolbar_, goog.ui.Component.EventType.ACTION, this.handleAction); }; goog.inherits(goog.ui.editor.ToolbarController, goog.events.EventTarget); /** * Returns the Closure component ID of the control that corresponds to the * given {@link goog.editor.Command} constant. * Subclasses may override this method if they want to use a custom mapping * scheme from commands to controls. * @param {string} command Editor command. * @return {string} Closure component ID of the corresponding toolbar * control, if any. * @protected */ goog.ui.editor.ToolbarController.prototype.getComponentId = function(command) { // The default implementation assumes that the component ID is the same as // the command constant. return command; }; /** * Returns the {@link goog.editor.Command} constant * that corresponds to the given Closure component ID. Subclasses may override * this method if they want to use a custom mapping scheme from controls to * commands. * @param {string} id Closure component ID of a toolbar control. * @return {string} Editor command or dialog constant corresponding to the * toolbar control, if any. * @protected */ goog.ui.editor.ToolbarController.prototype.getCommand = function(id) { // The default implementation assumes that the component ID is the same as // the command constant. return id; }; /** * Returns the event handler object for the editor toolbar. Useful for classes * that extend {@code goog.ui.editor.ToolbarController}. * @return {!goog.events.EventHandler} The event handler object. * @protected */ goog.ui.editor.ToolbarController.prototype.getHandler = function() { return this.handler_; }; /** * Returns the field instance managed by the toolbar. Useful for * classes that extend {@code goog.ui.editor.ToolbarController}. * @return {!goog.editor.Field} The field managed by the toolbar. * @protected */ goog.ui.editor.ToolbarController.prototype.getField = function() { return this.field_; }; /** * Returns the toolbar UI component that manages the editor. Useful for * classes that extend {@code goog.ui.editor.ToolbarController}. * @return {!goog.ui.Toolbar} The toolbar UI component. */ goog.ui.editor.ToolbarController.prototype.getToolbar = function() { return this.toolbar_; }; /** * @return {boolean} Whether the toolbar is visible. */ goog.ui.editor.ToolbarController.prototype.isVisible = function() { return this.toolbar_.isVisible(); }; /** * Shows or hides the toolbar. * @param {boolean} visible Whether to show or hide the toolbar. */ goog.ui.editor.ToolbarController.prototype.setVisible = function(visible) { this.toolbar_.setVisible(visible); }; /** * @return {boolean} Whether the toolbar is enabled. */ goog.ui.editor.ToolbarController.prototype.isEnabled = function() { return this.toolbar_.isEnabled(); }; /** * Enables or disables the toolbar. * @param {boolean} enabled Whether to enable or disable the toolbar. */ goog.ui.editor.ToolbarController.prototype.setEnabled = function(enabled) { this.toolbar_.setEnabled(enabled); }; /** * Programmatically blurs the editor toolbar, un-highlighting the currently * highlighted item, and closing the currently open menu (if any). */ goog.ui.editor.ToolbarController.prototype.blur = function() { // We can't just call this.toolbar_.getElement().blur(), because the toolbar // element itself isn't focusable, so goog.ui.Container#handleBlur isn't // registered to handle blur events. this.toolbar_.handleBlur(null); }; /** @inheritDoc */ goog.ui.editor.ToolbarController.prototype.disposeInternal = function() { goog.ui.editor.ToolbarController.superClass_.disposeInternal.call(this); if (this.handler_) { this.handler_.dispose(); delete this.handler_; } if (this.toolbar_) { this.toolbar_.dispose(); delete this.toolbar_; } delete this.field_; delete this.queryCommands_; }; /** * Updates the toolbar in response to editor events. Specifically, updates * button states based on {@code COMMAND_VALUE_CHANGE} events, reflecting the * effective formatting of the selection. * @param {goog.events.Event} e Editor event to handle. * @protected */ goog.ui.editor.ToolbarController.prototype.updateToolbar = function(e) { if (!this.toolbar_.isEnabled() || !this.dispatchEvent(goog.ui.Component.EventType.CHANGE)) { return; } var state; /** @preserveTry */ try { /** @type {Array.<string>} */ e.commands; // Added by dispatchEvent. // If the COMMAND_VALUE_CHANGE event specifies which commands changed // state, then we only need to update those ones, otherwise update all // commands. state = /** @type {Object} */ ( this.field_.queryCommandValue(e.commands || this.queryCommands_)); } catch (ex) { // TODO: Find out when/why this happens. state = {}; } this.updateToolbarFromState(state); }; /** * Updates the toolbar to reflect a given state. * @param {Object} state Object mapping editor commands to values. */ goog.ui.editor.ToolbarController.prototype.updateToolbarFromState = function(state) { for (var command in state) { var button = this.toolbar_.getChild(this.getComponentId(command)); if (button) { var value = state[command]; if (button.updateFromValue) { button.updateFromValue(value); } else { button.setChecked(!!value); } } } }; /** * Handles {@code ACTION} events dispatched by toolbar buttons in response to * user actions by executing the corresponding field command. * @param {goog.events.Event} e Action event to handle. * @protected */ goog.ui.editor.ToolbarController.prototype.handleAction = function(e) { var command = this.getCommand(e.target.getId()); this.field_.execCommand(command, e.target.getValue()); };
apache-2.0
ShengJunHu/FastAndroid
library/common/src/main/java/com/hsj/common/rxbus/BusThead.java
1429
/* * Copyright (c) 2017. HSJ * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.hsj.common.rxbus; import java.lang.annotation.Documented; import java.lang.annotation.ElementType; import java.lang.annotation.Inherited; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; /** * @Author:HSJ * @E-mail:[email protected] * @Date:2018/2/25/14:18 * @Version:V1.0 * @Class:BusThead * @Description:线程注解 */ @Documented @Inherited @Target(ElementType.PARAMETER) @Retention(RetentionPolicy.SOURCE) public @interface BusThead { String CURRENT_THREAD = "current_thread"; String UI_THEAD = "ui_thread"; String MAIN_THEAD = "main_thread"; String NEW_THEAD = "new_thread"; String IO_THEAD = "io_thread"; }
apache-2.0
DavesMan/guava
guava-gwt/src-super/com/google/common/collect/super/com/google/common/collect/ImmutableBiMap.java
4820
/* * Copyright (C) 2009 The Guava Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.common.collect; import static com.google.common.collect.CollectPreconditions.checkEntryNotNull; import com.google.common.annotations.Beta; import java.util.Comparator; import java.util.Map; import java.util.Map.Entry; import java.util.function.Function; import java.util.stream.Collector; /** * GWT emulation of {@link com.google.common.collect.ImmutableBiMap}. * * @author Hayward Chan */ public abstract class ImmutableBiMap<K, V> extends ForwardingImmutableMap<K, V> implements BiMap<K, V> { @Beta public static <T, K, V> Collector<T, ?, ImmutableBiMap<K, V>> toImmutableBiMap( Function<? super T, ? extends K> keyFunction, Function<? super T, ? extends V> valueFunction) { return CollectCollectors.toImmutableBiMap(keyFunction, valueFunction); } // Casting to any type is safe because the set will never hold any elements. @SuppressWarnings("unchecked") public static <K, V> ImmutableBiMap<K, V> of() { return (ImmutableBiMap<K, V>) RegularImmutableBiMap.EMPTY; } public static <K, V> ImmutableBiMap<K, V> of(K k1, V v1) { checkEntryNotNull(k1, v1); return new SingletonImmutableBiMap<K, V>(k1, v1); } public static <K, V> ImmutableBiMap<K, V> of(K k1, V v1, K k2, V v2) { return new RegularImmutableBiMap<K, V>(ImmutableMap.of(k1, v1, k2, v2)); } public static <K, V> ImmutableBiMap<K, V> of( K k1, V v1, K k2, V v2, K k3, V v3) { return new RegularImmutableBiMap<K, V>(ImmutableMap.of( k1, v1, k2, v2, k3, v3)); } public static <K, V> ImmutableBiMap<K, V> of( K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4) { return new RegularImmutableBiMap<K, V>(ImmutableMap.of( k1, v1, k2, v2, k3, v3, k4, v4)); } public static <K, V> ImmutableBiMap<K, V> of( K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5) { return new RegularImmutableBiMap<K, V>(ImmutableMap.of( k1, v1, k2, v2, k3, v3, k4, v4, k5, v5)); } public static <K, V> Builder<K, V> builder() { return new Builder<K, V>(); } public static final class Builder<K, V> extends ImmutableMap.Builder<K, V> { public Builder() {} Builder(int initCapacity) { super(initCapacity); } @Override public Builder<K, V> put(K key, V value) { super.put(key, value); return this; } @Override public Builder<K, V> put(Map.Entry<? extends K, ? extends V> entry) { super.put(entry); return this; } @Override public Builder<K, V> putAll(Map<? extends K, ? extends V> map) { super.putAll(map); return this; } @Override public Builder<K, V> putAll( Iterable<? extends Entry<? extends K, ? extends V>> entries) { super.putAll(entries); return this; } public Builder<K, V> orderEntriesByValue(Comparator<? super V> valueComparator) { super.orderEntriesByValue(valueComparator); return this; } Builder<K, V> combine(Builder<K, V> other) { super.combine(other); return this; } @Override public ImmutableBiMap<K, V> build() { ImmutableMap<K, V> map = super.build(); if (map.isEmpty()) { return of(); } return new RegularImmutableBiMap<K, V>(super.build()); } } public static <K, V> ImmutableBiMap<K, V> copyOf( Map<? extends K, ? extends V> map) { if (map instanceof ImmutableBiMap) { @SuppressWarnings("unchecked") // safe since map is not writable ImmutableBiMap<K, V> bimap = (ImmutableBiMap<K, V>) map; return bimap; } if (map.isEmpty()) { return of(); } ImmutableMap<K, V> immutableMap = ImmutableMap.copyOf(map); return new RegularImmutableBiMap<K, V>(immutableMap); } public static <K, V> ImmutableBiMap<K, V> copyOf( Iterable<? extends Entry<? extends K, ? extends V>> entries) { return new Builder<K, V>().putAll(entries).build(); } ImmutableBiMap(Map<K, V> delegate) { super(delegate); } public abstract ImmutableBiMap<V, K> inverse(); @Override public ImmutableSet<V> values() { return inverse().keySet(); } public final V forcePut(K key, V value) { throw new UnsupportedOperationException(); } }
apache-2.0
frvannes16/Cops-Robbers-Coding-Challenge
src/competition_code/libs/cssselect/__init__.py
639
# -*- coding: utf-8 -*- """ CSS Selectors based on XPath ============================ This module supports selecting XML/HTML elements based on CSS selectors. See the `CSSSelector` class for details. :copyright: (c) 2007-2012 Ian Bicking and contributors. See AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from cssselect.parser import (parse, Selector, FunctionalPseudoElement, SelectorError, SelectorSyntaxError) from cssselect.xpath import GenericTranslator, HTMLTranslator, ExpressionError VERSION = '1.0.1' __version__ = VERSION
apache-2.0
furesoft/roslyn
src/Compilers/Core/Portable/PEWriter/PeWriter.cs
69905
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using System; using System.Collections.Generic; using System.Diagnostics; using System.IO; using System.Linq; using System.Reflection.PortableExecutable; using System.Text; using System.Threading; using Microsoft.CodeAnalysis; using Roslyn.Utilities; using EmitContext = Microsoft.CodeAnalysis.Emit.EmitContext; namespace Microsoft.Cci { internal sealed class PeWriter { /// <summary> /// True if we should attempt to generate a deterministic output (no timestamps or random data). /// </summary> private readonly bool _deterministic; private readonly IModule _module; private readonly string _pdbPathOpt; private readonly bool _emitRuntimeStartupStub; private readonly int _sizeOfImportAddressTable; private MemoryStream _headerStream = new MemoryStream(1024); private readonly MemoryStream _emptyStream = new MemoryStream(0); private readonly NtHeader _ntHeader = new NtHeader(); private readonly BinaryWriter _rdataWriter = new BinaryWriter(new MemoryStream()); private readonly BinaryWriter _sdataWriter = new BinaryWriter(new MemoryStream()); private readonly BinaryWriter _tlsDataWriter = new BinaryWriter(new MemoryStream()); private readonly BinaryWriter _win32ResourceWriter = new BinaryWriter(new MemoryStream(1024)); private readonly BinaryWriter _coverageDataWriter = new BinaryWriter(new MemoryStream()); private SectionHeader _coverSection; private SectionHeader _relocSection; private SectionHeader _resourceSection; private SectionHeader _rdataSection; private SectionHeader _sdataSection; private SectionHeader _textSection; private SectionHeader _tlsSection; private PeWriter(IModule module, string pdbPathOpt, bool deterministic) { _module = module; _emitRuntimeStartupStub = module.RequiresStartupStub; _pdbPathOpt = pdbPathOpt; _deterministic = deterministic; _sizeOfImportAddressTable = _emitRuntimeStartupStub ? (!_module.Requires64bits ? 8 : 16) : 0; } private bool EmitPdb => _pdbPathOpt != null; public static bool WritePeToStream( EmitContext context, CommonMessageProvider messageProvider, Func<Stream> getPeStream, PdbWriter nativePdbWriterOpt, string pdbPathOpt, bool allowMissingMethodBodies, bool deterministic, CancellationToken cancellationToken) { // If PDB writer is given, we have to have PDB path. Debug.Assert(nativePdbWriterOpt == null || pdbPathOpt != null); var peWriter = new PeWriter(context.Module, pdbPathOpt, deterministic); var mdWriter = FullMetadataWriter.Create(context, messageProvider, allowMissingMethodBodies, deterministic, cancellationToken); return peWriter.WritePeToStream(mdWriter, getPeStream, nativePdbWriterOpt); } private bool WritePeToStream(MetadataWriter mdWriter, Func<Stream> getPeStream, PdbWriter nativePdbWriterOpt) { // TODO: we can precalculate the exact size of IL stream var ilBuffer = new MemoryStream(32 * 1024); var ilWriter = new BinaryWriter(ilBuffer); var metadataBuffer = new MemoryStream(16 * 1024); var metadataWriter = new BinaryWriter(metadataBuffer); var mappedFieldDataBuffer = new MemoryStream(); var mappedFieldDataWriter = new BinaryWriter(mappedFieldDataBuffer); var managedResourceBuffer = new MemoryStream(1024); var managedResourceWriter = new BinaryWriter(managedResourceBuffer); nativePdbWriterOpt?.SetMetadataEmitter(mdWriter); // Since we are producing a full assembly, we should not have a module version ID // imposed ahead-of time. Instead we will compute a deterministic module version ID // based on the contents of the generated stream. Debug.Assert(_module.PersistentIdentifier == default(Guid)); uint moduleVersionIdOffsetInMetadataStream; var calculateMethodBodyStreamRva = new Func<MetadataSizes, int>(mdSizes => { FillInTextSectionHeader(mdSizes); return (int)_textSection.RelativeVirtualAddress + _sizeOfImportAddressTable + 72; }); MetadataSizes metadataSizes; uint entryPointToken; mdWriter.SerializeMetadataAndIL( nativePdbWriterOpt, metadataWriter, ilWriter, mappedFieldDataWriter, managedResourceWriter, calculateMethodBodyStreamRva, CalculateMappedFieldDataStreamRva, out moduleVersionIdOffsetInMetadataStream, out entryPointToken, out metadataSizes); ContentId pdbContentId; if (nativePdbWriterOpt != null) { if (entryPointToken != 0) { nativePdbWriterOpt.SetEntryPoint(entryPointToken); } var assembly = _module.AsAssembly; if (assembly != null && assembly.Kind == ModuleKind.WindowsRuntimeMetadata) { // Dev12: If compiling to winmdobj, we need to add to PDB source spans of // all types and members for better error reporting by WinMDExp. nativePdbWriterOpt.WriteDefinitionLocations(_module.GetSymbolToLocationMap()); } else { #if DEBUG // validate that all definitions are writeable // if same scenario would happen in an winmdobj project nativePdbWriterOpt.AssertAllDefinitionsHaveTokens(_module.GetSymbolToLocationMap()); #endif } pdbContentId = nativePdbWriterOpt.GetContentId(); // the writer shall not be used after this point for writing: nativePdbWriterOpt = null; } else { pdbContentId = default(ContentId); } FillInSectionHeaders(); // fill in header fields. FillInNtHeader(metadataSizes, CalculateMappedFieldDataStreamRva(metadataSizes)); var corHeader = CreateCorHeader(metadataSizes, entryPointToken); // write to pe stream. Stream peStream = getPeStream(); if (peStream == null) { return false; } long ntHeaderTimestampPosition; long metadataPosition; WriteHeaders(peStream, out ntHeaderTimestampPosition); WriteTextSection( peStream, corHeader, metadataBuffer, ilBuffer, mappedFieldDataBuffer, managedResourceBuffer, metadataSizes, pdbContentId, out metadataPosition); WriteRdataSection(peStream); WriteSdataSection(peStream); WriteCoverSection(peStream); WriteTlsSection(peStream); WriteResourceSection(peStream); WriteRelocSection(peStream); if (_deterministic) { var mvidPosition = metadataPosition + moduleVersionIdOffsetInMetadataStream; WriteDeterministicGuidAndTimestamps(peStream, mvidPosition, ntHeaderTimestampPosition); } return true; } private int CalculateMappedFieldDataStreamRva(MetadataSizes metadataSizes) { FillInTextSectionHeader(metadataSizes); Debug.Assert(metadataSizes.MappedFieldDataSize % MetadataWriter.MappedFieldDataAlignment == 0); return (int)(_textSection.RelativeVirtualAddress + _textSection.VirtualSize - metadataSizes.MappedFieldDataSize); } /// <summary> /// Compute a deterministic Guid and timestamp based on the contents of the stream, and replace /// the 16 zero bytes at the given position and one or two 4-byte values with that computed Guid and timestamp. /// </summary> /// <param name="peStream">PE stream.</param> /// <param name="mvidPosition">Position in the stream of 16 zero bytes to be replaced by a Guid</param> /// <param name="ntHeaderTimestampPosition">Position in the stream of four zero bytes to be replaced by a timestamp</param> private static void WriteDeterministicGuidAndTimestamps( Stream peStream, long mvidPosition, long ntHeaderTimestampPosition) { Debug.Assert(mvidPosition != 0); Debug.Assert(ntHeaderTimestampPosition != 0); var previousPosition = peStream.Position; // Compute and write deterministic guid data over the relevant portion of the stream peStream.Position = 0; var contentId = ContentId.FromHash(CryptographicHashProvider.ComputeSha1(peStream)); // The existing Guid should be zero. CheckZeroDataInStream(peStream, mvidPosition, contentId.Guid.Length); peStream.Position = mvidPosition; peStream.Write(contentId.Guid, 0, contentId.Guid.Length); // The existing timestamp should be zero. CheckZeroDataInStream(peStream, ntHeaderTimestampPosition, contentId.Stamp.Length); peStream.Position = ntHeaderTimestampPosition; peStream.Write(contentId.Stamp, 0, contentId.Stamp.Length); peStream.Position = previousPosition; } [Conditional("DEBUG")] private static void CheckZeroDataInStream(Stream stream, long position, int bytes) { stream.Position = position; for (int i = 0; i < bytes; i++) { int value = stream.ReadByte(); Debug.Assert(value == 0); } } private int ComputeStrongNameSignatureSize() { IAssembly assembly = _module.AsAssembly; if (assembly == null) { return 0; } // EDMAURER the count of characters divided by two because the each pair of characters will turn in to one byte. int keySize = (assembly.SignatureKey == null) ? 0 : assembly.SignatureKey.Length / 2; if (keySize == 0) { keySize = assembly.PublicKey.Length; } if (keySize == 0) { return 0; } return (keySize < 128 + 32) ? 128 : keySize - 32; } private int ComputeOffsetToDebugTable(MetadataSizes metadataSizes) { return ComputeOffsetToMetadata(metadataSizes.ILStreamSize) + metadataSizes.MetadataSize + metadataSizes.ResourceDataSize + ComputeStrongNameSignatureSize(); // size of strong name hash } private int ComputeOffsetToImportTable(MetadataSizes metadataSizes) { // TODO: add size of unmanaged export stubs (when and if these are ever supported). return ComputeOffsetToDebugTable(metadataSizes) + ComputeSizeOfDebugDirectory(); } private int ComputeOffsetToMetadata(int ilStreamLength) { return _sizeOfImportAddressTable + 72 + // size of CLR header BitArithmeticUtilities.Align(ilStreamLength, 4); } private const int ImageDebugDirectoryBaseSize = sizeof(uint) + // Characteristics sizeof(uint) + // TimeDataStamp sizeof(uint) + // Version sizeof(uint) + // Type sizeof(uint) + // SizeOfData sizeof(uint) + // AddressOfRawData sizeof(uint); // PointerToRawData private int ComputeSizeOfDebugDirectoryData() { return 4 + // 4B signature "RSDS" 16 + // GUID sizeof(uint) + // Age Encoding.UTF8.GetByteCount(_pdbPathOpt) + 1; // Null terminator } private int ComputeSizeOfDebugDirectory() { return EmitPdb ? ImageDebugDirectoryBaseSize + ComputeSizeOfDebugDirectoryData() : 0; } private uint ComputeSizeOfPeHeaders() { ushort numberOfSections = 1; // .text if (_emitRuntimeStartupStub) numberOfSections++; //.reloc if (_tlsDataWriter.BaseStream.Length > 0) numberOfSections++; //.tls if (_rdataWriter.BaseStream.Length > 0) numberOfSections++; //.rdata if (_sdataWriter.BaseStream.Length > 0) numberOfSections++; //.sdata if (_coverageDataWriter.BaseStream.Length > 0) numberOfSections++; //.cover if (!IteratorHelper.EnumerableIsEmpty(_module.Win32Resources) || _module.Win32ResourceSection != null) numberOfSections++; //.rsrc; _ntHeader.NumberOfSections = numberOfSections; uint sizeOfPeHeaders = 128 + 4 + 20 + 224 + 40u * numberOfSections; if (_module.Requires64bits) { sizeOfPeHeaders += 16; } return sizeOfPeHeaders; } private int ComputeSizeOfTextSection(MetadataSizes metadataSizes) { int textSectionLength = this.ComputeOffsetToImportTable(metadataSizes); if (_emitRuntimeStartupStub) { textSectionLength += !_module.Requires64bits ? 66 : 70; //size of import table textSectionLength += 14; //size of name table textSectionLength = BitArithmeticUtilities.Align(textSectionLength, !_module.Requires64bits ? 4 : 8); //optional padding to make startup stub's target address align on word or double word boundary textSectionLength += !_module.Requires64bits ? 8 : 16; //fixed size of runtime startup stub } Debug.Assert(metadataSizes.MappedFieldDataSize % MetadataWriter.MappedFieldDataAlignment == 0); textSectionLength += metadataSizes.MappedFieldDataSize; return textSectionLength; } private uint ComputeSizeOfWin32Resources(uint resourcesRva) { this.SerializeWin32Resources(resourcesRva); uint result = 0; if (_win32ResourceWriter.BaseStream.Length > 0) { result += BitArithmeticUtilities.Align(_win32ResourceWriter.BaseStream.Length, 4); } // result += Align(this.win32ResourceWriter.BaseStream.Length+1, 8); return result; } private CorHeader CreateCorHeader(MetadataSizes metadataSizes, uint entryPointToken) { CorHeader corHeader = new CorHeader(); corHeader.CodeManagerTable.RelativeVirtualAddress = 0; corHeader.CodeManagerTable.Size = 0; corHeader.EntryPointToken = entryPointToken; corHeader.ExportAddressTableJumps.RelativeVirtualAddress = 0; corHeader.ExportAddressTableJumps.Size = 0; corHeader.Flags = this.GetCorHeaderFlags(); corHeader.MajorRuntimeVersion = 2; corHeader.MetadataDirectory.RelativeVirtualAddress = _textSection.RelativeVirtualAddress + (uint)ComputeOffsetToMetadata(metadataSizes.ILStreamSize); corHeader.MetadataDirectory.Size = (uint)metadataSizes.MetadataSize; corHeader.MinorRuntimeVersion = 5; corHeader.Resources.RelativeVirtualAddress = corHeader.MetadataDirectory.RelativeVirtualAddress + corHeader.MetadataDirectory.Size; corHeader.Resources.Size = (uint)metadataSizes.ResourceDataSize; corHeader.StrongNameSignature.RelativeVirtualAddress = corHeader.Resources.RelativeVirtualAddress + corHeader.Resources.Size; corHeader.StrongNameSignature.Size = (uint)ComputeStrongNameSignatureSize(); corHeader.VTableFixups.RelativeVirtualAddress = 0; corHeader.VTableFixups.Size = 0; return corHeader; } private void FillInNtHeader(MetadataSizes metadataSizes, int mappedFieldDataStreamRva) { bool use32bitAddresses = !_module.Requires64bits; NtHeader ntHeader = _ntHeader; ntHeader.AddressOfEntryPoint = _emitRuntimeStartupStub ? (uint)mappedFieldDataStreamRva - (use32bitAddresses ? 6u : 10u) : 0; ntHeader.BaseOfCode = _textSection.RelativeVirtualAddress; ntHeader.BaseOfData = _rdataSection.RelativeVirtualAddress; ntHeader.PointerToSymbolTable = 0; ntHeader.SizeOfCode = _textSection.SizeOfRawData; ntHeader.SizeOfInitializedData = _rdataSection.SizeOfRawData + _coverSection.SizeOfRawData + _sdataSection.SizeOfRawData + _tlsSection.SizeOfRawData + _resourceSection.SizeOfRawData + _relocSection.SizeOfRawData; ntHeader.SizeOfHeaders = BitArithmeticUtilities.Align(this.ComputeSizeOfPeHeaders(), _module.FileAlignment); ntHeader.SizeOfImage = BitArithmeticUtilities.Align(_relocSection.RelativeVirtualAddress + _relocSection.VirtualSize, 0x2000); ntHeader.SizeOfUninitializedData = 0; // In the PE File Header this is a "Time/Date Stamp" whose description is "Time and date // the file was created in seconds since January 1st 1970 00:00:00 or 0" // However, when we want to make it deterministic we fill it in (later) with bits from the hash of the full PE file. ntHeader.TimeDateStamp = _deterministic ? 0 : (uint)(DateTime.UtcNow - new DateTime(1970, 1, 1)).TotalSeconds; ntHeader.ImportAddressTable.RelativeVirtualAddress = (_emitRuntimeStartupStub) ? _textSection.RelativeVirtualAddress : 0; ntHeader.ImportAddressTable.Size = (uint)_sizeOfImportAddressTable; ntHeader.CliHeaderTable.RelativeVirtualAddress = _textSection.RelativeVirtualAddress + ntHeader.ImportAddressTable.Size; ntHeader.CliHeaderTable.Size = 72; ntHeader.ImportTable.RelativeVirtualAddress = _textSection.RelativeVirtualAddress + (uint)ComputeOffsetToImportTable(metadataSizes); if (!_emitRuntimeStartupStub) { ntHeader.ImportTable.Size = 0; ntHeader.ImportTable.RelativeVirtualAddress = 0; } else { ntHeader.ImportTable.Size = use32bitAddresses ? 66u : 70u; ntHeader.ImportTable.Size += 13; //size of nametable } ntHeader.BaseRelocationTable.RelativeVirtualAddress = (_emitRuntimeStartupStub) ? _relocSection.RelativeVirtualAddress : 0; ntHeader.BaseRelocationTable.Size = _relocSection.VirtualSize; ntHeader.BoundImportTable.RelativeVirtualAddress = 0; ntHeader.BoundImportTable.Size = 0; ntHeader.CertificateTable.RelativeVirtualAddress = 0; ntHeader.CertificateTable.Size = 0; ntHeader.CopyrightTable.RelativeVirtualAddress = 0; ntHeader.CopyrightTable.Size = 0; ntHeader.DebugTable.RelativeVirtualAddress = EmitPdb ? _textSection.RelativeVirtualAddress + (uint)ComputeOffsetToDebugTable(metadataSizes) : 0u; ntHeader.DebugTable.Size = EmitPdb ? ImageDebugDirectoryBaseSize : 0u; // Only the size of the fixed part of the debug table goes here. ntHeader.DelayImportTable.RelativeVirtualAddress = 0; ntHeader.DelayImportTable.Size = 0; ntHeader.ExceptionTable.RelativeVirtualAddress = 0; ntHeader.ExceptionTable.Size = 0; ntHeader.ExportTable.RelativeVirtualAddress = 0; ntHeader.ExportTable.Size = 0; ntHeader.GlobalPointerTable.RelativeVirtualAddress = 0; ntHeader.GlobalPointerTable.Size = 0; ntHeader.LoadConfigTable.RelativeVirtualAddress = 0; ntHeader.LoadConfigTable.Size = 0; ntHeader.Reserved.RelativeVirtualAddress = 0; ntHeader.Reserved.Size = 0; ntHeader.ResourceTable.RelativeVirtualAddress = _resourceSection.SizeOfRawData == 0 ? 0u : _resourceSection.RelativeVirtualAddress; ntHeader.ResourceTable.Size = _resourceSection.VirtualSize; ntHeader.ThreadLocalStorageTable.RelativeVirtualAddress = _tlsSection.SizeOfRawData == 0 ? 0u : _tlsSection.RelativeVirtualAddress; ntHeader.ThreadLocalStorageTable.Size = _tlsSection.SizeOfRawData; } private void FillInTextSectionHeader(MetadataSizes metadataSizes) { if (_textSection == null) { uint sizeOfPeHeaders = (uint)ComputeSizeOfPeHeaders(); uint sizeOfTextSection = (uint)ComputeSizeOfTextSection(metadataSizes); _textSection = new SectionHeader { Characteristics = 0x60000020, // section is read + execute + code Name = ".text", NumberOfLinenumbers = 0, NumberOfRelocations = 0, PointerToLinenumbers = 0, PointerToRawData = BitArithmeticUtilities.Align(sizeOfPeHeaders, _module.FileAlignment), PointerToRelocations = 0, RelativeVirtualAddress = BitArithmeticUtilities.Align(sizeOfPeHeaders, 0x2000), SizeOfRawData = BitArithmeticUtilities.Align(sizeOfTextSection, _module.FileAlignment), VirtualSize = sizeOfTextSection }; } } private void FillInSectionHeaders() { _rdataSection = new SectionHeader { Characteristics = 0x40000040, // section is read + initialized Name = ".rdata", NumberOfLinenumbers = 0, NumberOfRelocations = 0, PointerToLinenumbers = 0, PointerToRawData = _textSection.PointerToRawData + _textSection.SizeOfRawData, PointerToRelocations = 0, RelativeVirtualAddress = BitArithmeticUtilities.Align(_textSection.RelativeVirtualAddress + _textSection.VirtualSize, 0x2000), SizeOfRawData = BitArithmeticUtilities.Align(_rdataWriter.BaseStream.Length, _module.FileAlignment), VirtualSize = _rdataWriter.BaseStream.Length, }; _sdataSection = new SectionHeader { Characteristics = 0xC0000040, // section is write + read + initialized Name = ".sdata", NumberOfLinenumbers = 0, NumberOfRelocations = 0, PointerToLinenumbers = 0, PointerToRawData = _rdataSection.PointerToRawData + _rdataSection.SizeOfRawData, PointerToRelocations = 0, RelativeVirtualAddress = BitArithmeticUtilities.Align(_rdataSection.RelativeVirtualAddress + _rdataSection.VirtualSize, 0x2000), SizeOfRawData = BitArithmeticUtilities.Align(_sdataWriter.BaseStream.Length, _module.FileAlignment), VirtualSize = _sdataWriter.BaseStream.Length, }; _coverSection = new SectionHeader { Characteristics = 0xC8000040, // section is not paged + write + read + initialized Name = ".cover", NumberOfLinenumbers = 0, NumberOfRelocations = 0, PointerToLinenumbers = 0, PointerToRawData = _sdataSection.PointerToRawData + _sdataSection.SizeOfRawData, PointerToRelocations = 0, RelativeVirtualAddress = BitArithmeticUtilities.Align(_sdataSection.RelativeVirtualAddress + _sdataSection.VirtualSize, 0x2000), SizeOfRawData = BitArithmeticUtilities.Align(_coverageDataWriter.BaseStream.Length, _module.FileAlignment), VirtualSize = _coverageDataWriter.BaseStream.Length, }; _tlsSection = new SectionHeader { Characteristics = 0xC0000040, // section is write + read + initialized Name = ".tls", NumberOfLinenumbers = 0, NumberOfRelocations = 0, PointerToLinenumbers = 0, PointerToRawData = _coverSection.PointerToRawData + _coverSection.SizeOfRawData, PointerToRelocations = 0, RelativeVirtualAddress = BitArithmeticUtilities.Align(_coverSection.RelativeVirtualAddress + _coverSection.VirtualSize, 0x2000), SizeOfRawData = BitArithmeticUtilities.Align(_tlsDataWriter.BaseStream.Length, _module.FileAlignment), VirtualSize = _tlsDataWriter.BaseStream.Length, }; uint resourcesRva = BitArithmeticUtilities.Align(_tlsSection.RelativeVirtualAddress + _tlsSection.VirtualSize, 0x2000); uint sizeOfWin32Resources = this.ComputeSizeOfWin32Resources(resourcesRva); _resourceSection = new SectionHeader { Characteristics = 0x40000040, // section is read + initialized Name = ".rsrc", NumberOfLinenumbers = 0, NumberOfRelocations = 0, PointerToLinenumbers = 0, PointerToRawData = _tlsSection.PointerToRawData + _tlsSection.SizeOfRawData, PointerToRelocations = 0, RelativeVirtualAddress = resourcesRva, SizeOfRawData = BitArithmeticUtilities.Align(sizeOfWin32Resources, _module.FileAlignment), VirtualSize = sizeOfWin32Resources, }; _relocSection = new SectionHeader { Characteristics = 0x42000040, // section is read + discardable + initialized Name = ".reloc", NumberOfLinenumbers = 0, NumberOfRelocations = 0, PointerToLinenumbers = 0, PointerToRawData = _resourceSection.PointerToRawData + _resourceSection.SizeOfRawData, PointerToRelocations = 0, RelativeVirtualAddress = BitArithmeticUtilities.Align(_resourceSection.RelativeVirtualAddress + _resourceSection.VirtualSize, 0x2000), SizeOfRawData = _emitRuntimeStartupStub ? _module.FileAlignment : 0, VirtualSize = _emitRuntimeStartupStub ? (_module.Requires64bits && !_module.RequiresAmdInstructionSet ? 14u : 12u) : 0, }; } private CorFlags GetCorHeaderFlags() { CorFlags result = 0; if (_module.ILOnly) { result |= CorFlags.ILOnly; } if (_module.Requires32bits) { result |= CorFlags.Requires32Bit; } if (_module.StrongNameSigned) { result |= CorFlags.StrongNameSigned; } if (_module.TrackDebugData) { result |= CorFlags.TrackDebugData; } if (_module.Prefers32bits) { result |= CorFlags.Requires32Bit | CorFlags.Prefers32Bit; } return result; } //// //// Resource Format. //// //// //// Resource directory consists of two counts, following by a variable length //// array of directory entries. The first count is the number of entries at //// beginning of the array that have actual names associated with each entry. //// The entries are in ascending order, case insensitive strings. The second //// count is the number of entries that immediately follow the named entries. //// This second count identifies the number of entries that have 16-bit integer //// Ids as their name. These entries are also sorted in ascending order. //// //// This structure allows fast lookup by either name or number, but for any //// given resource entry only one form of lookup is supported, not both. //// This is consistant with the syntax of the .RC file and the .RES file. //// //typedef struct _IMAGE_RESOURCE_DIRECTORY { // DWORD Characteristics; // DWORD TimeDateStamp; // WORD MajorVersion; // WORD MinorVersion; // WORD NumberOfNamedEntries; // WORD NumberOfIdEntries; //// IMAGE_RESOURCE_DIRECTORY_ENTRY DirectoryEntries[]; //} IMAGE_RESOURCE_DIRECTORY, *PIMAGE_RESOURCE_DIRECTORY; //#define IMAGE_RESOURCE_NAME_IS_STRING 0x80000000 //#define IMAGE_RESOURCE_DATA_IS_DIRECTORY 0x80000000 //// //// Each directory contains the 32-bit Name of the entry and an offset, //// relative to the beginning of the resource directory of the data associated //// with this directory entry. If the name of the entry is an actual text //// string instead of an integer Id, then the high order bit of the name field //// is set to one and the low order 31-bits are an offset, relative to the //// beginning of the resource directory of the string, which is of type //// IMAGE_RESOURCE_DIRECTORY_STRING. Otherwise the high bit is clear and the //// low-order 16-bits are the integer Id that identify this resource directory //// entry. If the directory entry is yet another resource directory (i.e. a //// subdirectory), then the high order bit of the offset field will be //// set to indicate this. Otherwise the high bit is clear and the offset //// field points to a resource data entry. //// //typedef struct _IMAGE_RESOURCE_DIRECTORY_ENTRY { // union { // struct { // DWORD NameOffset:31; // DWORD NameIsString:1; // } DUMMYSTRUCTNAME; // DWORD Name; // WORD Id; // } DUMMYUNIONNAME; // union { // DWORD OffsetToData; // struct { // DWORD OffsetToDirectory:31; // DWORD DataIsDirectory:1; // } DUMMYSTRUCTNAME2; // } DUMMYUNIONNAME2; //} IMAGE_RESOURCE_DIRECTORY_ENTRY, *PIMAGE_RESOURCE_DIRECTORY_ENTRY; //// //// For resource directory entries that have actual string names, the Name //// field of the directory entry points to an object of the following type. //// All of these string objects are stored together after the last resource //// directory entry and before the first resource data object. This minimizes //// the impact of these variable length objects on the alignment of the fixed //// size directory entry objects. //// //typedef struct _IMAGE_RESOURCE_DIRECTORY_STRING { // WORD Length; // CHAR NameString[ 1 ]; //} IMAGE_RESOURCE_DIRECTORY_STRING, *PIMAGE_RESOURCE_DIRECTORY_STRING; //typedef struct _IMAGE_RESOURCE_DIR_STRING_U { // WORD Length; // WCHAR NameString[ 1 ]; //} IMAGE_RESOURCE_DIR_STRING_U, *PIMAGE_RESOURCE_DIR_STRING_U; //// //// Each resource data entry describes a leaf node in the resource directory //// tree. It contains an offset, relative to the beginning of the resource //// directory of the data for the resource, a size field that gives the number //// of bytes of data at that offset, a CodePage that should be used when //// decoding code point values within the resource data. Typically for new //// applications the code page would be the unicode code page. //// //typedef struct _IMAGE_RESOURCE_DATA_ENTRY { // DWORD OffsetToData; // DWORD Size; // DWORD CodePage; // DWORD Reserved; //} IMAGE_RESOURCE_DATA_ENTRY, *PIMAGE_RESOURCE_DATA_ENTRY; private class Directory { internal readonly string Name; internal readonly int ID; internal ushort NumberOfNamedEntries; internal ushort NumberOfIdEntries; internal readonly List<object> Entries; internal Directory(string name, int id) { this.Name = name; this.ID = id; this.Entries = new List<object>(); } } private static int CompareResources(IWin32Resource left, IWin32Resource right) { int result = CompareResourceIdentifiers(left.TypeId, left.TypeName, right.TypeId, right.TypeName); return (result == 0) ? CompareResourceIdentifiers(left.Id, left.Name, right.Id, right.Name) : result; } //when comparing a string vs ordinal, the string should always be less than the ordinal. Per the spec, //entries identified by string must precede those identified by ordinal. private static int CompareResourceIdentifiers(int xOrdinal, string xString, int yOrdinal, string yString) { if (xString == null) { if (yString == null) { return xOrdinal - yOrdinal; } else { return 1; } } else if (yString == null) { return -1; } else { return String.Compare(xString, yString, StringComparison.OrdinalIgnoreCase); } } //sort the resources by ID least to greatest then by NAME. //Where strings and ordinals are compared, strings are less than ordinals. internal static IEnumerable<IWin32Resource> SortResources(IEnumerable<IWin32Resource> resources) { return resources.OrderBy(CompareResources); } //Win32 resources are supplied to the compiler in one of two forms, .RES (the output of the resource compiler), //or .OBJ (the output of running cvtres.exe on a .RES file). A .RES file is parsed and processed into //a set of objects implementing IWin32Resources. These are then ordered and the final image form is constructed //and written to the resource section. Resources in .OBJ form are already very close to their final output //form. Rather than reading them and parsing them into a set of objects similar to those produced by //processing a .RES file, we process them like the native linker would, copy the relevant sections from //the .OBJ into our output and apply some fixups. private void SerializeWin32Resources(uint resourcesRva) { var resourceSection = _module.Win32ResourceSection; if (resourceSection != null) { SerializeWin32Resources(resourceSection, resourcesRva); return; } var theResources = _module.Win32Resources; if (IteratorHelper.EnumerableIsEmpty(theResources)) { return; } SerializeWin32Resources(theResources, resourcesRva); } private void SerializeWin32Resources(IEnumerable<IWin32Resource> theResources, uint resourcesRva) { theResources = SortResources(theResources); Directory typeDirectory = new Directory(string.Empty, 0); Directory nameDirectory = null; Directory languageDirectory = null; int lastTypeID = int.MinValue; string lastTypeName = null; int lastID = int.MinValue; string lastName = null; uint sizeOfDirectoryTree = 16; //EDMAURER note that this list is assumed to be sorted lowest to highest //first by typeId, then by Id. foreach (IWin32Resource r in theResources) { bool typeDifferent = (r.TypeId < 0 && r.TypeName != lastTypeName) || r.TypeId > lastTypeID; if (typeDifferent) { lastTypeID = r.TypeId; lastTypeName = r.TypeName; if (lastTypeID < 0) { Debug.Assert(typeDirectory.NumberOfIdEntries == 0, "Not all Win32 resources with types encoded as strings precede those encoded as ints"); typeDirectory.NumberOfNamedEntries++; } else { typeDirectory.NumberOfIdEntries++; } sizeOfDirectoryTree += 24; typeDirectory.Entries.Add(nameDirectory = new Directory(lastTypeName, lastTypeID)); } if (typeDifferent || (r.Id < 0 && r.Name != lastName) || r.Id > lastID) { lastID = r.Id; lastName = r.Name; if (lastID < 0) { Debug.Assert(nameDirectory.NumberOfIdEntries == 0, "Not all Win32 resources with names encoded as strings precede those encoded as ints"); nameDirectory.NumberOfNamedEntries++; } else { nameDirectory.NumberOfIdEntries++; } sizeOfDirectoryTree += 24; nameDirectory.Entries.Add(languageDirectory = new Directory(lastName, lastID)); } languageDirectory.NumberOfIdEntries++; sizeOfDirectoryTree += 8; languageDirectory.Entries.Add(r); } MemoryStream stream = MemoryStream.GetInstance(); BinaryWriter dataWriter = new BinaryWriter(stream, true); //'dataWriter' is where opaque resource data goes as well as strings that are used as type or name identifiers this.WriteDirectory(typeDirectory, _win32ResourceWriter, 0, 0, sizeOfDirectoryTree, resourcesRva, dataWriter); dataWriter.BaseStream.WriteTo(_win32ResourceWriter.BaseStream); _win32ResourceWriter.WriteByte(0); while ((_win32ResourceWriter.BaseStream.Length % 4) != 0) { _win32ResourceWriter.WriteByte(0); } stream.Free(); } private void WriteDirectory(Directory directory, BinaryWriter writer, uint offset, uint level, uint sizeOfDirectoryTree, uint virtualAddressBase, BinaryWriter dataWriter) { writer.WriteUint(0); // Characteristics writer.WriteUint(0); // Timestamp writer.WriteUint(0); // Version writer.WriteUshort(directory.NumberOfNamedEntries); writer.WriteUshort(directory.NumberOfIdEntries); uint n = (uint)directory.Entries.Count; uint k = offset + 16 + n * 8; for (int i = 0; i < n; i++) { int id; string name; uint nameOffset = dataWriter.BaseStream.Position + sizeOfDirectoryTree; uint directoryOffset = k; Directory subDir = directory.Entries[i] as Directory; if (subDir != null) { id = subDir.ID; name = subDir.Name; if (level == 0) { k += SizeOfDirectory(subDir); } else { k += 16 + 8 * (uint)subDir.Entries.Count; } } else { //EDMAURER write out an IMAGE_RESOURCE_DATA_ENTRY followed //immediately by the data that it refers to. This results //in a layout different than that produced by pulling the resources //from an OBJ. In that case all of the data bits of a resource are //contiguous in .rsrc$02. After processing these will end up at //the end of .rsrc following all of the directory //info and IMAGE_RESOURCE_DATA_ENTRYs IWin32Resource r = (IWin32Resource)directory.Entries[i]; id = level == 0 ? r.TypeId : level == 1 ? r.Id : (int)r.LanguageId; name = level == 0 ? r.TypeName : level == 1 ? r.Name : null; dataWriter.WriteUint(virtualAddressBase + sizeOfDirectoryTree + 16 + dataWriter.BaseStream.Position); byte[] data = new List<byte>(r.Data).ToArray(); dataWriter.WriteUint((uint)data.Length); dataWriter.WriteUint(r.CodePage); dataWriter.WriteUint(0); dataWriter.WriteBytes(data); while ((dataWriter.BaseStream.Length % 4) != 0) { dataWriter.WriteByte(0); } } if (id >= 0) { writer.WriteInt(id); } else { if (name == null) { name = string.Empty; } writer.WriteUint(nameOffset | 0x80000000); dataWriter.WriteUshort((ushort)name.Length); dataWriter.WriteChars(name.ToCharArray()); // REVIEW: what happens if the name contains chars that do not fit into a single utf8 code point? } if (subDir != null) { writer.WriteUint(directoryOffset | 0x80000000); } else { writer.WriteUint(nameOffset); } } k = offset + 16 + n * 8; for (int i = 0; i < n; i++) { Directory subDir = directory.Entries[i] as Directory; if (subDir != null) { this.WriteDirectory(subDir, writer, k, level + 1, sizeOfDirectoryTree, virtualAddressBase, dataWriter); if (level == 0) { k += SizeOfDirectory(subDir); } else { k += 16 + 8 * (uint)subDir.Entries.Count; } } } } private static uint SizeOfDirectory(Directory/*!*/ directory) { uint n = (uint)directory.Entries.Count; uint size = 16 + 8 * n; for (int i = 0; i < n; i++) { Directory subDir = directory.Entries[i] as Directory; if (subDir != null) { size += 16 + 8 * (uint)subDir.Entries.Count; } } return size; } private void SerializeWin32Resources(ResourceSection resourceSections, uint resourcesRva) { _win32ResourceWriter.WriteBytes(resourceSections.SectionBytes); var savedPosition = _win32ResourceWriter.BaseStream.Position; var readStream = new System.IO.MemoryStream(resourceSections.SectionBytes); var reader = new BinaryReader(readStream); foreach (int addressToFixup in resourceSections.Relocations) { _win32ResourceWriter.BaseStream.Position = (uint)addressToFixup; reader.BaseStream.Position = addressToFixup; _win32ResourceWriter.WriteUint(reader.ReadUInt32() + resourcesRva); } _win32ResourceWriter.BaseStream.Position = savedPosition; } //#define IMAGE_FILE_RELOCS_STRIPPED 0x0001 // Relocation info stripped from file. //#define IMAGE_FILE_EXECUTABLE_IMAGE 0x0002 // File is executable (i.e. no unresolved externel references). //#define IMAGE_FILE_LINE_NUMS_STRIPPED 0x0004 // Line nunbers stripped from file. //#define IMAGE_FILE_LOCAL_SYMS_STRIPPED 0x0008 // Local symbols stripped from file. //#define IMAGE_FILE_AGGRESIVE_WS_TRIM 0x0010 // Agressively trim working set //#define IMAGE_FILE_LARGE_ADDRESS_AWARE 0x0020 // App can handle >2gb addresses //#define IMAGE_FILE_BYTES_REVERSED_LO 0x0080 // Bytes of machine word are reversed. //#define IMAGE_FILE_32BIT_MACHINE 0x0100 // 32 bit word machine. //#define IMAGE_FILE_DEBUG_STRIPPED 0x0200 // Debugging info stripped from file in .DBG file //#define IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP 0x0400 // If Image is on removable media, copy and run from the swap file. //#define IMAGE_FILE_NET_RUN_FROM_SWAP 0x0800 // If Image is on Net, copy and run from the swap file. //#define IMAGE_FILE_SYSTEM 0x1000 // System File. //#define IMAGE_FILE_DLL 0x2000 // File is a DLL. //#define IMAGE_FILE_UP_SYSTEM_ONLY 0x4000 // File should only be run on a UP machine //#define IMAGE_FILE_BYTES_REVERSED_HI 0x8000 // Bytes of machine word are reversed. private static readonly byte[] s_dosHeader = new byte[] { 0x4d, 0x5a, 0x90, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x0e, 0x1f, 0xba, 0x0e, 0x00, 0xb4, 0x09, 0xcd, 0x21, 0xb8, 0x01, 0x4c, 0xcd, 0x21, 0x54, 0x68, 0x69, 0x73, 0x20, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x20, 0x63, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x20, 0x62, 0x65, 0x20, 0x72, 0x75, 0x6e, 0x20, 0x69, 0x6e, 0x20, 0x44, 0x4f, 0x53, 0x20, 0x6d, 0x6f, 0x64, 0x65, 0x2e, 0x0d, 0x0d, 0x0a, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; private void WriteHeaders(Stream peStream, out long ntHeaderTimestampPosition) { IModule module = _module; NtHeader ntHeader = _ntHeader; BinaryWriter writer = new BinaryWriter(_headerStream); // MS-DOS stub (128 bytes) writer.WriteBytes(s_dosHeader); // TODO: provide an option to suppress the second half of the DOS header? // PE Signature (4 bytes) writer.WriteUint(0x00004550); /* "PE\0\0" */ // COFF Header 20 bytes writer.WriteUshort((ushort)module.Machine); writer.WriteUshort(ntHeader.NumberOfSections); ntHeaderTimestampPosition = writer.BaseStream.Position + peStream.Position; writer.WriteUint(ntHeader.TimeDateStamp); writer.WriteUint(ntHeader.PointerToSymbolTable); writer.WriteUint(0); // NumberOfSymbols writer.WriteUshort((ushort)(!module.Requires64bits ? 224 : 240)); // SizeOfOptionalHeader // ushort characteristics = 0x0002|0x0004|0x0008; // executable | no COFF line nums | no COFF symbols (as required by the standard) ushort characteristics = 0x0002; // executable (as required by the Linker team). if (module.Kind == ModuleKind.DynamicallyLinkedLibrary || module.Kind == ModuleKind.WindowsRuntimeMetadata) { characteristics |= 0x2000; } if (module.Requires32bits) { characteristics |= 0x0100; // 32 bit machine (The standard says to always set this, the linker team says otherwise) //The loader team says that this is not used for anything in the OS. } else { characteristics |= 0x0020; // large address aware (the standard says never to set this, the linker team says otherwise). //The loader team says that this is not overridden for managed binaries and will be respected if set. } writer.WriteUshort(characteristics); // PE Header (224 bytes if 32 bits, 240 bytes if 64 bit) if (!module.Requires64bits) { writer.WriteUshort(0x10B); // Magic = PE32 // 2 } else { writer.WriteUshort(0x20B); // Magic = PE32+ // 2 } writer.WriteByte(module.LinkerMajorVersion); // 3 writer.WriteByte(module.LinkerMinorVersion); // 4 writer.WriteUint(ntHeader.SizeOfCode); // 8 writer.WriteUint(ntHeader.SizeOfInitializedData); // 12 writer.WriteUint(ntHeader.SizeOfUninitializedData); // 16 writer.WriteUint(ntHeader.AddressOfEntryPoint); // 20 writer.WriteUint(ntHeader.BaseOfCode); // 24 if (!module.Requires64bits) { writer.WriteUint(ntHeader.BaseOfData); // 28 writer.WriteUint((uint)module.BaseAddress); // 32 } else { writer.WriteUlong(module.BaseAddress); // 32 } writer.WriteUint(0x2000); // SectionAlignment 36 writer.WriteUint(module.FileAlignment); // 40 writer.WriteUshort(4); // MajorOperatingSystemVersion 42 writer.WriteUshort(0); // MinorOperatingSystemVersion 44 writer.WriteUshort(0); // MajorImageVersion 46 writer.WriteUshort(0); // MinorImageVersion 48 writer.WriteUshort(module.MajorSubsystemVersion); // MajorSubsystemVersion 50 writer.WriteUshort(module.MinorSubsystemVersion); // MinorSubsystemVersion 52 writer.WriteUint(0); // Win32VersionValue 56 writer.WriteUint(ntHeader.SizeOfImage); // 60 writer.WriteUint(ntHeader.SizeOfHeaders); // 64 writer.WriteUint(0); // CheckSum 68 switch (module.Kind) { case ModuleKind.ConsoleApplication: case ModuleKind.DynamicallyLinkedLibrary: case ModuleKind.WindowsRuntimeMetadata: writer.WriteUshort(3); // 70 break; case ModuleKind.WindowsApplication: writer.WriteUshort(2); // 70 break; default: writer.WriteUshort(0); // break; } writer.WriteUshort(module.DllCharacteristics); if (!module.Requires64bits) { writer.WriteUint((uint)module.SizeOfStackReserve); // 76 writer.WriteUint((uint)module.SizeOfStackCommit); // 80 writer.WriteUint((uint)module.SizeOfHeapReserve); // 84 writer.WriteUint((uint)module.SizeOfHeapCommit); // 88 } else { writer.WriteUlong(module.SizeOfStackReserve); // 80 writer.WriteUlong(module.SizeOfStackCommit); // 88 writer.WriteUlong(module.SizeOfHeapReserve); // 96 writer.WriteUlong(module.SizeOfHeapCommit); // 104 } writer.WriteUint(0); // LoaderFlags 92|108 writer.WriteUint(16); // numberOfDataDirectories 96|112 writer.WriteUint(ntHeader.ExportTable.RelativeVirtualAddress); // 100|116 writer.WriteUint(ntHeader.ExportTable.Size); // 104|120 writer.WriteUint(ntHeader.ImportTable.RelativeVirtualAddress); // 108|124 writer.WriteUint(ntHeader.ImportTable.Size); // 112|128 writer.WriteUint(ntHeader.ResourceTable.RelativeVirtualAddress); // 116|132 writer.WriteUint(ntHeader.ResourceTable.Size); // 120|136 writer.WriteUint(ntHeader.ExceptionTable.RelativeVirtualAddress); // 124|140 writer.WriteUint(ntHeader.ExceptionTable.Size); // 128|144 writer.WriteUint(ntHeader.CertificateTable.RelativeVirtualAddress); // 132|148 writer.WriteUint(ntHeader.CertificateTable.Size); // 136|152 writer.WriteUint(ntHeader.BaseRelocationTable.RelativeVirtualAddress); // 140|156 writer.WriteUint(ntHeader.BaseRelocationTable.Size); // 144|160 writer.WriteUint(ntHeader.DebugTable.RelativeVirtualAddress); // 148|164 writer.WriteUint(ntHeader.DebugTable.Size); // 152|168 writer.WriteUint(ntHeader.CopyrightTable.RelativeVirtualAddress); // 156|172 writer.WriteUint(ntHeader.CopyrightTable.Size); // 160|176 writer.WriteUint(ntHeader.GlobalPointerTable.RelativeVirtualAddress); // 164|180 writer.WriteUint(ntHeader.GlobalPointerTable.Size); // 168|184 writer.WriteUint(ntHeader.ThreadLocalStorageTable.RelativeVirtualAddress); // 172|188 writer.WriteUint(ntHeader.ThreadLocalStorageTable.Size); // 176|192 writer.WriteUint(ntHeader.LoadConfigTable.RelativeVirtualAddress); // 180|196 writer.WriteUint(ntHeader.LoadConfigTable.Size); // 184|200 writer.WriteUint(ntHeader.BoundImportTable.RelativeVirtualAddress); // 188|204 writer.WriteUint(ntHeader.BoundImportTable.Size); // 192|208 writer.WriteUint(ntHeader.ImportAddressTable.RelativeVirtualAddress); // 196|212 writer.WriteUint(ntHeader.ImportAddressTable.Size); // 200|216 writer.WriteUint(ntHeader.DelayImportTable.RelativeVirtualAddress); // 204|220 writer.WriteUint(ntHeader.DelayImportTable.Size); // 208|224 writer.WriteUint(ntHeader.CliHeaderTable.RelativeVirtualAddress); // 212|228 writer.WriteUint(ntHeader.CliHeaderTable.Size); // 216|232 writer.WriteUlong(0); // 224|240 // Section Headers WriteSectionHeader(_textSection, writer); WriteSectionHeader(_rdataSection, writer); WriteSectionHeader(_sdataSection, writer); WriteSectionHeader(_coverSection, writer); WriteSectionHeader(_resourceSection, writer); WriteSectionHeader(_relocSection, writer); WriteSectionHeader(_tlsSection, writer); writer.BaseStream.WriteTo(peStream); _headerStream = _emptyStream; } private static void WriteSectionHeader(SectionHeader sectionHeader, BinaryWriter writer) { if (sectionHeader.VirtualSize == 0) { return; } for (int j = 0, m = sectionHeader.Name.Length; j < 8; j++) { if (j < m) { writer.WriteByte((byte)sectionHeader.Name[j]); } else { writer.WriteByte(0); } } writer.WriteUint(sectionHeader.VirtualSize); writer.WriteUint(sectionHeader.RelativeVirtualAddress); writer.WriteUint(sectionHeader.SizeOfRawData); writer.WriteUint(sectionHeader.PointerToRawData); writer.WriteUint(sectionHeader.PointerToRelocations); writer.WriteUint(sectionHeader.PointerToLinenumbers); writer.WriteUshort(sectionHeader.NumberOfRelocations); writer.WriteUshort(sectionHeader.NumberOfLinenumbers); writer.WriteUint(sectionHeader.Characteristics); } private void WriteTextSection( Stream peStream, CorHeader corHeader, MemoryStream metadataStream, MemoryStream ilStream, MemoryStream mappedFieldDataStream, MemoryStream managedResourceStream, MetadataSizes metadataSizes, ContentId pdbContentId, out long metadataPosition) { peStream.Position = _textSection.PointerToRawData; if (_emitRuntimeStartupStub) { this.WriteImportAddressTable(peStream); } WriteCorHeader(peStream, corHeader); WriteIL(peStream, ilStream); metadataPosition = peStream.Position; WriteMetadata(peStream, metadataStream); WriteManagedResources(peStream, managedResourceStream); WriteSpaceForHash(peStream, (int)corHeader.StrongNameSignature.Size); WriteDebugTable(peStream, pdbContentId, metadataSizes); if (_emitRuntimeStartupStub) { WriteImportTable(peStream); WriteNameTable(peStream); WriteRuntimeStartupStub(peStream); } WriteMappedFieldData(peStream, mappedFieldDataStream); } private void WriteImportAddressTable(Stream peStream) { BinaryWriter writer = new BinaryWriter(new MemoryStream(16)); bool use32bitAddresses = !_module.Requires64bits; uint importTableRVA = _ntHeader.ImportTable.RelativeVirtualAddress; uint ilRVA = importTableRVA + 40; uint hintRva = ilRVA + (use32bitAddresses ? 12u : 16u); // Import Address Table if (use32bitAddresses) { writer.WriteUint(hintRva); // 4 writer.WriteUint(0); // 8 } else { writer.WriteUlong(hintRva); // 8 writer.WriteUlong(0); // 16 } writer.BaseStream.WriteTo(peStream); } private void WriteImportTable(Stream peStream) { BinaryWriter writer = new BinaryWriter(new MemoryStream(70)); bool use32bitAddresses = !_module.Requires64bits; uint importTableRVA = _ntHeader.ImportTable.RelativeVirtualAddress; uint ilRVA = importTableRVA + 40; uint hintRva = ilRVA + (use32bitAddresses ? 12u : 16u); uint nameRva = hintRva + 12 + 2; // Import table writer.WriteUint(ilRVA); // 4 writer.WriteUint(0); // 8 writer.WriteUint(0); // 12 writer.WriteUint(nameRva); // 16 writer.WriteUint(_ntHeader.ImportAddressTable.RelativeVirtualAddress); // 20 writer.BaseStream.Position += 20; // 40 // Import Lookup table if (use32bitAddresses) { writer.WriteUint(hintRva); // 44 writer.WriteUint(0); // 48 writer.WriteUint(0); // 52 } else { writer.WriteUlong(hintRva); // 48 writer.WriteUlong(0); // 56 } // Hint table writer.WriteUshort(0); // Hint 54|58 string entryPointName = (_module.Kind == ModuleKind.DynamicallyLinkedLibrary || _module.Kind == ModuleKind.WindowsRuntimeMetadata) ? "_CorDllMain" : "_CorExeMain"; foreach (char ch in entryPointName) { writer.WriteByte((byte)ch); // 65|69 } writer.WriteByte(0); // 66|70 writer.BaseStream.WriteTo(peStream); } private static void WriteNameTable(Stream peStream) { BinaryWriter writer = new BinaryWriter(new MemoryStream(14)); foreach (char ch in "mscoree.dll") { writer.WriteByte((byte)ch); // 11 } writer.WriteByte(0); // 12 writer.WriteUshort(0); // 14 writer.BaseStream.WriteTo(peStream); } private static void WriteCorHeader(Stream peStream, CorHeader corHeader) { BinaryWriter writer = new BinaryWriter(new MemoryStream(72)); writer.WriteUint(72); // Number of bytes in this header 4 writer.WriteUshort(corHeader.MajorRuntimeVersion); // 6 writer.WriteUshort(corHeader.MinorRuntimeVersion); // 8 writer.WriteUint(corHeader.MetadataDirectory.RelativeVirtualAddress); // 12 writer.WriteUint(corHeader.MetadataDirectory.Size); // 16 writer.WriteUint((uint)corHeader.Flags); // 20 writer.WriteUint(corHeader.EntryPointToken); // 24 writer.WriteUint(corHeader.Resources.Size == 0 ? 0u : corHeader.Resources.RelativeVirtualAddress); // 28 writer.WriteUint(corHeader.Resources.Size); // 32 writer.WriteUint(corHeader.StrongNameSignature.Size == 0 ? 0u : corHeader.StrongNameSignature.RelativeVirtualAddress); // 36 writer.WriteUint(corHeader.StrongNameSignature.Size); // 40 writer.WriteUint(corHeader.CodeManagerTable.RelativeVirtualAddress); // 44 writer.WriteUint(corHeader.CodeManagerTable.Size); // 48 writer.WriteUint(corHeader.VTableFixups.RelativeVirtualAddress); // 52 writer.WriteUint(corHeader.VTableFixups.Size); // 56 writer.WriteUint(corHeader.ExportAddressTableJumps.RelativeVirtualAddress); // 60 writer.WriteUint(corHeader.ExportAddressTableJumps.Size); // 64 writer.WriteUlong(0); // 72 writer.BaseStream.WriteTo(peStream); } private static void WriteIL(Stream peStream, MemoryStream ilStream) { ilStream.WriteTo(peStream); while (peStream.Position % 4 != 0) { peStream.WriteByte(0); } } private static void WriteMappedFieldData(Stream peStream, MemoryStream dataStream) { dataStream.WriteTo(peStream); while (peStream.Position % 4 != 0) { peStream.WriteByte(0); } } private static void WriteSpaceForHash(Stream peStream, int strongNameSignatureSize) { while (strongNameSignatureSize > 0) { peStream.WriteByte(0); strongNameSignatureSize--; } } private static void WriteMetadata(Stream peStream, MemoryStream metadataStream) { metadataStream.WriteTo(peStream); while (peStream.Position % 4 != 0) { peStream.WriteByte(0); } } private static void WriteManagedResources(Stream peStream, MemoryStream managedResourceStream) { managedResourceStream.WriteTo(peStream); while (peStream.Position % 4 != 0) { peStream.WriteByte(0); } } private void WriteDebugTable(Stream peStream, ContentId pdbContentId, MetadataSizes metadataSizes) { if (!EmitPdb) { return; } MemoryStream stream = new MemoryStream(); BinaryWriter writer = new BinaryWriter(stream); // characteristics: writer.WriteUint(0); // PDB stamp writer.WriteBytes(pdbContentId.Stamp); // version writer.WriteUint(0); // type: const int ImageDebugTypeCodeView = 2; writer.WriteUint(ImageDebugTypeCodeView); // size of data: writer.WriteUint((uint)ComputeSizeOfDebugDirectoryData()); uint dataOffset = (uint)ComputeOffsetToDebugTable(metadataSizes) + ImageDebugDirectoryBaseSize; // PointerToRawData (RVA of the data): writer.WriteUint(_textSection.RelativeVirtualAddress + dataOffset); // AddressOfRawData (position of the data in the PE stream): writer.WriteUint(_textSection.PointerToRawData + dataOffset); writer.WriteByte((byte)'R'); writer.WriteByte((byte)'S'); writer.WriteByte((byte)'D'); writer.WriteByte((byte)'S'); // PDB id: writer.WriteBytes(pdbContentId.Guid); // age writer.WriteUint(PdbWriter.Age); // UTF-8 encoded zero-terminated path to PDB writer.WriteString(_pdbPathOpt, emitNullTerminator: true); writer.BaseStream.WriteTo(peStream); stream.Free(); } private void WriteRuntimeStartupStub(Stream peStream) { BinaryWriter writer = new BinaryWriter(new MemoryStream(16)); // entry point code, consisting of a jump indirect to _CorXXXMain if (!_module.Requires64bits) { //emit 0's (nops) to pad the entry point code so that the target address is aligned on a 4 byte boundary. for (uint i = 0, n = (uint)(BitArithmeticUtilities.Align((uint)peStream.Position, 4) - peStream.Position); i < n; i++) writer.WriteByte(0); writer.WriteUshort(0); writer.WriteByte(0xff); writer.WriteByte(0x25); //4 writer.WriteUint(_ntHeader.ImportAddressTable.RelativeVirtualAddress + (uint)_module.BaseAddress); //8 } else { //emit 0's (nops) to pad the entry point code so that the target address is aligned on a 8 byte boundary. for (uint i = 0, n = (uint)(BitArithmeticUtilities.Align((uint)peStream.Position, 8) - peStream.Position); i < n; i++) writer.WriteByte(0); writer.WriteUint(0); writer.WriteUshort(0); writer.WriteByte(0xff); writer.WriteByte(0x25); //8 writer.WriteUlong(_ntHeader.ImportAddressTable.RelativeVirtualAddress + _module.BaseAddress); //16 } writer.BaseStream.WriteTo(peStream); } private void WriteCoverSection(Stream peStream) { peStream.Position = _coverSection.PointerToRawData; _coverageDataWriter.BaseStream.WriteTo(peStream); } private void WriteRdataSection(Stream peStream) { peStream.Position = _rdataSection.PointerToRawData; _rdataWriter.BaseStream.WriteTo(peStream); } private void WriteSdataSection(Stream peStream) { peStream.Position = _sdataSection.PointerToRawData; _sdataWriter.BaseStream.WriteTo(peStream); } private void WriteRelocSection(Stream peStream) { if (!_emitRuntimeStartupStub) { //No need to write out a reloc section, but there is still a need to pad out the peStream so that it is an even multiple of module.FileAlignment if (_relocSection.PointerToRawData != peStream.Position) { //for example, the resource section did not end bang on the alignment boundary peStream.Position = _relocSection.PointerToRawData - 1; peStream.WriteByte(0); } return; } peStream.Position = _relocSection.PointerToRawData; BinaryWriter writer = new BinaryWriter(new MemoryStream(_module.FileAlignment)); writer.WriteUint(((_ntHeader.AddressOfEntryPoint + 2) / 0x1000) * 0x1000); writer.WriteUint(_module.Requires64bits && !_module.RequiresAmdInstructionSet ? 14u : 12u); uint offsetWithinPage = (_ntHeader.AddressOfEntryPoint + 2) % 0x1000; uint relocType = _module.Requires64bits ? 10u : 3u; ushort s = (ushort)((relocType << 12) | offsetWithinPage); writer.WriteUshort(s); if (_module.Requires64bits && !_module.RequiresAmdInstructionSet) { writer.WriteUint(relocType << 12); } writer.WriteUshort(0); // next chunk's RVA writer.BaseStream.Position = _module.FileAlignment; writer.BaseStream.WriteTo(peStream); } private void WriteResourceSection(Stream peStream) { if (_win32ResourceWriter.BaseStream.Length == 0) { return; } peStream.Position = _resourceSection.PointerToRawData; _win32ResourceWriter.BaseStream.WriteTo(peStream); peStream.WriteByte(0); while (peStream.Position % 8 != 0) { peStream.WriteByte(0); } } private void WriteTlsSection(Stream peStream) { peStream.Position = _tlsSection.PointerToRawData; _tlsDataWriter.BaseStream.WriteTo(peStream); } } }
apache-2.0
DimitrisAndreou/flexigraph
src/gr/forth/ics/graph/event/GraphListener.java
105
package gr.forth.ics.graph.event; public interface GraphListener extends NodeListener, EdgeListener { }
apache-2.0
k21/buck
test/com/facebook/buck/rules/CachingBuildEngineFactory.java
6351
/* * Copyright 2016-present Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package com.facebook.buck.rules; import com.facebook.buck.rules.keys.DefaultRuleKeyCache; import com.facebook.buck.rules.keys.RuleKeyFactories; import com.facebook.buck.step.DefaultStepRunner; import com.facebook.buck.testutil.DummyFileHashCache; import com.facebook.buck.util.cache.FileHashCacheMode; import com.facebook.buck.util.concurrent.ListeningMultiSemaphore; import com.facebook.buck.util.concurrent.ResourceAllocationFairness; import com.facebook.buck.util.concurrent.ResourceAmounts; import com.facebook.buck.util.concurrent.WeightedListeningExecutorService; import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; import java.util.Optional; /** Handy way to create new {@link CachingBuildEngine} instances for test purposes. */ public class CachingBuildEngineFactory { private CachingBuildEngine.BuildMode buildMode = CachingBuildEngine.BuildMode.SHALLOW; private CachingBuildEngine.MetadataStorage metadataStorage = CachingBuildEngine.MetadataStorage.FILESYSTEM; private CachingBuildEngine.DepFiles depFiles = CachingBuildEngine.DepFiles.ENABLED; private long maxDepFileCacheEntries = 256L; private Optional<Long> artifactCacheSizeLimit = Optional.empty(); private long inputFileSizeLimit = Long.MAX_VALUE; private Optional<RuleKeyFactories> ruleKeyFactories = Optional.empty(); private CachingBuildEngineDelegate cachingBuildEngineDelegate; private WeightedListeningExecutorService executorService; private BuildRuleResolver buildRuleResolver; private ResourceAwareSchedulingInfo resourceAwareSchedulingInfo = ResourceAwareSchedulingInfo.NON_AWARE_SCHEDULING_INFO; private boolean logBuildRuleFailuresInline = true; private BuildInfoStoreManager buildInfoStoreManager; private FileHashCacheMode fileHashCacheMode = FileHashCacheMode.DEFAULT; public CachingBuildEngineFactory( BuildRuleResolver buildRuleResolver, BuildInfoStoreManager buildInfoStoreManager) { this.cachingBuildEngineDelegate = new LocalCachingBuildEngineDelegate(new DummyFileHashCache()); this.executorService = toWeighted(MoreExecutors.newDirectExecutorService()); this.buildRuleResolver = buildRuleResolver; this.buildInfoStoreManager = buildInfoStoreManager; } public CachingBuildEngineFactory setBuildMode(CachingBuildEngine.BuildMode buildMode) { this.buildMode = buildMode; return this; } public CachingBuildEngineFactory setFileHashCachMode(FileHashCacheMode fileHashCachMode) { this.fileHashCacheMode = fileHashCachMode; return this; } public CachingBuildEngineFactory setDepFiles(CachingBuildEngine.DepFiles depFiles) { this.depFiles = depFiles; return this; } public CachingBuildEngineFactory setMaxDepFileCacheEntries(long maxDepFileCacheEntries) { this.maxDepFileCacheEntries = maxDepFileCacheEntries; return this; } public CachingBuildEngineFactory setArtifactCacheSizeLimit( Optional<Long> artifactCacheSizeLimit) { this.artifactCacheSizeLimit = artifactCacheSizeLimit; return this; } public CachingBuildEngineFactory setCachingBuildEngineDelegate( CachingBuildEngineDelegate cachingBuildEngineDelegate) { this.cachingBuildEngineDelegate = cachingBuildEngineDelegate; return this; } public CachingBuildEngineFactory setExecutorService(ListeningExecutorService executorService) { this.executorService = toWeighted(executorService); return this; } public CachingBuildEngineFactory setExecutorService( WeightedListeningExecutorService executorService) { this.executorService = executorService; return this; } public CachingBuildEngineFactory setRuleKeyFactories(RuleKeyFactories ruleKeyFactories) { this.ruleKeyFactories = Optional.of(ruleKeyFactories); return this; } public CachingBuildEngineFactory setLogBuildRuleFailuresInline( boolean logBuildRuleFailuresInline) { this.logBuildRuleFailuresInline = logBuildRuleFailuresInline; return this; } public CachingBuildEngine build() { if (ruleKeyFactories.isPresent()) { SourcePathRuleFinder ruleFinder = new SourcePathRuleFinder(buildRuleResolver); return new CachingBuildEngine( cachingBuildEngineDelegate, executorService, new DefaultStepRunner(), buildMode, metadataStorage, depFiles, maxDepFileCacheEntries, artifactCacheSizeLimit, buildRuleResolver, buildInfoStoreManager, ruleFinder, DefaultSourcePathResolver.from(ruleFinder), ruleKeyFactories.get(), resourceAwareSchedulingInfo, logBuildRuleFailuresInline, fileHashCacheMode); } return new CachingBuildEngine( cachingBuildEngineDelegate, executorService, new DefaultStepRunner(), buildMode, metadataStorage, depFiles, maxDepFileCacheEntries, artifactCacheSizeLimit, buildRuleResolver, buildInfoStoreManager, resourceAwareSchedulingInfo, logBuildRuleFailuresInline, RuleKeyFactories.of( 0, cachingBuildEngineDelegate.getFileHashCache(), buildRuleResolver, inputFileSizeLimit, new DefaultRuleKeyCache<>()), fileHashCacheMode); } private static WeightedListeningExecutorService toWeighted(ListeningExecutorService service) { return new WeightedListeningExecutorService( new ListeningMultiSemaphore( ResourceAmounts.of(Integer.MAX_VALUE, 0, 0, 0), ResourceAllocationFairness.FAIR), /* defaultPermits */ ResourceAmounts.of(1, 0, 0, 0), service); } }
apache-2.0
khartec/waltz
waltz-jobs/src/main/java/org/finos/waltz/jobs/harness/MeasurableRatingHarness.java
2714
/* * Waltz - Enterprise Architecture * Copyright (C) 2016, 2017, 2018, 2019 Waltz open source project * See README.md for more information * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific * */ package org.finos.waltz.jobs.harness; import org.finos.waltz.data.measurable.MeasurableIdSelectorFactory; import org.finos.waltz.data.measurable_rating.MeasurableRatingDao; import org.finos.waltz.model.EntityReference; import org.finos.waltz.model.IdSelectionOptions; import org.finos.waltz.model.tally.MeasurableRatingTally; import org.finos.waltz.model.tally.Tally; import org.finos.waltz.service.DIConfiguration; import org.jooq.Record1; import org.jooq.Select; import org.jooq.tools.json.ParseException; import org.springframework.context.annotation.AnnotationConfigApplicationContext; import java.util.List; import static org.finos.waltz.model.EntityKind.MEASURABLE; import static org.finos.waltz.model.EntityReference.mkRef; import static org.finos.waltz.model.HierarchyQueryScope.CHILDREN; public class MeasurableRatingHarness { public static void main(String[] args) throws ParseException { AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext(DIConfiguration.class); MeasurableRatingDao measurableRatingDao = ctx.getBean(MeasurableRatingDao.class); MeasurableIdSelectorFactory measurableIdSelectorFactory = new MeasurableIdSelectorFactory(); EntityReference direct = mkRef(MEASURABLE, 18310); EntityReference indirect = mkRef(MEASURABLE, 18064); IdSelectionOptions directOpts = IdSelectionOptions.mkOpts(direct, CHILDREN); IdSelectionOptions indirectOpts = IdSelectionOptions.mkOpts(indirect, CHILDREN); Select<Record1<Long>> directSelector = measurableIdSelectorFactory.apply(directOpts); Select<Record1<Long>> indirectSelector = measurableIdSelectorFactory.apply(indirectOpts); List<MeasurableRatingTally> directTallies = measurableRatingDao.statsForRelatedMeasurable(directSelector); List<MeasurableRatingTally> indirectTallies = measurableRatingDao.statsForRelatedMeasurable(indirectSelector); List<Tally<Long>> tallies = measurableRatingDao.tallyByMeasurableCategoryId(1L); System.out.println(tallies); } }
apache-2.0
khartec/waltz
waltz-integration-test/src/test/java/org/finos/waltz/integration_test/inmem/helpers/InvolvementHelper.java
2071
package org.finos.waltz.integration_test.inmem.helpers; import org.finos.waltz.model.EntityKind; import org.finos.waltz.model.EntityReference; import org.finos.waltz.model.Operation; import org.finos.waltz.model.involvement.EntityInvolvementChangeCommand; import org.finos.waltz.model.involvement.ImmutableEntityInvolvementChangeCommand; import org.finos.waltz.model.involvement_kind.ImmutableInvolvementKindCreateCommand; import org.finos.waltz.model.involvement_kind.InvolvementKindCreateCommand; import org.finos.waltz.service.involvement.InvolvementService; import org.finos.waltz.service.involvement_kind.InvolvementKindService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import static org.finos.waltz.model.EntityReference.mkRef; @Service public class InvolvementHelper { private final InvolvementService involvementService; private final InvolvementKindService involvementKindService; @Autowired public InvolvementHelper(InvolvementService involvementService, InvolvementKindService involvementKindService) { this.involvementService = involvementService; this.involvementKindService = involvementKindService; } public long mkInvolvementKind(String name) { InvolvementKindCreateCommand cmd = ImmutableInvolvementKindCreateCommand.builder() .description(name) .name(name) .externalId(name) .build(); return involvementKindService.create(cmd, NameHelper.mkUserId("involvementHelper")); } public void createInvolvement(Long pId, long invId, EntityReference entity) { EntityInvolvementChangeCommand cmd = ImmutableEntityInvolvementChangeCommand.builder() .involvementKindId((int) invId) .personEntityRef(mkRef(EntityKind.PERSON, pId)) .operation(Operation.ADD) .build(); involvementService.addEntityInvolvement(NameHelper.mkUserId(), entity, cmd); } }
apache-2.0
DPSablowski/croco
build-CroCo-Desktop_Qt_5_8_0_GCC_64bit-Release/ui_moments.h
12132
/******************************************************************************** ** Form generated from reading UI file 'moments.ui' ** ** Created by: Qt User Interface Compiler version 5.8.0 ** ** WARNING! All changes made in this file will be lost when recompiling UI file! ********************************************************************************/ #ifndef UI_MOMENTS_H #define UI_MOMENTS_H #include <QtCore/QVariant> #include <QtWidgets/QAction> #include <QtWidgets/QApplication> #include <QtWidgets/QButtonGroup> #include <QtWidgets/QCheckBox> #include <QtWidgets/QDialog> #include <QtWidgets/QFrame> #include <QtWidgets/QHBoxLayout> #include <QtWidgets/QHeaderView> #include <QtWidgets/QLabel> #include <QtWidgets/QLineEdit> #include <QtWidgets/QPushButton> #include <QtWidgets/QSpacerItem> #include <QtWidgets/QSpinBox> #include <QtWidgets/QTableWidget> #include <QtWidgets/QVBoxLayout> QT_BEGIN_NAMESPACE class Ui_Moments { public: QHBoxLayout *horizontalLayout_9; QVBoxLayout *verticalLayout; QHBoxLayout *horizontalLayout; QLabel *label; QLineEdit *lineEdit; QHBoxLayout *horizontalLayout_2; QLabel *label_2; QLineEdit *lineEdit_2; QHBoxLayout *horizontalLayout_6; QLabel *label_5; QLineEdit *lineEdit_4; QHBoxLayout *horizontalLayout_3; QLabel *label_3; QLineEdit *lineEdit_3; QHBoxLayout *horizontalLayout_4; QLabel *label_4; QSpinBox *spinBox; QPushButton *pushButton_2; QFrame *line; QHBoxLayout *horizontalLayout_7; QPushButton *pushButton_3; QLineEdit *lineEdit_5; QCheckBox *checkBox; QSpinBox *spinBox_4; QLabel *label_9; QHBoxLayout *horizontalLayout_8; QPushButton *pushButton_4; QLineEdit *lineEdit_6; QLabel *label_6; QSpinBox *spinBox_2; QLabel *label_7; QSpinBox *spinBox_3; QLabel *label_8; QLineEdit *lineEdit_7; QSpacerItem *verticalSpacer; QHBoxLayout *horizontalLayout_5; QSpacerItem *horizontalSpacer; QPushButton *pushButton; QTableWidget *tableWidget; void setupUi(QDialog *Moments) { if (Moments->objectName().isEmpty()) Moments->setObjectName(QStringLiteral("Moments")); Moments->resize(917, 516); horizontalLayout_9 = new QHBoxLayout(Moments); horizontalLayout_9->setObjectName(QStringLiteral("horizontalLayout_9")); verticalLayout = new QVBoxLayout(); verticalLayout->setObjectName(QStringLiteral("verticalLayout")); horizontalLayout = new QHBoxLayout(); horizontalLayout->setObjectName(QStringLiteral("horizontalLayout")); label = new QLabel(Moments); label->setObjectName(QStringLiteral("label")); horizontalLayout->addWidget(label); lineEdit = new QLineEdit(Moments); lineEdit->setObjectName(QStringLiteral("lineEdit")); QSizePolicy sizePolicy(QSizePolicy::Maximum, QSizePolicy::Fixed); sizePolicy.setHorizontalStretch(0); sizePolicy.setVerticalStretch(0); sizePolicy.setHeightForWidth(lineEdit->sizePolicy().hasHeightForWidth()); lineEdit->setSizePolicy(sizePolicy); horizontalLayout->addWidget(lineEdit); verticalLayout->addLayout(horizontalLayout); horizontalLayout_2 = new QHBoxLayout(); horizontalLayout_2->setObjectName(QStringLiteral("horizontalLayout_2")); label_2 = new QLabel(Moments); label_2->setObjectName(QStringLiteral("label_2")); horizontalLayout_2->addWidget(label_2); lineEdit_2 = new QLineEdit(Moments); lineEdit_2->setObjectName(QStringLiteral("lineEdit_2")); sizePolicy.setHeightForWidth(lineEdit_2->sizePolicy().hasHeightForWidth()); lineEdit_2->setSizePolicy(sizePolicy); horizontalLayout_2->addWidget(lineEdit_2); verticalLayout->addLayout(horizontalLayout_2); horizontalLayout_6 = new QHBoxLayout(); horizontalLayout_6->setObjectName(QStringLiteral("horizontalLayout_6")); label_5 = new QLabel(Moments); label_5->setObjectName(QStringLiteral("label_5")); QSizePolicy sizePolicy1(QSizePolicy::Preferred, QSizePolicy::Preferred); sizePolicy1.setHorizontalStretch(0); sizePolicy1.setVerticalStretch(0); sizePolicy1.setHeightForWidth(label_5->sizePolicy().hasHeightForWidth()); label_5->setSizePolicy(sizePolicy1); horizontalLayout_6->addWidget(label_5); lineEdit_4 = new QLineEdit(Moments); lineEdit_4->setObjectName(QStringLiteral("lineEdit_4")); sizePolicy.setHeightForWidth(lineEdit_4->sizePolicy().hasHeightForWidth()); lineEdit_4->setSizePolicy(sizePolicy); horizontalLayout_6->addWidget(lineEdit_4); verticalLayout->addLayout(horizontalLayout_6); horizontalLayout_3 = new QHBoxLayout(); horizontalLayout_3->setObjectName(QStringLiteral("horizontalLayout_3")); label_3 = new QLabel(Moments); label_3->setObjectName(QStringLiteral("label_3")); horizontalLayout_3->addWidget(label_3); lineEdit_3 = new QLineEdit(Moments); lineEdit_3->setObjectName(QStringLiteral("lineEdit_3")); sizePolicy.setHeightForWidth(lineEdit_3->sizePolicy().hasHeightForWidth()); lineEdit_3->setSizePolicy(sizePolicy); horizontalLayout_3->addWidget(lineEdit_3); verticalLayout->addLayout(horizontalLayout_3); horizontalLayout_4 = new QHBoxLayout(); horizontalLayout_4->setObjectName(QStringLiteral("horizontalLayout_4")); label_4 = new QLabel(Moments); label_4->setObjectName(QStringLiteral("label_4")); horizontalLayout_4->addWidget(label_4); spinBox = new QSpinBox(Moments); spinBox->setObjectName(QStringLiteral("spinBox")); horizontalLayout_4->addWidget(spinBox); pushButton_2 = new QPushButton(Moments); pushButton_2->setObjectName(QStringLiteral("pushButton_2")); sizePolicy.setHeightForWidth(pushButton_2->sizePolicy().hasHeightForWidth()); pushButton_2->setSizePolicy(sizePolicy); horizontalLayout_4->addWidget(pushButton_2); verticalLayout->addLayout(horizontalLayout_4); line = new QFrame(Moments); line->setObjectName(QStringLiteral("line")); line->setFrameShape(QFrame::HLine); line->setFrameShadow(QFrame::Sunken); verticalLayout->addWidget(line); horizontalLayout_7 = new QHBoxLayout(); horizontalLayout_7->setObjectName(QStringLiteral("horizontalLayout_7")); pushButton_3 = new QPushButton(Moments); pushButton_3->setObjectName(QStringLiteral("pushButton_3")); sizePolicy.setHeightForWidth(pushButton_3->sizePolicy().hasHeightForWidth()); pushButton_3->setSizePolicy(sizePolicy); horizontalLayout_7->addWidget(pushButton_3); lineEdit_5 = new QLineEdit(Moments); lineEdit_5->setObjectName(QStringLiteral("lineEdit_5")); sizePolicy.setHeightForWidth(lineEdit_5->sizePolicy().hasHeightForWidth()); lineEdit_5->setSizePolicy(sizePolicy); horizontalLayout_7->addWidget(lineEdit_5); checkBox = new QCheckBox(Moments); checkBox->setObjectName(QStringLiteral("checkBox")); horizontalLayout_7->addWidget(checkBox); spinBox_4 = new QSpinBox(Moments); spinBox_4->setObjectName(QStringLiteral("spinBox_4")); horizontalLayout_7->addWidget(spinBox_4); label_9 = new QLabel(Moments); label_9->setObjectName(QStringLiteral("label_9")); horizontalLayout_7->addWidget(label_9); verticalLayout->addLayout(horizontalLayout_7); horizontalLayout_8 = new QHBoxLayout(); horizontalLayout_8->setObjectName(QStringLiteral("horizontalLayout_8")); pushButton_4 = new QPushButton(Moments); pushButton_4->setObjectName(QStringLiteral("pushButton_4")); horizontalLayout_8->addWidget(pushButton_4); lineEdit_6 = new QLineEdit(Moments); lineEdit_6->setObjectName(QStringLiteral("lineEdit_6")); sizePolicy.setHeightForWidth(lineEdit_6->sizePolicy().hasHeightForWidth()); lineEdit_6->setSizePolicy(sizePolicy); horizontalLayout_8->addWidget(lineEdit_6); label_6 = new QLabel(Moments); label_6->setObjectName(QStringLiteral("label_6")); horizontalLayout_8->addWidget(label_6); spinBox_2 = new QSpinBox(Moments); spinBox_2->setObjectName(QStringLiteral("spinBox_2")); horizontalLayout_8->addWidget(spinBox_2); label_7 = new QLabel(Moments); label_7->setObjectName(QStringLiteral("label_7")); horizontalLayout_8->addWidget(label_7); spinBox_3 = new QSpinBox(Moments); spinBox_3->setObjectName(QStringLiteral("spinBox_3")); horizontalLayout_8->addWidget(spinBox_3); label_8 = new QLabel(Moments); label_8->setObjectName(QStringLiteral("label_8")); horizontalLayout_8->addWidget(label_8); lineEdit_7 = new QLineEdit(Moments); lineEdit_7->setObjectName(QStringLiteral("lineEdit_7")); sizePolicy.setHeightForWidth(lineEdit_7->sizePolicy().hasHeightForWidth()); lineEdit_7->setSizePolicy(sizePolicy); lineEdit_7->setMaximumSize(QSize(16777215, 16777215)); horizontalLayout_8->addWidget(lineEdit_7); verticalLayout->addLayout(horizontalLayout_8); verticalSpacer = new QSpacerItem(20, 40, QSizePolicy::Minimum, QSizePolicy::Expanding); verticalLayout->addItem(verticalSpacer); horizontalLayout_5 = new QHBoxLayout(); horizontalLayout_5->setObjectName(QStringLiteral("horizontalLayout_5")); horizontalSpacer = new QSpacerItem(40, 20, QSizePolicy::Preferred, QSizePolicy::Minimum); horizontalLayout_5->addItem(horizontalSpacer); pushButton = new QPushButton(Moments); pushButton->setObjectName(QStringLiteral("pushButton")); sizePolicy.setHeightForWidth(pushButton->sizePolicy().hasHeightForWidth()); pushButton->setSizePolicy(sizePolicy); horizontalLayout_5->addWidget(pushButton); verticalLayout->addLayout(horizontalLayout_5); horizontalLayout_9->addLayout(verticalLayout); tableWidget = new QTableWidget(Moments); tableWidget->setObjectName(QStringLiteral("tableWidget")); horizontalLayout_9->addWidget(tableWidget); retranslateUi(Moments); QObject::connect(pushButton, SIGNAL(clicked()), Moments, SLOT(close())); QMetaObject::connectSlotsByName(Moments); } // setupUi void retranslateUi(QDialog *Moments) { Moments->setWindowTitle(QApplication::translate("Moments", "Dialog", Q_NULLPTR)); label->setText(QApplication::translate("Moments", "Work Path:", Q_NULLPTR)); label_2->setText(QApplication::translate("Moments", "Moments In:", Q_NULLPTR)); label_5->setText(QApplication::translate("Moments", "Moments 2 In:", Q_NULLPTR)); label_3->setText(QApplication::translate("Moments", "Moments Out:", Q_NULLPTR)); label_4->setText(QApplication::translate("Moments", "# Sets:", Q_NULLPTR)); pushButton_2->setText(QApplication::translate("Moments", "Correct", Q_NULLPTR)); pushButton_3->setText(QApplication::translate("Moments", "Merge to", Q_NULLPTR)); checkBox->setText(QApplication::translate("Moments", "ignore first", Q_NULLPTR)); label_9->setText(QApplication::translate("Moments", "lines", Q_NULLPTR)); pushButton_4->setText(QApplication::translate("Moments", "Combine", Q_NULLPTR)); label_6->setText(QApplication::translate("Moments", "from", Q_NULLPTR)); label_7->setText(QApplication::translate("Moments", "to", Q_NULLPTR)); label_8->setText(QApplication::translate("Moments", "*", Q_NULLPTR)); pushButton->setText(QApplication::translate("Moments", "Close", Q_NULLPTR)); } // retranslateUi }; namespace Ui { class Moments: public Ui_Moments {}; } // namespace Ui QT_END_NAMESPACE #endif // UI_MOMENTS_H
apache-2.0
tlkzzz/xpjfx
src/main/java/com/tlkzzz/jeesite/modules/sys/service/DictService.java
1144
/** * Copyright &copy; 2012-2016 <a href="https://github.com/tlkzzz/jeesite">JeeSite</a> All rights reserved. */ package com.tlkzzz.jeesite.modules.sys.service; import java.util.List; import com.tlkzzz.jeesite.common.service.CrudService; import com.tlkzzz.jeesite.common.utils.CacheUtils; import com.tlkzzz.jeesite.modules.sys.entity.Dict; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import com.tlkzzz.jeesite.modules.sys.dao.DictDao; import com.tlkzzz.jeesite.modules.sys.utils.DictUtils; /** * 字典Service * @author tlkzzz * @version 2014-05-16 */ @Service @Transactional(readOnly = true) public class DictService extends CrudService<DictDao, Dict> { /** * 查询字段类型列表 * @return */ public List<String> findTypeList(){ return dao.findTypeList(new Dict()); } @Transactional(readOnly = false) public void save(Dict dict) { super.save(dict); CacheUtils.remove(DictUtils.CACHE_DICT_MAP); } @Transactional(readOnly = false) public void delete(Dict dict) { super.delete(dict); CacheUtils.remove(DictUtils.CACHE_DICT_MAP); } }
apache-2.0
trekawek/jackrabbit-oak
oak-core/src/test/java/org/apache/jackrabbit/oak/security/authorization/accesscontrol/PrincipalACLTest.java
4767
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.jackrabbit.oak.security.authorization.accesscontrol; import java.security.Principal; import javax.jcr.PropertyType; import javax.jcr.RepositoryException; import javax.jcr.UnsupportedRepositoryOperationException; import javax.jcr.ValueFactory; import javax.jcr.security.AccessControlEntry; import javax.jcr.security.AccessControlList; import javax.jcr.security.AccessControlPolicy; import com.google.common.collect.ImmutableMap; import org.apache.jackrabbit.api.security.JackrabbitAccessControlManager; import org.apache.jackrabbit.api.security.principal.JackrabbitPrincipal; import org.apache.jackrabbit.commons.jackrabbit.authorization.AccessControlUtils; import org.apache.jackrabbit.oak.spi.security.principal.EveryonePrincipal; import org.apache.jackrabbit.oak.spi.security.principal.PrincipalImpl; import org.jetbrains.annotations.NotNull; import org.junit.Before; import org.junit.Test; import static org.apache.jackrabbit.oak.spi.security.authorization.accesscontrol.AccessControlConstants.REP_GLOB; import static org.apache.jackrabbit.oak.spi.security.authorization.accesscontrol.AccessControlConstants.REP_NODE_PATH; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; public class PrincipalACLTest extends AbstractAccessControlTest { private ACL principalAcl; @Override @Before public void before() throws Exception { super.before(); JackrabbitAccessControlManager acMgr = getAccessControlManager(root); AccessControlList policy = AccessControlUtils.getAccessControlList(acMgr, TEST_PATH); policy.addAccessControlEntry(testPrincipal, testPrivileges); policy.addAccessControlEntry(EveryonePrincipal.getInstance(), testPrivileges); acMgr.setPolicy(TEST_PATH, policy); root.commit(); principalAcl = getPrincipalAcl(acMgr, testPrincipal); } @NotNull private static ACL getPrincipalAcl(@NotNull JackrabbitAccessControlManager acMgr, @NotNull Principal testPrincipal) throws RepositoryException { for (AccessControlPolicy acp : acMgr.getPolicies(testPrincipal)) { if (acp instanceof ACL) { return (ACL) acp; } } throw new RuntimeException("no principal acl found"); } @Test(expected = UnsupportedRepositoryOperationException.class) public void testReorder() throws Exception { AccessControlEntry[] entries = principalAcl.getAccessControlEntries(); principalAcl.orderBefore(entries[0], null); } @Test public void testEquals() throws Exception { assertEquals(principalAcl, principalAcl); assertEquals(principalAcl, getPrincipalAcl(getAccessControlManager(root), testPrincipal)); } @Test public void testEqualsDifferentPrincipal() throws Exception { assertNotEquals(principalAcl, getPrincipalAcl(getAccessControlManager(root), EveryonePrincipal.getInstance())); } @Test public void testEqualsDifferentACL() throws Exception { assertNotEquals(principalAcl, AccessControlUtils.getAccessControlList(getAccessControlManager(root), TEST_PATH)); } @Test public void testEqualsDifferentPath() throws Exception { ACL acl = getPrincipalAcl(getAccessControlManager(root), new PrincipalImpl(testPrincipal.getName())); assertNotEquals(principalAcl, acl); } @Test public void testEqualsDifferentEntries() throws Exception { ValueFactory vf = getValueFactory(root); ACL acl = getPrincipalAcl(getAccessControlManager(root), testPrincipal); acl.addEntry(testPrincipal, privilegesFromNames(JCR_VERSION_MANAGEMENT), true, ImmutableMap.of(REP_GLOB, vf.createValue("/subtree/*"), REP_NODE_PATH, vf.createValue(TEST_PATH))); assertNotEquals(principalAcl, acl); } @Test public void testHashCode() { assertEquals(0, principalAcl.hashCode()); } }
apache-2.0
ErikKringen/kafka
core/src/test/scala/unit/kafka/server/ClientQuotaManagerTest.scala
19816
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.server import java.util.Collections import org.apache.kafka.common.metrics.{MetricConfig, Metrics, Quota} import org.apache.kafka.common.utils.MockTime import org.junit.Assert.{assertEquals, assertTrue} import org.junit.{Before, Test} import kafka.admin.ConfigCommand class ClientQuotaManagerTest { private val time = new MockTime private val config = ClientQuotaManagerConfig(quotaBytesPerSecondDefault = 500) var numCallbacks: Int = 0 def callback(delayTimeMs: Int) { numCallbacks += 1 } @Before def beforeMethod() { numCallbacks = 0 } private def testQuotaParsing(config: ClientQuotaManagerConfig, client1: UserClient, client2: UserClient, randomClient: UserClient, defaultConfigClient: UserClient) { val clientMetrics = new ClientQuotaManager(config, newMetrics, QuotaType.Produce, time) try { // Case 1: Update the quota. Assert that the new quota value is returned clientMetrics.updateQuota(client1.configUser, client1.configClientId, Some(new Quota(2000, true))) clientMetrics.updateQuota(client2.configUser, client2.configClientId, Some(new Quota(4000, true))) assertEquals("Default producer quota should be " + config.quotaBytesPerSecondDefault, new Quota(config.quotaBytesPerSecondDefault, true), clientMetrics.quota(randomClient.user, randomClient.clientId)) assertEquals("Should return the overridden value (2000)", new Quota(2000, true), clientMetrics.quota(client1.user, client1.clientId)) assertEquals("Should return the overridden value (4000)", new Quota(4000, true), clientMetrics.quota(client2.user, client2.clientId)) // p1 should be throttled using the overridden quota var throttleTimeMs = clientMetrics.maybeRecordAndThrottle(client1.user, client1.clientId, 2500 * config.numQuotaSamples, this.callback) assertTrue(s"throttleTimeMs should be > 0. was $throttleTimeMs", throttleTimeMs > 0) // Case 2: Change quota again. The quota should be updated within KafkaMetrics as well since the sensor was created. // p1 should not longer be throttled after the quota change clientMetrics.updateQuota(client1.configUser, client1.configClientId, Some(new Quota(3000, true))) assertEquals("Should return the newly overridden value (3000)", new Quota(3000, true), clientMetrics.quota(client1.user, client1.clientId)) throttleTimeMs = clientMetrics.maybeRecordAndThrottle(client1.user, client1.clientId, 0, this.callback) assertEquals(s"throttleTimeMs should be 0. was $throttleTimeMs", 0, throttleTimeMs) // Case 3: Change quota back to default. Should be throttled again clientMetrics.updateQuota(client1.configUser, client1.configClientId, Some(new Quota(500, true))) assertEquals("Should return the default value (500)", new Quota(500, true), clientMetrics.quota(client1.user, client1.clientId)) throttleTimeMs = clientMetrics.maybeRecordAndThrottle(client1.user, client1.clientId, 0, this.callback) assertTrue(s"throttleTimeMs should be > 0. was $throttleTimeMs", throttleTimeMs > 0) // Case 4: Set high default quota, remove p1 quota. p1 should no longer be throttled clientMetrics.updateQuota(client1.configUser, client1.configClientId, None) clientMetrics.updateQuota(defaultConfigClient.configUser, defaultConfigClient.configClientId, Some(new Quota(4000, true))) assertEquals("Should return the newly overridden value (4000)", new Quota(4000, true), clientMetrics.quota(client1.user, client1.clientId)) throttleTimeMs = clientMetrics.maybeRecordAndThrottle(client1.user, client1.clientId, 1000 * config.numQuotaSamples, this.callback) assertEquals(s"throttleTimeMs should be 0. was $throttleTimeMs", 0, throttleTimeMs) } finally { clientMetrics.shutdown() } } /** * Tests parsing for <client-id> quotas. * Quota overrides persisted in Zookeeper in /config/clients/<client-id>, default persisted in /config/clients/<default> */ @Test def testClientIdQuotaParsing() { val client1 = UserClient("ANONYMOUS", "p1", None, Some("p1")) val client2 = UserClient("ANONYMOUS", "p2", None, Some("p2")) val randomClient = UserClient("ANONYMOUS", "random-client-id", None, None) val defaultConfigClient = UserClient("", "", None, Some(ConfigEntityName.Default)) testQuotaParsing(config, client1, client2, randomClient, defaultConfigClient) } /** * Tests parsing for <user> quotas. * Quota overrides persisted in Zookeeper in /config/users/<user>, default persisted in /config/users/<default> */ @Test def testUserQuotaParsing() { val client1 = UserClient("User1", "p1", Some("User1"), None) val client2 = UserClient("User2", "p2", Some("User2"), None) val randomClient = UserClient("RandomUser", "random-client-id", None, None) val defaultConfigClient = UserClient("", "", Some(ConfigEntityName.Default), None) val config = ClientQuotaManagerConfig(quotaBytesPerSecondDefault = Long.MaxValue) testQuotaParsing(config, client1, client2, randomClient, defaultConfigClient) } /** * Tests parsing for <user, client-id> quotas. * Quotas persisted in Zookeeper in /config/users/<user>/clients/<client-id>, default in /config/users/<default>/clients/<default> */ @Test def testUserClientIdQuotaParsing() { val client1 = UserClient("User1", "p1", Some("User1"), Some("p1")) val client2 = UserClient("User2", "p2", Some("User2"), Some("p2")) val randomClient = UserClient("RandomUser", "random-client-id", None, None) val defaultConfigClient = UserClient("", "", Some(ConfigEntityName.Default), Some(ConfigEntityName.Default)) val config = ClientQuotaManagerConfig(quotaBytesPerSecondDefault = Long.MaxValue) testQuotaParsing(config, client1, client2, randomClient, defaultConfigClient) } /** * Tests parsing for <user> quotas when client-id default quota properties are set. */ @Test def testUserQuotaParsingWithDefaultClientIdQuota() { val client1 = UserClient("User1", "p1", Some("User1"), None) val client2 = UserClient("User2", "p2", Some("User2"), None) val randomClient = UserClient("RandomUser", "random-client-id", None, None) val defaultConfigClient = UserClient("", "", Some(ConfigEntityName.Default), None) testQuotaParsing(config, client1, client2, randomClient, defaultConfigClient) } /** * Tests parsing for <user, client-id> quotas when client-id default quota properties are set. */ @Test def testUserClientQuotaParsingIdWithDefaultClientIdQuota() { val client1 = UserClient("User1", "p1", Some("User1"), Some("p1")) val client2 = UserClient("User2", "p2", Some("User2"), Some("p2")) val randomClient = UserClient("RandomUser", "random-client-id", None, None) val defaultConfigClient = UserClient("", "", Some(ConfigEntityName.Default), Some(ConfigEntityName.Default)) testQuotaParsing(config, client1, client2, randomClient, defaultConfigClient) } @Test def testQuotaConfigPrecedence() { val quotaManager = new ClientQuotaManager(ClientQuotaManagerConfig(quotaBytesPerSecondDefault=Long.MaxValue), newMetrics, QuotaType.Produce, time) def checkQuota(user: String, clientId: String, expectedBound: Int, value: Int, expectThrottle: Boolean) { assertEquals(new Quota(expectedBound, true), quotaManager.quota(user, clientId)) val throttleTimeMs = quotaManager.maybeRecordAndThrottle(user, clientId, value * config.numQuotaSamples, this.callback) if (expectThrottle) assertTrue(s"throttleTimeMs should be > 0. was $throttleTimeMs", throttleTimeMs > 0) else assertEquals(s"throttleTimeMs should be 0. was $throttleTimeMs", 0, throttleTimeMs) } try { quotaManager.updateQuota(Some(ConfigEntityName.Default), None, Some(new Quota(1000, true))) quotaManager.updateQuota(None, Some(ConfigEntityName.Default), Some(new Quota(2000, true))) quotaManager.updateQuota(Some(ConfigEntityName.Default), Some(ConfigEntityName.Default), Some(new Quota(3000, true))) quotaManager.updateQuota(Some("userA"), None, Some(new Quota(4000, true))) quotaManager.updateQuota(Some("userA"), Some("client1"), Some(new Quota(5000, true))) quotaManager.updateQuota(Some("userB"), None, Some(new Quota(6000, true))) quotaManager.updateQuota(Some("userB"), Some("client1"), Some(new Quota(7000, true))) quotaManager.updateQuota(Some("userB"), Some(ConfigEntityName.Default), Some(new Quota(8000, true))) quotaManager.updateQuota(Some("userC"), None, Some(new Quota(10000, true))) quotaManager.updateQuota(None, Some("client1"), Some(new Quota(9000, true))) checkQuota("userA", "client1", 5000, 4500, false) // <user, client> quota takes precedence over <user> checkQuota("userA", "client2", 4000, 4500, true) // <user> quota takes precedence over <client> and defaults checkQuota("userA", "client3", 4000, 0, true) // <user> quota is shared across clients of user checkQuota("userA", "client1", 5000, 0, false) // <user, client> is exclusive use, unaffected by other clients checkQuota("userB", "client1", 7000, 8000, true) checkQuota("userB", "client2", 8000, 7000, false) // Default per-client quota for exclusive use of <user, client> checkQuota("userB", "client3", 8000, 7000, false) checkQuota("userD", "client1", 3000, 3500, true) // Default <user, client> quota checkQuota("userD", "client2", 3000, 2500, false) checkQuota("userE", "client1", 3000, 2500, false) // Remove default <user, client> quota config, revert to <user> default quotaManager.updateQuota(Some(ConfigEntityName.Default), Some(ConfigEntityName.Default), None) checkQuota("userD", "client1", 1000, 0, false) // Metrics tags changed, restart counter checkQuota("userE", "client4", 1000, 1500, true) checkQuota("userF", "client4", 1000, 800, false) // Default <user> quota shared across clients of user checkQuota("userF", "client5", 1000, 800, true) // Remove default <user> quota config, revert to <client-id> default quotaManager.updateQuota(Some(ConfigEntityName.Default), None, None) checkQuota("userF", "client4", 2000, 0, false) // Default <client-id> quota shared across client-id of all users checkQuota("userF", "client5", 2000, 0, false) checkQuota("userF", "client5", 2000, 2500, true) checkQuota("userG", "client5", 2000, 0, true) // Update quotas quotaManager.updateQuota(Some("userA"), None, Some(new Quota(8000, true))) quotaManager.updateQuota(Some("userA"), Some("client1"), Some(new Quota(10000, true))) checkQuota("userA", "client2", 8000, 0, false) checkQuota("userA", "client2", 8000, 4500, true) // Throttled due to sum of new and earlier values checkQuota("userA", "client1", 10000, 0, false) checkQuota("userA", "client1", 10000, 6000, true) quotaManager.updateQuota(Some("userA"), Some("client1"), None) checkQuota("userA", "client6", 8000, 0, true) // Throttled due to shared user quota quotaManager.updateQuota(Some("userA"), Some("client6"), Some(new Quota(11000, true))) checkQuota("userA", "client6", 11000, 8500, false) quotaManager.updateQuota(Some("userA"), Some(ConfigEntityName.Default), Some(new Quota(12000, true))) quotaManager.updateQuota(Some("userA"), Some("client6"), None) checkQuota("userA", "client6", 12000, 4000, true) // Throttled due to sum of new and earlier values } finally { quotaManager.shutdown() } } @Test def testQuotaViolation() { val metrics = newMetrics val clientMetrics = new ClientQuotaManager(config, metrics, QuotaType.Produce, time) val queueSizeMetric = metrics.metrics().get(metrics.metricName("queue-size", "Produce", "")) try { /* We have 10 second windows. Make sure that there is no quota violation * if we produce under the quota */ for (_ <- 0 until 10) { clientMetrics.maybeRecordAndThrottle("ANONYMOUS", "unknown", 400, callback) time.sleep(1000) } assertEquals(10, numCallbacks) assertEquals(0, queueSizeMetric.value().toInt) // Create a spike. // 400*10 + 2000 + 300 = 6300/10.5 = 600 bytes per second. // (600 - quota)/quota*window-size = (600-500)/500*10.5 seconds = 2100 // 10.5 seconds because the last window is half complete time.sleep(500) val sleepTime = clientMetrics.maybeRecordAndThrottle("ANONYMOUS", "unknown", 2300, callback) assertEquals("Should be throttled", 2100, sleepTime) assertEquals(1, queueSizeMetric.value().toInt) // After a request is delayed, the callback cannot be triggered immediately clientMetrics.throttledRequestReaper.doWork() assertEquals(10, numCallbacks) time.sleep(sleepTime) // Callback can only be triggered after the delay time passes clientMetrics.throttledRequestReaper.doWork() assertEquals(0, queueSizeMetric.value().toInt) assertEquals(11, numCallbacks) // Could continue to see delays until the bursty sample disappears for (_ <- 0 until 10) { clientMetrics.maybeRecordAndThrottle("ANONYMOUS", "unknown", 400, callback) time.sleep(1000) } assertEquals("Should be unthrottled since bursty sample has rolled over", 0, clientMetrics.maybeRecordAndThrottle("ANONYMOUS", "unknown", 0, callback)) } finally { clientMetrics.shutdown() } } @Test def testRequestPercentageQuotaViolation() { val metrics = newMetrics val quotaManager = new ClientRequestQuotaManager(config, metrics, time) quotaManager.updateQuota(Some("ANONYMOUS"), Some("test-client"), Some(Quota.upperBound(1))) val queueSizeMetric = metrics.metrics().get(metrics.metricName("queue-size", "Request", "")) def millisToPercent(millis: Double) = millis * 1000 * 1000 * ClientQuotaManagerConfig.NanosToPercentagePerSecond try { /* We have 10 second windows. Make sure that there is no quota violation * if we are under the quota */ for (_ <- 0 until 10) { quotaManager.maybeRecordAndThrottle("ANONYMOUS", "test-client", millisToPercent(4), callback) time.sleep(1000) } assertEquals(10, numCallbacks) assertEquals(0, queueSizeMetric.value().toInt) // Create a spike. // quota = 1% (10ms per second) // 4*10 + 67.1 = 107.1/10.5 = 10.2ms per second. // (10.2 - quota)/quota*window-size = (10.2-10)/10*10.5 seconds = 210ms // 10.5 seconds interval because the last window is half complete time.sleep(500) val throttleTime = quotaManager.maybeRecordAndThrottle("ANONYMOUS", "test-client", millisToPercent(67.1), callback) assertEquals("Should be throttled", 210, throttleTime) assertEquals(1, queueSizeMetric.value().toInt) // After a request is delayed, the callback cannot be triggered immediately quotaManager.throttledRequestReaper.doWork() assertEquals(10, numCallbacks) time.sleep(throttleTime) // Callback can only be triggered after the delay time passes quotaManager.throttledRequestReaper.doWork() assertEquals(0, queueSizeMetric.value().toInt) assertEquals(11, numCallbacks) // Could continue to see delays until the bursty sample disappears for (_ <- 0 until 11) { quotaManager.maybeRecordAndThrottle("ANONYMOUS", "test-client", millisToPercent(4), callback) time.sleep(1000) } assertEquals("Should be unthrottled since bursty sample has rolled over", 0, quotaManager.maybeRecordAndThrottle("ANONYMOUS", "test-client", 0, callback)) // Create a very large spike which requires > one quota window to bring within quota assertEquals(1000, quotaManager.maybeRecordAndThrottle("ANONYMOUS", "test-client", millisToPercent(500), callback)) for (_ <- 0 until 10) { time.sleep(1000) assertEquals(1000, quotaManager.maybeRecordAndThrottle("ANONYMOUS", "test-client", 0, callback)) } time.sleep(1000) assertEquals("Should be unthrottled since bursty sample has rolled over", 0, quotaManager.maybeRecordAndThrottle("ANONYMOUS", "test-client", 0, callback)) } finally { quotaManager.shutdown() } } @Test def testExpireThrottleTimeSensor() { val metrics = newMetrics val clientMetrics = new ClientQuotaManager(config, metrics, QuotaType.Produce, time) try { clientMetrics.maybeRecordAndThrottle("ANONYMOUS", "client1", 100, callback) // remove the throttle time sensor metrics.removeSensor("ProduceThrottleTime-:client1") // should not throw an exception even if the throttle time sensor does not exist. val throttleTime = clientMetrics.maybeRecordAndThrottle("ANONYMOUS", "client1", 10000, callback) assertTrue("Should be throttled", throttleTime > 0) // the sensor should get recreated val throttleTimeSensor = metrics.getSensor("ProduceThrottleTime-:client1") assertTrue("Throttle time sensor should exist", throttleTimeSensor != null) } finally { clientMetrics.shutdown() } } @Test def testExpireQuotaSensors() { val metrics = newMetrics val clientMetrics = new ClientQuotaManager(config, metrics, QuotaType.Produce, time) try { clientMetrics.maybeRecordAndThrottle("ANONYMOUS", "client1", 100, callback) // remove all the sensors metrics.removeSensor("ProduceThrottleTime-:client1") metrics.removeSensor("Produce-ANONYMOUS:client1") // should not throw an exception val throttleTime = clientMetrics.maybeRecordAndThrottle("ANONYMOUS", "client1", 10000, callback) assertTrue("Should be throttled", throttleTime > 0) // all the sensors should get recreated val throttleTimeSensor = metrics.getSensor("ProduceThrottleTime-:client1") assertTrue("Throttle time sensor should exist", throttleTimeSensor != null) val byteRateSensor = metrics.getSensor("Produce-:client1") assertTrue("Byte rate sensor should exist", byteRateSensor != null) } finally { clientMetrics.shutdown() } } @Test def testQuotaUserSanitize() { val principal = "CN=Some characters !@#$%&*()_-+=';:,/~" val sanitizedPrincipal = QuotaId.sanitize(principal) // Apart from % used in percent-encoding all characters of sanitized principal must be characters allowed in client-id ConfigCommand.validateChars("sanitized-principal", sanitizedPrincipal.replace('%', '_')) assertEquals(principal, QuotaId.desanitize(sanitizedPrincipal)) } def newMetrics: Metrics = { new Metrics(new MetricConfig(), Collections.emptyList(), time) } private case class UserClient(val user: String, val clientId: String, val configUser: Option[String] = None, val configClientId: Option[String] = None) }
apache-2.0
vinyar/chef-dk
lib/chef-dk/command/update.rb
4086
# # Copyright:: Copyright (c) 2014 Chef Software Inc. # License:: Apache License, Version 2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # require 'chef-dk/command/base' require 'chef-dk/ui' require 'chef-dk/policyfile_services/install' require 'chef-dk/policyfile_services/update_attributes' require 'chef-dk/configurable' module ChefDK module Command class Update < Base include Configurable banner(<<-BANNER) Usage: chef update [ POLICY_FILE ] [options] `chef update` reads your `Policyfile.rb`, applies any changes, re-solves the dependencies and emits an updated `Policyfile.lock.json`. The new locked policy will reflect any changes to the `run_list` and pull in any cookbook updates that are compatible with the version constraints stated in your `Policyfile.rb`. NOTE: `chef update` does not yet support granular updates (e.g., just updating the `run_list` or a specific cookbook version). Support will be added in a future version. See our detailed README for more information: https://docs.chef.io/policyfile.html Options: BANNER option :config_file, short: "-c CONFIG_FILE", long: "--config CONFIG_FILE", description: "Path to configuration file" option :debug, short: "-D", long: "--debug", description: "Enable stacktraces and other debug output", default: false, boolean: true option :update_attributes, short: "-a", long: "--attributes", description: "Update attributes", default: false, boolean: true attr_reader :policyfile_relative_path attr_accessor :ui def initialize(*args) super @ui = UI.new @policyfile_relative_path = nil @installer = nil @attributes_updater = nil end def run(params = []) return 1 unless apply_params!(params) # Force config file to be loaded. We don't use the configuration # directly, but the user may have SSL configuration options that they # need to talk to a private supermarket (e.g., trusted_certs or # ssl_verify_mode) chef_config if update_attributes? attributes_updater.run else installer.run end 0 rescue PolicyfileServiceError => e handle_error(e) 1 end def installer @installer ||= PolicyfileServices::Install.new(policyfile: policyfile_relative_path, ui: ui, root_dir: Dir.pwd, overwrite: true) end def attributes_updater @attributes_updater ||= PolicyfileServices::UpdateAttributes.new(policyfile: policyfile_relative_path, ui: ui, root_dir: Dir.pwd) end def debug? !!config[:debug] end def config_path config[:config_file] end def update_attributes? !!config[:update_attributes] end def handle_error(error) ui.err("Error: #{error.message}") if error.respond_to?(:reason) ui.err("Reason: #{error.reason}") ui.err("") ui.err(error.extended_error_info) if debug? ui.err(error.cause.backtrace.join("\n")) if debug? end end def apply_params!(params) remaining_args = parse_options(params) if remaining_args.size > 1 ui.err(opt_parser) false else @policyfile_relative_path = remaining_args.first true end end end end end
apache-2.0
apixandru/intellij-community
java/java-analysis-impl/src/com/intellij/codeInspection/dataFlow/inliner/CollectionFactoryInliner.java
3317
/* * Copyright 2000-2017 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.codeInspection.dataFlow.inliner; import com.intellij.codeInspection.dataFlow.CFGBuilder; import com.intellij.codeInspection.dataFlow.Nullness; import com.intellij.codeInspection.dataFlow.SpecialField; import com.intellij.codeInspection.dataFlow.value.DfaValueFactory; import com.intellij.codeInspection.dataFlow.value.DfaVariableValue; import com.intellij.psi.PsiExpression; import com.intellij.psi.PsiMethodCallExpression; import com.intellij.psi.PsiVariable; import com.siyeh.ig.callMatcher.CallMapper; import org.jetbrains.annotations.NotNull; import static com.intellij.codeInspection.dataFlow.SpecialField.COLLECTION_SIZE; import static com.intellij.codeInspection.dataFlow.SpecialField.MAP_SIZE; import static com.intellij.psi.CommonClassNames.JAVA_UTIL_COLLECTIONS; import static com.siyeh.ig.callMatcher.CallMatcher.staticCall; public class CollectionFactoryInliner implements CallInliner { static final class FactoryInfo { int mySize; SpecialField mySizeField; public FactoryInfo(int size, SpecialField sizeField) { mySize = size; mySizeField = sizeField; } } private static final CallMapper<FactoryInfo> STATIC_FACTORIES = new CallMapper<FactoryInfo>() .register(staticCall(JAVA_UTIL_COLLECTIONS, "emptyList", "emptySet").parameterCount(0), new FactoryInfo(0, COLLECTION_SIZE)) .register(staticCall(JAVA_UTIL_COLLECTIONS, "singletonList", "singleton").parameterCount(1), new FactoryInfo(1, COLLECTION_SIZE)) .register(staticCall(JAVA_UTIL_COLLECTIONS, "emptyMap").parameterCount(0), new FactoryInfo(0, MAP_SIZE)) .register(staticCall(JAVA_UTIL_COLLECTIONS, "singletonMap").parameterCount(2), new FactoryInfo(1, MAP_SIZE)); @Override public boolean tryInlineCall(@NotNull CFGBuilder builder, @NotNull PsiMethodCallExpression call) { FactoryInfo factoryInfo = STATIC_FACTORIES.mapFirst(call); if (factoryInfo == null) return false; PsiExpression[] args = call.getArgumentList().getExpressions(); for (PsiExpression arg : args) { builder.pushExpression(arg).pop(); } PsiVariable variable = builder.createTempVariable(call.getType()); DfaValueFactory factory = builder.getFactory(); DfaVariableValue variableValue = factory.getVarFactory().createVariableValue(variable, false); builder.pushVariable(variable) // tmpVar = <Value of collection type> .push(factory.createTypeValue(call.getType(), Nullness.NOT_NULL)) .assign() // leave tmpVar on stack: it's result of method call .push(factoryInfo.mySizeField.createValue(factory, variableValue)) // tmpVar.size = <size> .push(factory.getInt(factoryInfo.mySize)) .assign() .pop(); return true; } }
apache-2.0
sergecodd/FireFox-OS
B2G/gecko/dom/src/events/nsJSEventListener.h
1431
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #ifndef nsJSEventListener_h__ #define nsJSEventListener_h__ #include "nsIDOMKeyEvent.h" #include "nsIJSEventListener.h" #include "nsIDOMEventListener.h" #include "jsapi.h" #include "nsCOMPtr.h" #include "nsIAtom.h" #include "nsIScriptContext.h" #include "nsCycleCollectionParticipant.h" // nsJSEventListener interface // misnamed - JS no longer has exclusive rights over this interface! class nsJSEventListener : public nsIJSEventListener { public: nsJSEventListener(nsIScriptContext* aContext, JSObject* aScopeObject, nsISupports* aTarget, nsIAtom* aType, JSObject* aHandler); virtual ~nsJSEventListener(); NS_DECL_CYCLE_COLLECTING_ISUPPORTS // nsIDOMEventListener interface NS_DECL_NSIDOMEVENTLISTENER // nsIJSEventListener virtual void SetHandler(JSObject *aHandler); virtual size_t SizeOfIncludingThis(nsMallocSizeOfFun aMallocSizeOf) const { return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); } NS_DECL_CYCLE_COLLECTION_SKIPPABLE_SCRIPT_HOLDER_CLASS(nsJSEventListener) protected: bool IsBlackForCC(); nsCOMPtr<nsIAtom> mEventName; }; #endif //nsJSEventListener_h__
apache-2.0
lordjone/libgdx
extensions/gdx-bullet/jni/swig-src/dynamics/com/badlogic/gdx/physics/bullet/dynamics/btSolverBody.java
9009
/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 3.0.0 * * Do not make changes to this file unless you know what you are doing--modify * the SWIG interface file instead. * ----------------------------------------------------------------------------- */ package com.badlogic.gdx.physics.bullet.dynamics; import com.badlogic.gdx.physics.bullet.BulletBase; import com.badlogic.gdx.physics.bullet.linearmath.*; import com.badlogic.gdx.physics.bullet.collision.*; import com.badlogic.gdx.math.Vector3; import com.badlogic.gdx.math.Quaternion; import com.badlogic.gdx.math.Matrix3; import com.badlogic.gdx.math.Matrix4; public class btSolverBody extends BulletBase { private long swigCPtr; protected btSolverBody(final String className, long cPtr, boolean cMemoryOwn) { super(className, cPtr, cMemoryOwn); swigCPtr = cPtr; } /** Construct a new btSolverBody, normally you should not need this constructor it's intended for low-level usage. */ public btSolverBody(long cPtr, boolean cMemoryOwn) { this("btSolverBody", cPtr, cMemoryOwn); construct(); } @Override protected void reset(long cPtr, boolean cMemoryOwn) { if (!destroyed) destroy(); super.reset(swigCPtr = cPtr, cMemoryOwn); } public static long getCPtr(btSolverBody obj) { return (obj == null) ? 0 : obj.swigCPtr; } @Override protected void finalize() throws Throwable { if (!destroyed) destroy(); super.finalize(); } @Override protected synchronized void delete() { if (swigCPtr != 0) { if (swigCMemOwn) { swigCMemOwn = false; DynamicsJNI.delete_btSolverBody(swigCPtr); } swigCPtr = 0; } super.delete(); } public void setWorldTransform(btTransform value) { DynamicsJNI.btSolverBody_worldTransform_set(swigCPtr, this, btTransform.getCPtr(value), value); } public btTransform getWorldTransform() { long cPtr = DynamicsJNI.btSolverBody_worldTransform_get(swigCPtr, this); return (cPtr == 0) ? null : new btTransform(cPtr, false); } public void setDeltaLinearVelocity(btVector3 value) { DynamicsJNI.btSolverBody_deltaLinearVelocity_set(swigCPtr, this, btVector3.getCPtr(value), value); } public btVector3 getDeltaLinearVelocity() { long cPtr = DynamicsJNI.btSolverBody_deltaLinearVelocity_get(swigCPtr, this); return (cPtr == 0) ? null : new btVector3(cPtr, false); } public void setDeltaAngularVelocity(btVector3 value) { DynamicsJNI.btSolverBody_deltaAngularVelocity_set(swigCPtr, this, btVector3.getCPtr(value), value); } public btVector3 getDeltaAngularVelocity() { long cPtr = DynamicsJNI.btSolverBody_deltaAngularVelocity_get(swigCPtr, this); return (cPtr == 0) ? null : new btVector3(cPtr, false); } public void setAngularFactor(btVector3 value) { DynamicsJNI.btSolverBody_angularFactor_set(swigCPtr, this, btVector3.getCPtr(value), value); } public btVector3 getAngularFactor() { long cPtr = DynamicsJNI.btSolverBody_angularFactor_get(swigCPtr, this); return (cPtr == 0) ? null : new btVector3(cPtr, false); } public void setLinearFactor(btVector3 value) { DynamicsJNI.btSolverBody_linearFactor_set(swigCPtr, this, btVector3.getCPtr(value), value); } public btVector3 getLinearFactor() { long cPtr = DynamicsJNI.btSolverBody_linearFactor_get(swigCPtr, this); return (cPtr == 0) ? null : new btVector3(cPtr, false); } public void setInvMass(btVector3 value) { DynamicsJNI.btSolverBody_invMass_set(swigCPtr, this, btVector3.getCPtr(value), value); } public btVector3 getInvMass() { long cPtr = DynamicsJNI.btSolverBody_invMass_get(swigCPtr, this); return (cPtr == 0) ? null : new btVector3(cPtr, false); } public void setPushVelocity(btVector3 value) { DynamicsJNI.btSolverBody_pushVelocity_set(swigCPtr, this, btVector3.getCPtr(value), value); } public btVector3 getPushVelocity() { long cPtr = DynamicsJNI.btSolverBody_pushVelocity_get(swigCPtr, this); return (cPtr == 0) ? null : new btVector3(cPtr, false); } public void setTurnVelocity(btVector3 value) { DynamicsJNI.btSolverBody_turnVelocity_set(swigCPtr, this, btVector3.getCPtr(value), value); } public btVector3 getTurnVelocity() { long cPtr = DynamicsJNI.btSolverBody_turnVelocity_get(swigCPtr, this); return (cPtr == 0) ? null : new btVector3(cPtr, false); } public void setLinearVelocity(btVector3 value) { DynamicsJNI.btSolverBody_linearVelocity_set(swigCPtr, this, btVector3.getCPtr(value), value); } public btVector3 getLinearVelocity() { long cPtr = DynamicsJNI.btSolverBody_linearVelocity_get(swigCPtr, this); return (cPtr == 0) ? null : new btVector3(cPtr, false); } public void setAngularVelocity(btVector3 value) { DynamicsJNI.btSolverBody_angularVelocity_set(swigCPtr, this, btVector3.getCPtr(value), value); } public btVector3 getAngularVelocity() { long cPtr = DynamicsJNI.btSolverBody_angularVelocity_get(swigCPtr, this); return (cPtr == 0) ? null : new btVector3(cPtr, false); } public void setExternalForceImpulse(btVector3 value) { DynamicsJNI.btSolverBody_externalForceImpulse_set(swigCPtr, this, btVector3.getCPtr(value), value); } public btVector3 getExternalForceImpulse() { long cPtr = DynamicsJNI.btSolverBody_externalForceImpulse_get(swigCPtr, this); return (cPtr == 0) ? null : new btVector3(cPtr, false); } public void setExternalTorqueImpulse(btVector3 value) { DynamicsJNI.btSolverBody_externalTorqueImpulse_set(swigCPtr, this, btVector3.getCPtr(value), value); } public btVector3 getExternalTorqueImpulse() { long cPtr = DynamicsJNI.btSolverBody_externalTorqueImpulse_get(swigCPtr, this); return (cPtr == 0) ? null : new btVector3(cPtr, false); } public void setOriginalBody(btRigidBody value) { DynamicsJNI.btSolverBody_originalBody_set(swigCPtr, this, btRigidBody.getCPtr(value), value); } public btRigidBody getOriginalBody() { long cPtr = DynamicsJNI.btSolverBody_originalBody_get(swigCPtr, this); return (cPtr == 0) ? null : new btRigidBody(cPtr, false); } public void getVelocityInLocalPointNoDelta(Vector3 rel_pos, Vector3 velocity) { DynamicsJNI.btSolverBody_getVelocityInLocalPointNoDelta(swigCPtr, this, rel_pos, velocity); } public void getVelocityInLocalPointObsolete(Vector3 rel_pos, Vector3 velocity) { DynamicsJNI.btSolverBody_getVelocityInLocalPointObsolete(swigCPtr, this, rel_pos, velocity); } public void getAngularVelocity(Vector3 angVel) { DynamicsJNI.btSolverBody_getAngularVelocity(swigCPtr, this, angVel); } public void applyImpulse(Vector3 linearComponent, Vector3 angularComponent, float impulseMagnitude) { DynamicsJNI.btSolverBody_applyImpulse(swigCPtr, this, linearComponent, angularComponent, impulseMagnitude); } public void internalApplyPushImpulse(Vector3 linearComponent, Vector3 angularComponent, float impulseMagnitude) { DynamicsJNI.btSolverBody_internalApplyPushImpulse(swigCPtr, this, linearComponent, angularComponent, impulseMagnitude); } public Vector3 internalGetDeltaLinearVelocity() { return DynamicsJNI.btSolverBody_internalGetDeltaLinearVelocity(swigCPtr, this); } public Vector3 internalGetDeltaAngularVelocity() { return DynamicsJNI.btSolverBody_internalGetDeltaAngularVelocity(swigCPtr, this); } public Vector3 internalGetAngularFactor() { return DynamicsJNI.btSolverBody_internalGetAngularFactor(swigCPtr, this); } public Vector3 internalGetInvMass() { return DynamicsJNI.btSolverBody_internalGetInvMass(swigCPtr, this); } public void internalSetInvMass(Vector3 invMass) { DynamicsJNI.btSolverBody_internalSetInvMass(swigCPtr, this, invMass); } public Vector3 internalGetPushVelocity() { return DynamicsJNI.btSolverBody_internalGetPushVelocity(swigCPtr, this); } public Vector3 internalGetTurnVelocity() { return DynamicsJNI.btSolverBody_internalGetTurnVelocity(swigCPtr, this); } public void internalGetVelocityInLocalPointObsolete(Vector3 rel_pos, Vector3 velocity) { DynamicsJNI.btSolverBody_internalGetVelocityInLocalPointObsolete(swigCPtr, this, rel_pos, velocity); } public void internalGetAngularVelocity(Vector3 angVel) { DynamicsJNI.btSolverBody_internalGetAngularVelocity(swigCPtr, this, angVel); } public void internalApplyImpulse(Vector3 linearComponent, Vector3 angularComponent, float impulseMagnitude) { DynamicsJNI.btSolverBody_internalApplyImpulse(swigCPtr, this, linearComponent, angularComponent, impulseMagnitude); } public void writebackVelocity() { DynamicsJNI.btSolverBody_writebackVelocity(swigCPtr, this); } public void writebackVelocityAndTransform(float timeStep, float splitImpulseTurnErp) { DynamicsJNI.btSolverBody_writebackVelocityAndTransform(swigCPtr, this, timeStep, splitImpulseTurnErp); } public btSolverBody() { this(DynamicsJNI.new_btSolverBody(), true); } }
apache-2.0
google/go-genproto
googleapis/cloud/osconfig/v1alpha/instance_os_policies_compliance.pb.go
36598
// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 // protoc v3.12.2 // source: google/cloud/osconfig/v1alpha/instance_os_policies_compliance.proto package osconfig import ( reflect "reflect" sync "sync" _ "google.golang.org/genproto/googleapis/api/annotations" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" timestamppb "google.golang.org/protobuf/types/known/timestamppb" ) const ( // Verify that this generated code is sufficiently up-to-date. _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) // Verify that runtime/protoimpl is sufficiently up-to-date. _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) // This API resource represents the OS policies compliance data for a Compute // Engine virtual machine (VM) instance at a given point in time. // // A Compute Engine VM can have multiple OS policy assignments, and each // assignment can have multiple OS policies. As a result, multiple OS policies // could be applied to a single VM. // // You can use this API resource to determine both the compliance state of your // VM as well as the compliance state of an individual OS policy. // // For more information, see [View // compliance](https://cloud.google.com/compute/docs/os-configuration-management/view-compliance). type InstanceOSPoliciesCompliance struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Output only. The `InstanceOSPoliciesCompliance` API resource name. // // Format: // `projects/{project_number}/locations/{location}/instanceOSPoliciesCompliances/{instance_id}` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Output only. The Compute Engine VM instance name. Instance string `protobuf:"bytes,2,opt,name=instance,proto3" json:"instance,omitempty"` // Output only. Compliance state of the VM. State OSPolicyComplianceState `protobuf:"varint,3,opt,name=state,proto3,enum=google.cloud.osconfig.v1alpha.OSPolicyComplianceState" json:"state,omitempty"` // Output only. Detailed compliance state of the VM. // This field is populated only when compliance state is `UNKNOWN`. // // It may contain one of the following values: // // * `no-compliance-data`: Compliance data is not available for this VM. // * `no-agent-detected`: OS Config agent is not detected for this VM. // * `config-not-supported-by-agent`: The version of the OS Config agent // running on this VM does not support configuration management. // * `inactive`: VM is not running. // * `internal-service-errors`: There were internal service errors encountered // while enforcing compliance. // * `agent-errors`: OS config agent encountered errors while enforcing // compliance. DetailedState string `protobuf:"bytes,4,opt,name=detailed_state,json=detailedState,proto3" json:"detailed_state,omitempty"` // Output only. The reason for the `detailed_state` of the VM (if any). DetailedStateReason string `protobuf:"bytes,5,opt,name=detailed_state_reason,json=detailedStateReason,proto3" json:"detailed_state_reason,omitempty"` // Output only. Compliance data for each `OSPolicy` that is applied to the VM. OsPolicyCompliances []*InstanceOSPoliciesCompliance_OSPolicyCompliance `protobuf:"bytes,6,rep,name=os_policy_compliances,json=osPolicyCompliances,proto3" json:"os_policy_compliances,omitempty"` // Output only. Timestamp of the last compliance check for the VM. LastComplianceCheckTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=last_compliance_check_time,json=lastComplianceCheckTime,proto3" json:"last_compliance_check_time,omitempty"` // Output only. Unique identifier for the last compliance run. // This id will be logged by the OS config agent during a compliance run and // can be used for debugging and tracing purpose. LastComplianceRunId string `protobuf:"bytes,8,opt,name=last_compliance_run_id,json=lastComplianceRunId,proto3" json:"last_compliance_run_id,omitempty"` } func (x *InstanceOSPoliciesCompliance) Reset() { *x = InstanceOSPoliciesCompliance{} if protoimpl.UnsafeEnabled { mi := &file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *InstanceOSPoliciesCompliance) String() string { return protoimpl.X.MessageStringOf(x) } func (*InstanceOSPoliciesCompliance) ProtoMessage() {} func (x *InstanceOSPoliciesCompliance) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use InstanceOSPoliciesCompliance.ProtoReflect.Descriptor instead. func (*InstanceOSPoliciesCompliance) Descriptor() ([]byte, []int) { return file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_rawDescGZIP(), []int{0} } func (x *InstanceOSPoliciesCompliance) GetName() string { if x != nil { return x.Name } return "" } func (x *InstanceOSPoliciesCompliance) GetInstance() string { if x != nil { return x.Instance } return "" } func (x *InstanceOSPoliciesCompliance) GetState() OSPolicyComplianceState { if x != nil { return x.State } return OSPolicyComplianceState_OS_POLICY_COMPLIANCE_STATE_UNSPECIFIED } func (x *InstanceOSPoliciesCompliance) GetDetailedState() string { if x != nil { return x.DetailedState } return "" } func (x *InstanceOSPoliciesCompliance) GetDetailedStateReason() string { if x != nil { return x.DetailedStateReason } return "" } func (x *InstanceOSPoliciesCompliance) GetOsPolicyCompliances() []*InstanceOSPoliciesCompliance_OSPolicyCompliance { if x != nil { return x.OsPolicyCompliances } return nil } func (x *InstanceOSPoliciesCompliance) GetLastComplianceCheckTime() *timestamppb.Timestamp { if x != nil { return x.LastComplianceCheckTime } return nil } func (x *InstanceOSPoliciesCompliance) GetLastComplianceRunId() string { if x != nil { return x.LastComplianceRunId } return "" } // A request message for getting OS policies compliance data for the given // Compute Engine VM instance. type GetInstanceOSPoliciesComplianceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Required. API resource name for instance OS policies compliance resource. // // Format: // `projects/{project}/locations/{location}/instanceOSPoliciesCompliances/{instance}` // // For `{project}`, either Compute Engine project-number or project-id can be // provided. // For `{instance}`, either Compute Engine VM instance-id or instance-name can // be provided. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } func (x *GetInstanceOSPoliciesComplianceRequest) Reset() { *x = GetInstanceOSPoliciesComplianceRequest{} if protoimpl.UnsafeEnabled { mi := &file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GetInstanceOSPoliciesComplianceRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*GetInstanceOSPoliciesComplianceRequest) ProtoMessage() {} func (x *GetInstanceOSPoliciesComplianceRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GetInstanceOSPoliciesComplianceRequest.ProtoReflect.Descriptor instead. func (*GetInstanceOSPoliciesComplianceRequest) Descriptor() ([]byte, []int) { return file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_rawDescGZIP(), []int{1} } func (x *GetInstanceOSPoliciesComplianceRequest) GetName() string { if x != nil { return x.Name } return "" } // A request message for listing OS policies compliance data for all Compute // Engine VMs in the given location. type ListInstanceOSPoliciesCompliancesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Required. The parent resource name. // // Format: `projects/{project}/locations/{location}` // // For `{project}`, either Compute Engine project-number or project-id can be // provided. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // The maximum number of results to return. PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` // A pagination token returned from a previous call to // `ListInstanceOSPoliciesCompliances` that indicates where this listing // should continue from. PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` // If provided, this field specifies the criteria that must be met by a // `InstanceOSPoliciesCompliance` API resource to be included in the response. Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` } func (x *ListInstanceOSPoliciesCompliancesRequest) Reset() { *x = ListInstanceOSPoliciesCompliancesRequest{} if protoimpl.UnsafeEnabled { mi := &file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ListInstanceOSPoliciesCompliancesRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*ListInstanceOSPoliciesCompliancesRequest) ProtoMessage() {} func (x *ListInstanceOSPoliciesCompliancesRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ListInstanceOSPoliciesCompliancesRequest.ProtoReflect.Descriptor instead. func (*ListInstanceOSPoliciesCompliancesRequest) Descriptor() ([]byte, []int) { return file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_rawDescGZIP(), []int{2} } func (x *ListInstanceOSPoliciesCompliancesRequest) GetParent() string { if x != nil { return x.Parent } return "" } func (x *ListInstanceOSPoliciesCompliancesRequest) GetPageSize() int32 { if x != nil { return x.PageSize } return 0 } func (x *ListInstanceOSPoliciesCompliancesRequest) GetPageToken() string { if x != nil { return x.PageToken } return "" } func (x *ListInstanceOSPoliciesCompliancesRequest) GetFilter() string { if x != nil { return x.Filter } return "" } // A response message for listing OS policies compliance data for all Compute // Engine VMs in the given location. type ListInstanceOSPoliciesCompliancesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // List of instance OS policies compliance objects. InstanceOsPoliciesCompliances []*InstanceOSPoliciesCompliance `protobuf:"bytes,1,rep,name=instance_os_policies_compliances,json=instanceOsPoliciesCompliances,proto3" json:"instance_os_policies_compliances,omitempty"` // The pagination token to retrieve the next page of instance OS policies // compliance objects. NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` } func (x *ListInstanceOSPoliciesCompliancesResponse) Reset() { *x = ListInstanceOSPoliciesCompliancesResponse{} if protoimpl.UnsafeEnabled { mi := &file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ListInstanceOSPoliciesCompliancesResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*ListInstanceOSPoliciesCompliancesResponse) ProtoMessage() {} func (x *ListInstanceOSPoliciesCompliancesResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ListInstanceOSPoliciesCompliancesResponse.ProtoReflect.Descriptor instead. func (*ListInstanceOSPoliciesCompliancesResponse) Descriptor() ([]byte, []int) { return file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_rawDescGZIP(), []int{3} } func (x *ListInstanceOSPoliciesCompliancesResponse) GetInstanceOsPoliciesCompliances() []*InstanceOSPoliciesCompliance { if x != nil { return x.InstanceOsPoliciesCompliances } return nil } func (x *ListInstanceOSPoliciesCompliancesResponse) GetNextPageToken() string { if x != nil { return x.NextPageToken } return "" } // Compliance data for an OS policy type InstanceOSPoliciesCompliance_OSPolicyCompliance struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // The OS policy id OsPolicyId string `protobuf:"bytes,1,opt,name=os_policy_id,json=osPolicyId,proto3" json:"os_policy_id,omitempty"` // Reference to the `OSPolicyAssignment` API resource that the `OSPolicy` // belongs to. // // Format: // `projects/{project_number}/locations/{location}/osPolicyAssignments/{os_policy_assignment_id@revision_id}` OsPolicyAssignment string `protobuf:"bytes,2,opt,name=os_policy_assignment,json=osPolicyAssignment,proto3" json:"os_policy_assignment,omitempty"` // Compliance state of the OS policy. State OSPolicyComplianceState `protobuf:"varint,4,opt,name=state,proto3,enum=google.cloud.osconfig.v1alpha.OSPolicyComplianceState" json:"state,omitempty"` // Compliance data for each `OSPolicyResource` that is applied to the // VM. OsPolicyResourceCompliances []*OSPolicyResourceCompliance `protobuf:"bytes,5,rep,name=os_policy_resource_compliances,json=osPolicyResourceCompliances,proto3" json:"os_policy_resource_compliances,omitempty"` } func (x *InstanceOSPoliciesCompliance_OSPolicyCompliance) Reset() { *x = InstanceOSPoliciesCompliance_OSPolicyCompliance{} if protoimpl.UnsafeEnabled { mi := &file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *InstanceOSPoliciesCompliance_OSPolicyCompliance) String() string { return protoimpl.X.MessageStringOf(x) } func (*InstanceOSPoliciesCompliance_OSPolicyCompliance) ProtoMessage() {} func (x *InstanceOSPoliciesCompliance_OSPolicyCompliance) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use InstanceOSPoliciesCompliance_OSPolicyCompliance.ProtoReflect.Descriptor instead. func (*InstanceOSPoliciesCompliance_OSPolicyCompliance) Descriptor() ([]byte, []int) { return file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_rawDescGZIP(), []int{0, 0} } func (x *InstanceOSPoliciesCompliance_OSPolicyCompliance) GetOsPolicyId() string { if x != nil { return x.OsPolicyId } return "" } func (x *InstanceOSPoliciesCompliance_OSPolicyCompliance) GetOsPolicyAssignment() string { if x != nil { return x.OsPolicyAssignment } return "" } func (x *InstanceOSPoliciesCompliance_OSPolicyCompliance) GetState() OSPolicyComplianceState { if x != nil { return x.State } return OSPolicyComplianceState_OS_POLICY_COMPLIANCE_STATE_UNSPECIFIED } func (x *InstanceOSPoliciesCompliance_OSPolicyCompliance) GetOsPolicyResourceCompliances() []*OSPolicyResourceCompliance { if x != nil { return x.OsPolicyResourceCompliances } return nil } var File_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto protoreflect.FileDescriptor var file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_rawDesc = []byte{ 0x0a, 0x43, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6f, 0x73, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6f, 0x73, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6f, 0x73, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6f, 0x73, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x08, 0x0a, 0x1c, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4f, 0x53, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x08, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x51, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6f, 0x73, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4f, 0x53, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2a, 0x0a, 0x0e, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0d, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x15, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x87, 0x01, 0x0a, 0x15, 0x6f, 0x73, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6f, 0x73, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4f, 0x53, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x4f, 0x53, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x6f, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x1a, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x17, 0x6c, 0x61, 0x73, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x16, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x1a, 0xe7, 0x02, 0x0a, 0x12, 0x4f, 0x53, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0c, 0x6f, 0x73, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x64, 0x12, 0x61, 0x0a, 0x14, 0x6f, 0x73, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2f, 0xfa, 0x41, 0x2c, 0x0a, 0x2a, 0x6f, 0x73, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x53, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x12, 0x6f, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x4c, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6f, 0x73, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4f, 0x53, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x7e, 0x0a, 0x1e, 0x6f, 0x73, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6f, 0x73, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4f, 0x53, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x1b, 0x6f, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x3a, 0x8c, 0x01, 0xea, 0x41, 0x88, 0x01, 0x0a, 0x34, 0x6f, 0x73, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4f, 0x53, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x50, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4f, 0x53, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x22, 0x7a, 0x0a, 0x26, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4f, 0x53, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3c, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x36, 0x0a, 0x34, 0x6f, 0x73, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4f, 0x53, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc1, 0x01, 0x0a, 0x28, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4f, 0x53, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xda, 0x01, 0x0a, 0x29, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4f, 0x53, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x84, 0x01, 0x0a, 0x20, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6f, 0x73, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6f, 0x73, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4f, 0x53, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x1d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4f, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0xf2, 0x01, 0x0a, 0x21, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6f, 0x73, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x42, 0x21, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4f, 0x53, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x45, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6f, 0x73, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x3b, 0x6f, 0x73, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0xaa, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4f, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x56, 0x31, 0x41, 0x6c, 0x70, 0x68, 0x61, 0xca, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4f, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xea, 0x02, 0x20, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4f, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_rawDescOnce sync.Once file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_rawDescData = file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_rawDesc ) func file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_rawDescGZIP() []byte { file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_rawDescOnce.Do(func() { file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_rawDescData) }) return file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_rawDescData } var file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_goTypes = []interface{}{ (*InstanceOSPoliciesCompliance)(nil), // 0: google.cloud.osconfig.v1alpha.InstanceOSPoliciesCompliance (*GetInstanceOSPoliciesComplianceRequest)(nil), // 1: google.cloud.osconfig.v1alpha.GetInstanceOSPoliciesComplianceRequest (*ListInstanceOSPoliciesCompliancesRequest)(nil), // 2: google.cloud.osconfig.v1alpha.ListInstanceOSPoliciesCompliancesRequest (*ListInstanceOSPoliciesCompliancesResponse)(nil), // 3: google.cloud.osconfig.v1alpha.ListInstanceOSPoliciesCompliancesResponse (*InstanceOSPoliciesCompliance_OSPolicyCompliance)(nil), // 4: google.cloud.osconfig.v1alpha.InstanceOSPoliciesCompliance.OSPolicyCompliance (OSPolicyComplianceState)(0), // 5: google.cloud.osconfig.v1alpha.OSPolicyComplianceState (*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp (*OSPolicyResourceCompliance)(nil), // 7: google.cloud.osconfig.v1alpha.OSPolicyResourceCompliance } var file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_depIdxs = []int32{ 5, // 0: google.cloud.osconfig.v1alpha.InstanceOSPoliciesCompliance.state:type_name -> google.cloud.osconfig.v1alpha.OSPolicyComplianceState 4, // 1: google.cloud.osconfig.v1alpha.InstanceOSPoliciesCompliance.os_policy_compliances:type_name -> google.cloud.osconfig.v1alpha.InstanceOSPoliciesCompliance.OSPolicyCompliance 6, // 2: google.cloud.osconfig.v1alpha.InstanceOSPoliciesCompliance.last_compliance_check_time:type_name -> google.protobuf.Timestamp 0, // 3: google.cloud.osconfig.v1alpha.ListInstanceOSPoliciesCompliancesResponse.instance_os_policies_compliances:type_name -> google.cloud.osconfig.v1alpha.InstanceOSPoliciesCompliance 5, // 4: google.cloud.osconfig.v1alpha.InstanceOSPoliciesCompliance.OSPolicyCompliance.state:type_name -> google.cloud.osconfig.v1alpha.OSPolicyComplianceState 7, // 5: google.cloud.osconfig.v1alpha.InstanceOSPoliciesCompliance.OSPolicyCompliance.os_policy_resource_compliances:type_name -> google.cloud.osconfig.v1alpha.OSPolicyResourceCompliance 6, // [6:6] is the sub-list for method output_type 6, // [6:6] is the sub-list for method input_type 6, // [6:6] is the sub-list for extension type_name 6, // [6:6] is the sub-list for extension extendee 0, // [0:6] is the sub-list for field type_name } func init() { file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_init() } func file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_init() { if File_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto != nil { return } file_google_cloud_osconfig_v1alpha_config_common_proto_init() if !protoimpl.UnsafeEnabled { file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*InstanceOSPoliciesCompliance); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetInstanceOSPoliciesComplianceRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListInstanceOSPoliciesCompliancesRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListInstanceOSPoliciesCompliancesResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*InstanceOSPoliciesCompliance_OSPolicyCompliance); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_rawDesc, NumEnums: 0, NumMessages: 5, NumExtensions: 0, NumServices: 0, }, GoTypes: file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_goTypes, DependencyIndexes: file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_depIdxs, MessageInfos: file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_msgTypes, }.Build() File_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto = out.File file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_rawDesc = nil file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_goTypes = nil file_google_cloud_osconfig_v1alpha_instance_os_policies_compliance_proto_depIdxs = nil }
apache-2.0
life-beam/j2objc
jre_emul/android/platform/libcore/ojluni/src/main/java/java/io/PrintStream.java
41821
/* * Copyright (C) 2014 The Android Open Source Project * Copyright (c) 1996, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ package java.io; import com.google.j2objc.WeakProxy; import java.util.Formatter; import java.util.Locale; import java.nio.charset.Charset; import java.nio.charset.IllegalCharsetNameException; import java.nio.charset.UnsupportedCharsetException; /** * A <code>PrintStream</code> adds functionality to another output stream, * namely the ability to print representations of various data values * conveniently. Two other features are provided as well. Unlike other output * streams, a <code>PrintStream</code> never throws an * <code>IOException</code>; instead, exceptional situations merely set an * internal flag that can be tested via the <code>checkError</code> method. * Optionally, a <code>PrintStream</code> can be created so as to flush * automatically; this means that the <code>flush</code> method is * automatically invoked after a byte array is written, one of the * <code>println</code> methods is invoked, or a newline character or byte * (<code>'\n'</code>) is written. * * <p> All characters printed by a <code>PrintStream</code> are converted into * bytes using the platform's default character encoding. The <code>{@link * PrintWriter}</code> class should be used in situations that require writing * characters rather than bytes. * * @author Frank Yellin * @author Mark Reinhold * @since JDK1.0 */ public class PrintStream extends FilterOutputStream implements Appendable, Closeable { private final boolean autoFlush; private boolean trouble = false; private Formatter formatter; /** * Track both the text- and character-output streams, so that their buffers * can be flushed without flushing the entire stream. */ private BufferedWriter textOut; private OutputStreamWriter charOut; private Charset charset; /** * requireNonNull is explicitly declared here so as not to create an extra * dependency on java.util.Objects.requireNonNull. PrintStream is loaded * early during system initialization. */ private static <T> T requireNonNull(T obj, String message) { if (obj == null) throw new NullPointerException(message); return obj; } /** * Returns a charset object for the given charset name. * @throws NullPointerException is csn is null * @throws UnsupportedEncodingException if the charset is not supported */ private static Charset toCharset(String csn) throws UnsupportedEncodingException { requireNonNull(csn, "charsetName"); try { return Charset.forName(csn); } catch (IllegalCharsetNameException|UnsupportedCharsetException unused) { // UnsupportedEncodingException should be thrown throw new UnsupportedEncodingException(csn); } } /* Private constructors */ private PrintStream(boolean autoFlush, OutputStream out) { super(out); this.autoFlush = autoFlush; } private PrintStream(boolean autoFlush, OutputStream out, Charset charset) { super(out); this.autoFlush = autoFlush; } /* Variant of the private constructor so that the given charset name * can be verified before evaluating the OutputStream argument. Used * by constructors creating a FileOutputStream that also take a * charset name. */ private PrintStream(boolean autoFlush, Charset charset, OutputStream out) throws UnsupportedEncodingException { this(autoFlush, out, charset); } /** * Creates a new print stream. This stream will not flush automatically. * * @param out The output stream to which values and objects will be * printed * * @see java.io.PrintWriter#PrintWriter(java.io.OutputStream) */ public PrintStream(OutputStream out) { this(out, false); } /** * Creates a new print stream. * * @param out The output stream to which values and objects will be * printed * @param autoFlush A boolean; if true, the output buffer will be flushed * whenever a byte array is written, one of the * <code>println</code> methods is invoked, or a newline * character or byte (<code>'\n'</code>) is written * * @see java.io.PrintWriter#PrintWriter(java.io.OutputStream, boolean) */ public PrintStream(OutputStream out, boolean autoFlush) { this(autoFlush, requireNonNull(out, "Null output stream")); } /** * Creates a new print stream. * * @param out The output stream to which values and objects will be * printed * @param autoFlush A boolean; if true, the output buffer will be flushed * whenever a byte array is written, one of the * <code>println</code> methods is invoked, or a newline * character or byte (<code>'\n'</code>) is written * @param encoding The name of a supported * <a href="../lang/package-summary.html#charenc"> * character encoding</a> * * @throws UnsupportedEncodingException * If the named encoding is not supported * * @since 1.4 */ public PrintStream(OutputStream out, boolean autoFlush, String encoding) throws UnsupportedEncodingException { this(autoFlush, requireNonNull(out, "Null output stream"), toCharset(encoding)); } /** * Creates a new print stream, without automatic line flushing, with the * specified file name. This convenience constructor creates * the necessary intermediate {@link java.io.OutputStreamWriter * OutputStreamWriter}, which will encode characters using the * {@linkplain java.nio.charset.Charset#defaultCharset() default charset} * for this instance of the Java virtual machine. * * @param fileName * The name of the file to use as the destination of this print * stream. If the file exists, then it will be truncated to * zero size; otherwise, a new file will be created. The output * will be written to the file and is buffered. * * @throws FileNotFoundException * If the given file object does not denote an existing, writable * regular file and a new regular file of that name cannot be * created, or if some other error occurs while opening or * creating the file * * @throws SecurityException * If a security manager is present and {@link * SecurityManager#checkWrite checkWrite(fileName)} denies write * access to the file * * @since 1.5 */ public PrintStream(String fileName) throws FileNotFoundException { this(false, new FileOutputStream(fileName)); } /** * Creates a new print stream, without automatic line flushing, with the * specified file name and charset. This convenience constructor creates * the necessary intermediate {@link java.io.OutputStreamWriter * OutputStreamWriter}, which will encode characters using the provided * charset. * * @param fileName * The name of the file to use as the destination of this print * stream. If the file exists, then it will be truncated to * zero size; otherwise, a new file will be created. The output * will be written to the file and is buffered. * * @param csn * The name of a supported {@linkplain java.nio.charset.Charset * charset} * * @throws FileNotFoundException * If the given file object does not denote an existing, writable * regular file and a new regular file of that name cannot be * created, or if some other error occurs while opening or * creating the file * * @throws SecurityException * If a security manager is present and {@link * SecurityManager#checkWrite checkWrite(fileName)} denies write * access to the file * * @throws UnsupportedEncodingException * If the named charset is not supported * * @since 1.5 */ public PrintStream(String fileName, String csn) throws FileNotFoundException, UnsupportedEncodingException { // ensure charset is checked before the file is opened this(false, toCharset(csn), new FileOutputStream(fileName)); } /** * Creates a new print stream, without automatic line flushing, with the * specified file. This convenience constructor creates the necessary * intermediate {@link java.io.OutputStreamWriter OutputStreamWriter}, * which will encode characters using the {@linkplain * java.nio.charset.Charset#defaultCharset() default charset} for this * instance of the Java virtual machine. * * @param file * The file to use as the destination of this print stream. If the * file exists, then it will be truncated to zero size; otherwise, * a new file will be created. The output will be written to the * file and is buffered. * * @throws FileNotFoundException * If the given file object does not denote an existing, writable * regular file and a new regular file of that name cannot be * created, or if some other error occurs while opening or * creating the file * * @throws SecurityException * If a security manager is present and {@link * SecurityManager#checkWrite checkWrite(file.getPath())} * denies write access to the file * * @since 1.5 */ public PrintStream(File file) throws FileNotFoundException { this(false, new FileOutputStream(file)); } /** * Creates a new print stream, without automatic line flushing, with the * specified file and charset. This convenience constructor creates * the necessary intermediate {@link java.io.OutputStreamWriter * OutputStreamWriter}, which will encode characters using the provided * charset. * * @param file * The file to use as the destination of this print stream. If the * file exists, then it will be truncated to zero size; otherwise, * a new file will be created. The output will be written to the * file and is buffered. * * @param csn * The name of a supported {@linkplain java.nio.charset.Charset * charset} * * @throws FileNotFoundException * If the given file object does not denote an existing, writable * regular file and a new regular file of that name cannot be * created, or if some other error occurs while opening or * creating the file * * @throws SecurityException * If a security manager is present and {@link * SecurityManager#checkWrite checkWrite(file.getPath())} * denies write access to the file * * @throws UnsupportedEncodingException * If the named charset is not supported * * @since 1.5 */ public PrintStream(File file, String csn) throws FileNotFoundException, UnsupportedEncodingException { // ensure charset is checked before the file is opened this(false, toCharset(csn), new FileOutputStream(file)); } /** Check to make sure that the stream has not been closed */ private void ensureOpen() throws IOException { if (out == null) throw new IOException("Stream closed"); } /** * Flushes the stream. This is done by writing any buffered output bytes to * the underlying output stream and then flushing that stream. * * @see java.io.OutputStream#flush() */ public void flush() { synchronized (this) { try { ensureOpen(); out.flush(); } catch (IOException x) { trouble = true; } } } private boolean closing = false; /* To avoid recursive closing */ // Android-changed: Lazily initialize textOut. private BufferedWriter getTextOut() { if (textOut == null) { PrintStream proxy = WeakProxy.forObject(this); charOut = charset != null ? new OutputStreamWriter(proxy, charset) : new OutputStreamWriter(proxy); textOut = new BufferedWriter(charOut); } return textOut; } /** * Closes the stream. This is done by flushing the stream and then closing * the underlying output stream. * * @see java.io.OutputStream#close() */ public void close() { synchronized (this) { if (! closing) { closing = true; try { // Android-changed: Lazily initialized. if (textOut != null) { textOut.close(); } out.close(); } catch (IOException x) { trouble = true; } textOut = null; charOut = null; out = null; } } } /** * Flushes the stream and checks its error state. The internal error state * is set to <code>true</code> when the underlying output stream throws an * <code>IOException</code> other than <code>InterruptedIOException</code>, * and when the <code>setError</code> method is invoked. If an operation * on the underlying output stream throws an * <code>InterruptedIOException</code>, then the <code>PrintStream</code> * converts the exception back into an interrupt by doing: * <pre> * Thread.currentThread().interrupt(); * </pre> * or the equivalent. * * @return <code>true</code> if and only if this stream has encountered an * <code>IOException</code> other than * <code>InterruptedIOException</code>, or the * <code>setError</code> method has been invoked */ public boolean checkError() { if (out != null) flush(); if (out instanceof java.io.PrintStream) { PrintStream ps = (PrintStream) out; return ps.checkError(); } return trouble; } /** * Sets the error state of the stream to <code>true</code>. * * <p> This method will cause subsequent invocations of {@link * #checkError()} to return <tt>true</tt> until {@link * #clearError()} is invoked. * * @since JDK1.1 */ protected void setError() { trouble = true; } /** * Clears the internal error state of this stream. * * <p> This method will cause subsequent invocations of {@link * #checkError()} to return <tt>false</tt> until another write * operation fails and invokes {@link #setError()}. * * @since 1.6 */ protected void clearError() { trouble = false; } /* * Exception-catching, synchronized output operations, * which also implement the write() methods of OutputStream */ /** * Writes the specified byte to this stream. If the byte is a newline and * automatic flushing is enabled then the <code>flush</code> method will be * invoked. * * <p> Note that the byte is written as given; to write a character that * will be translated according to the platform's default character * encoding, use the <code>print(char)</code> or <code>println(char)</code> * methods. * * @param b The byte to be written * @see #print(char) * @see #println(char) */ public void write(int b) { try { synchronized (this) { ensureOpen(); out.write(b); if ((b == '\n') && autoFlush) out.flush(); } } catch (InterruptedIOException x) { Thread.currentThread().interrupt(); } catch (IOException x) { trouble = true; } } /** * Writes <code>len</code> bytes from the specified byte array starting at * offset <code>off</code> to this stream. If automatic flushing is * enabled then the <code>flush</code> method will be invoked. * * <p> Note that the bytes will be written as given; to write characters * that will be translated according to the platform's default character * encoding, use the <code>print(char)</code> or <code>println(char)</code> * methods. * * @param buf A byte array * @param off Offset from which to start taking bytes * @param len Number of bytes to write */ public void write(byte buf[], int off, int len) { try { synchronized (this) { ensureOpen(); out.write(buf, off, len); if (autoFlush) out.flush(); } } catch (InterruptedIOException x) { Thread.currentThread().interrupt(); } catch (IOException x) { trouble = true; } } /* * The following private methods on the text- and character-output streams * always flush the stream buffers, so that writes to the underlying byte * stream occur as promptly as with the original PrintStream. */ private void write(char buf[]) { try { synchronized (this) { ensureOpen(); // Android-changed: Lazily initialized. BufferedWriter textOut = getTextOut(); textOut.write(buf); textOut.flushBuffer(); charOut.flushBuffer(); if (autoFlush) { for (int i = 0; i < buf.length; i++) if (buf[i] == '\n') out.flush(); } } } catch (InterruptedIOException x) { Thread.currentThread().interrupt(); } catch (IOException x) { trouble = true; } } private void write(String s) { try { synchronized (this) { ensureOpen(); // Android-changed: Lazily initialized. BufferedWriter textOut = getTextOut(); textOut.write(s); textOut.flushBuffer(); charOut.flushBuffer(); if (autoFlush && (s.indexOf('\n') >= 0)) out.flush(); } } catch (InterruptedIOException x) { Thread.currentThread().interrupt(); } catch (IOException x) { trouble = true; } } private void newLine() { try { synchronized (this) { ensureOpen(); // Android-changed: Lazily initialized. BufferedWriter textOut = getTextOut(); textOut.newLine(); textOut.flushBuffer(); charOut.flushBuffer(); if (autoFlush) out.flush(); } } catch (InterruptedIOException x) { Thread.currentThread().interrupt(); } catch (IOException x) { trouble = true; } } /* Methods that do not terminate lines */ /** * Prints a boolean value. The string produced by <code>{@link * java.lang.String#valueOf(boolean)}</code> is translated into bytes * according to the platform's default character encoding, and these bytes * are written in exactly the manner of the * <code>{@link #write(int)}</code> method. * * @param b The <code>boolean</code> to be printed */ public void print(boolean b) { write(b ? "true" : "false"); } /** * Prints a character. The character is translated into one or more bytes * according to the platform's default character encoding, and these bytes * are written in exactly the manner of the * <code>{@link #write(int)}</code> method. * * @param c The <code>char</code> to be printed */ public void print(char c) { write(String.valueOf(c)); } /** * Prints an integer. The string produced by <code>{@link * java.lang.String#valueOf(int)}</code> is translated into bytes * according to the platform's default character encoding, and these bytes * are written in exactly the manner of the * <code>{@link #write(int)}</code> method. * * @param i The <code>int</code> to be printed * @see java.lang.Integer#toString(int) */ public void print(int i) { write(String.valueOf(i)); } /** * Prints a long integer. The string produced by <code>{@link * java.lang.String#valueOf(long)}</code> is translated into bytes * according to the platform's default character encoding, and these bytes * are written in exactly the manner of the * <code>{@link #write(int)}</code> method. * * @param l The <code>long</code> to be printed * @see java.lang.Long#toString(long) */ public void print(long l) { write(String.valueOf(l)); } /** * Prints a floating-point number. The string produced by <code>{@link * java.lang.String#valueOf(float)}</code> is translated into bytes * according to the platform's default character encoding, and these bytes * are written in exactly the manner of the * <code>{@link #write(int)}</code> method. * * @param f The <code>float</code> to be printed * @see java.lang.Float#toString(float) */ public void print(float f) { write(String.valueOf(f)); } /** * Prints a double-precision floating-point number. The string produced by * <code>{@link java.lang.String#valueOf(double)}</code> is translated into * bytes according to the platform's default character encoding, and these * bytes are written in exactly the manner of the <code>{@link * #write(int)}</code> method. * * @param d The <code>double</code> to be printed * @see java.lang.Double#toString(double) */ public void print(double d) { write(String.valueOf(d)); } /** * Prints an array of characters. The characters are converted into bytes * according to the platform's default character encoding, and these bytes * are written in exactly the manner of the * <code>{@link #write(int)}</code> method. * * @param s The array of chars to be printed * * @throws NullPointerException If <code>s</code> is <code>null</code> */ public void print(char s[]) { write(s); } /** * Prints a string. If the argument is <code>null</code> then the string * <code>"null"</code> is printed. Otherwise, the string's characters are * converted into bytes according to the platform's default character * encoding, and these bytes are written in exactly the manner of the * <code>{@link #write(int)}</code> method. * * @param s The <code>String</code> to be printed */ public void print(String s) { if (s == null) { s = "null"; } write(s); } /** * Prints an object. The string produced by the <code>{@link * java.lang.String#valueOf(Object)}</code> method is translated into bytes * according to the platform's default character encoding, and these bytes * are written in exactly the manner of the * <code>{@link #write(int)}</code> method. * * @param obj The <code>Object</code> to be printed * @see java.lang.Object#toString() */ public void print(Object obj) { write(String.valueOf(obj)); } /* Methods that do terminate lines */ /** * Terminates the current line by writing the line separator string. The * line separator string is defined by the system property * <code>line.separator</code>, and is not necessarily a single newline * character (<code>'\n'</code>). */ public void println() { newLine(); } /** * Prints a boolean and then terminate the line. This method behaves as * though it invokes <code>{@link #print(boolean)}</code> and then * <code>{@link #println()}</code>. * * @param x The <code>boolean</code> to be printed */ public void println(boolean x) { synchronized (this) { print(x); newLine(); } } /** * Prints a character and then terminate the line. This method behaves as * though it invokes <code>{@link #print(char)}</code> and then * <code>{@link #println()}</code>. * * @param x The <code>char</code> to be printed. */ public void println(char x) { synchronized (this) { print(x); newLine(); } } /** * Prints an integer and then terminate the line. This method behaves as * though it invokes <code>{@link #print(int)}</code> and then * <code>{@link #println()}</code>. * * @param x The <code>int</code> to be printed. */ public void println(int x) { synchronized (this) { print(x); newLine(); } } /** * Prints a long and then terminate the line. This method behaves as * though it invokes <code>{@link #print(long)}</code> and then * <code>{@link #println()}</code>. * * @param x a The <code>long</code> to be printed. */ public void println(long x) { synchronized (this) { print(x); newLine(); } } /** * Prints a float and then terminate the line. This method behaves as * though it invokes <code>{@link #print(float)}</code> and then * <code>{@link #println()}</code>. * * @param x The <code>float</code> to be printed. */ public void println(float x) { synchronized (this) { print(x); newLine(); } } /** * Prints a double and then terminate the line. This method behaves as * though it invokes <code>{@link #print(double)}</code> and then * <code>{@link #println()}</code>. * * @param x The <code>double</code> to be printed. */ public void println(double x) { synchronized (this) { print(x); newLine(); } } /** * Prints an array of characters and then terminate the line. This method * behaves as though it invokes <code>{@link #print(char[])}</code> and * then <code>{@link #println()}</code>. * * @param x an array of chars to print. */ public void println(char x[]) { synchronized (this) { print(x); newLine(); } } /** * Prints a String and then terminate the line. This method behaves as * though it invokes <code>{@link #print(String)}</code> and then * <code>{@link #println()}</code>. * * @param x The <code>String</code> to be printed. */ public void println(String x) { synchronized (this) { print(x); newLine(); } } /** * Prints an Object and then terminate the line. This method calls * at first String.valueOf(x) to get the printed object's string value, * then behaves as * though it invokes <code>{@link #print(String)}</code> and then * <code>{@link #println()}</code>. * * @param x The <code>Object</code> to be printed. */ public void println(Object x) { String s = String.valueOf(x); synchronized (this) { print(s); newLine(); } } /** * A convenience method to write a formatted string to this output stream * using the specified format string and arguments. * * <p> An invocation of this method of the form <tt>out.printf(format, * args)</tt> behaves in exactly the same way as the invocation * * <pre> * out.format(format, args) </pre> * * @param format * A format string as described in <a * href="../util/Formatter.html#syntax">Format string syntax</a> * * @param args * Arguments referenced by the format specifiers in the format * string. If there are more arguments than format specifiers, the * extra arguments are ignored. The number of arguments is * variable and may be zero. The maximum number of arguments is * limited by the maximum dimension of a Java array as defined by * <cite>The Java&trade; Virtual Machine Specification</cite>. * The behaviour on a * <tt>null</tt> argument depends on the <a * href="../util/Formatter.html#syntax">conversion</a>. * * @throws java.util.IllegalFormatException * If a format string contains an illegal syntax, a format * specifier that is incompatible with the given arguments, * insufficient arguments given the format string, or other * illegal conditions. For specification of all possible * formatting errors, see the <a * href="../util/Formatter.html#detail">Details</a> section of the * formatter class specification. * * @throws NullPointerException * If the <tt>format</tt> is <tt>null</tt> * * @return This output stream * * @since 1.5 */ public PrintStream printf(String format, Object ... args) { return format(format, args); } /** * A convenience method to write a formatted string to this output stream * using the specified format string and arguments. * * <p> An invocation of this method of the form <tt>out.printf(l, format, * args)</tt> behaves in exactly the same way as the invocation * * <pre> * out.format(l, format, args) </pre> * * @param l * The {@linkplain java.util.Locale locale} to apply during * formatting. If <tt>l</tt> is <tt>null</tt> then no localization * is applied. * * @param format * A format string as described in <a * href="../util/Formatter.html#syntax">Format string syntax</a> * * @param args * Arguments referenced by the format specifiers in the format * string. If there are more arguments than format specifiers, the * extra arguments are ignored. The number of arguments is * variable and may be zero. The maximum number of arguments is * limited by the maximum dimension of a Java array as defined by * <cite>The Java&trade; Virtual Machine Specification</cite>. * The behaviour on a * <tt>null</tt> argument depends on the <a * href="../util/Formatter.html#syntax">conversion</a>. * * @throws java.util.IllegalFormatException * If a format string contains an illegal syntax, a format * specifier that is incompatible with the given arguments, * insufficient arguments given the format string, or other * illegal conditions. For specification of all possible * formatting errors, see the <a * href="../util/Formatter.html#detail">Details</a> section of the * formatter class specification. * * @throws NullPointerException * If the <tt>format</tt> is <tt>null</tt> * * @return This output stream * * @since 1.5 */ public PrintStream printf(Locale l, String format, Object ... args) { return format(l, format, args); } /** * Writes a formatted string to this output stream using the specified * format string and arguments. * * <p> The locale always used is the one returned by {@link * java.util.Locale#getDefault() Locale.getDefault()}, regardless of any * previous invocations of other formatting methods on this object. * * @param format * A format string as described in <a * href="../util/Formatter.html#syntax">Format string syntax</a> * * @param args * Arguments referenced by the format specifiers in the format * string. If there are more arguments than format specifiers, the * extra arguments are ignored. The number of arguments is * variable and may be zero. The maximum number of arguments is * limited by the maximum dimension of a Java array as defined by * <cite>The Java&trade; Virtual Machine Specification</cite>. * The behaviour on a * <tt>null</tt> argument depends on the <a * href="../util/Formatter.html#syntax">conversion</a>. * * @throws java.util.IllegalFormatException * If a format string contains an illegal syntax, a format * specifier that is incompatible with the given arguments, * insufficient arguments given the format string, or other * illegal conditions. For specification of all possible * formatting errors, see the <a * href="../util/Formatter.html#detail">Details</a> section of the * formatter class specification. * * @throws NullPointerException * If the <tt>format</tt> is <tt>null</tt> * * @return This output stream * * @since 1.5 */ public PrintStream format(String format, Object ... args) { try { synchronized (this) { ensureOpen(); if ((formatter == null) || (formatter.locale() != Locale.getDefault())) formatter = new Formatter((Appendable) WeakProxy.forObject(this)); formatter.format(Locale.getDefault(), format, args); } } catch (InterruptedIOException x) { Thread.currentThread().interrupt(); } catch (IOException x) { trouble = true; } return this; } /** * Writes a formatted string to this output stream using the specified * format string and arguments. * * @param l * The {@linkplain java.util.Locale locale} to apply during * formatting. If <tt>l</tt> is <tt>null</tt> then no localization * is applied. * * @param format * A format string as described in <a * href="../util/Formatter.html#syntax">Format string syntax</a> * * @param args * Arguments referenced by the format specifiers in the format * string. If there are more arguments than format specifiers, the * extra arguments are ignored. The number of arguments is * variable and may be zero. The maximum number of arguments is * limited by the maximum dimension of a Java array as defined by * <cite>The Java&trade; Virtual Machine Specification</cite>. * The behaviour on a * <tt>null</tt> argument depends on the <a * href="../util/Formatter.html#syntax">conversion</a>. * * @throws java.util.IllegalFormatException * If a format string contains an illegal syntax, a format * specifier that is incompatible with the given arguments, * insufficient arguments given the format string, or other * illegal conditions. For specification of all possible * formatting errors, see the <a * href="../util/Formatter.html#detail">Details</a> section of the * formatter class specification. * * @throws NullPointerException * If the <tt>format</tt> is <tt>null</tt> * * @return This output stream * * @since 1.5 */ public PrintStream format(Locale l, String format, Object ... args) { try { synchronized (this) { ensureOpen(); if ((formatter == null) || (formatter.locale() != l)) formatter = new Formatter(WeakProxy.forObject(this), l); formatter.format(l, format, args); } } catch (InterruptedIOException x) { Thread.currentThread().interrupt(); } catch (IOException x) { trouble = true; } return this; } /** * Appends the specified character sequence to this output stream. * * <p> An invocation of this method of the form <tt>out.append(csq)</tt> * behaves in exactly the same way as the invocation * * <pre> * out.print(csq.toString()) </pre> * * <p> Depending on the specification of <tt>toString</tt> for the * character sequence <tt>csq</tt>, the entire sequence may not be * appended. For instance, invoking then <tt>toString</tt> method of a * character buffer will return a subsequence whose content depends upon * the buffer's position and limit. * * @param csq * The character sequence to append. If <tt>csq</tt> is * <tt>null</tt>, then the four characters <tt>"null"</tt> are * appended to this output stream. * * @return This output stream * * @since 1.5 */ public PrintStream append(CharSequence csq) { if (csq == null) print("null"); else print(csq.toString()); return this; } /** * Appends a subsequence of the specified character sequence to this output * stream. * * <p> An invocation of this method of the form <tt>out.append(csq, start, * end)</tt> when <tt>csq</tt> is not <tt>null</tt>, behaves in * exactly the same way as the invocation * * <pre> * out.print(csq.subSequence(start, end).toString()) </pre> * * @param csq * The character sequence from which a subsequence will be * appended. If <tt>csq</tt> is <tt>null</tt>, then characters * will be appended as if <tt>csq</tt> contained the four * characters <tt>"null"</tt>. * * @param start * The index of the first character in the subsequence * * @param end * The index of the character following the last character in the * subsequence * * @return This output stream * * @throws IndexOutOfBoundsException * If <tt>start</tt> or <tt>end</tt> are negative, <tt>start</tt> * is greater than <tt>end</tt>, or <tt>end</tt> is greater than * <tt>csq.length()</tt> * * @since 1.5 */ public PrintStream append(CharSequence csq, int start, int end) { CharSequence cs = (csq == null ? "null" : csq); write(cs.subSequence(start, end).toString()); return this; } /** * Appends the specified character to this output stream. * * <p> An invocation of this method of the form <tt>out.append(c)</tt> * behaves in exactly the same way as the invocation * * <pre> * out.print(c) </pre> * * @param c * The 16-bit character to append * * @return This output stream * * @since 1.5 */ public PrintStream append(char c) { print(c); return this; } }
apache-2.0
swagiaal/kubernetes
test/e2e/resource_usage_gatherer.go
7581
/* Copyright 2015 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package e2e import ( "bytes" "fmt" "math" "sort" "strconv" "strings" "sync" "text/tabwriter" "time" . "github.com/onsi/gomega" "k8s.io/kubernetes/pkg/api" client "k8s.io/kubernetes/pkg/client/unversioned" utilruntime "k8s.io/kubernetes/pkg/util/runtime" ) const ( resourceDataGatheringPeriodSeconds = 60 ) type resourceConstraint struct { cpuConstraint float64 memoryConstraint uint64 } type containerResourceGatherer struct { usageTimeseries map[time.Time]resourceUsagePerContainer stopCh chan struct{} wg sync.WaitGroup } type SingleContainerSummary struct { Name string Cpu float64 Mem uint64 } // we can't have int here, as JSON does not accept integer keys. type ResourceUsageSummary map[string][]SingleContainerSummary func (s *ResourceUsageSummary) PrintHumanReadable() string { buf := &bytes.Buffer{} w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0) for perc, summaries := range *s { buf.WriteString(fmt.Sprintf("%v percentile:\n", perc)) fmt.Fprintf(w, "container\tcpu(cores)\tmemory(MB)\n") for _, summary := range summaries { fmt.Fprintf(w, "%q\t%.3f\t%.2f\n", summary.Name, summary.Cpu, float64(summary.Mem)/(1024*1024)) } w.Flush() } return buf.String() } func (s *ResourceUsageSummary) PrintJSON() string { return prettyPrintJSON(*s) } func (g *containerResourceGatherer) startGatheringData(c *client.Client, period time.Duration) { g.usageTimeseries = make(map[time.Time]resourceUsagePerContainer) g.wg.Add(1) g.stopCh = make(chan struct{}) go func() error { defer utilruntime.HandleCrash() defer g.wg.Done() for { select { case <-time.After(period): now := time.Now() data, err := g.getKubeSystemContainersResourceUsage(c) if err != nil { Logf("Error while getting resource usage: %v", err) continue } g.usageTimeseries[now] = data case <-g.stopCh: Logf("Stop channel is closed. Stopping gatherer.") return nil } } }() } func (g *containerResourceGatherer) stopAndSummarize(percentiles []int, constraints map[string]resourceConstraint) *ResourceUsageSummary { close(g.stopCh) Logf("Closed stop channel.") g.wg.Wait() Logf("Waitgroup finished.") if len(percentiles) == 0 { Logf("Warning! Empty percentile list for stopAndPrintData.") return &ResourceUsageSummary{} } stats := g.computePercentiles(g.usageTimeseries, percentiles) sortedKeys := []string{} for name := range stats[percentiles[0]] { sortedKeys = append(sortedKeys, name) } sort.Strings(sortedKeys) violatedConstraints := make([]string, 0) summary := make(ResourceUsageSummary) for _, perc := range percentiles { for _, name := range sortedKeys { usage := stats[perc][name] summary[strconv.Itoa(perc)] = append(summary[strconv.Itoa(perc)], SingleContainerSummary{ Name: name, Cpu: usage.CPUUsageInCores, Mem: usage.MemoryWorkingSetInBytes, }) // Verifying 99th percentile of resource usage if perc == 99 { // Name has a form: <pod_name>/<container_name> containerName := strings.Split(name, "/")[1] if constraint, ok := constraints[containerName]; ok { if usage.CPUUsageInCores > constraint.cpuConstraint { violatedConstraints = append( violatedConstraints, fmt.Sprintf("Container %v is using %v/%v CPU", name, usage.CPUUsageInCores, constraint.cpuConstraint, ), ) } if usage.MemoryWorkingSetInBytes > constraint.memoryConstraint { violatedConstraints = append( violatedConstraints, fmt.Sprintf("Container %v is using %v/%v MB of memory", name, float64(usage.MemoryWorkingSetInBytes)/(1024*1024), float64(constraint.memoryConstraint)/(1024*1024), ), ) } } } } } Expect(violatedConstraints).To(BeEmpty()) return &summary } func (g *containerResourceGatherer) computePercentiles(timeSeries map[time.Time]resourceUsagePerContainer, percentilesToCompute []int) map[int]resourceUsagePerContainer { if len(timeSeries) == 0 { return make(map[int]resourceUsagePerContainer) } dataMap := make(map[string]*usageDataPerContainer) for _, singleStatistic := range timeSeries { for name, data := range singleStatistic { if dataMap[name] == nil { dataMap[name] = &usageDataPerContainer{ cpuData: make([]float64, len(timeSeries)), memUseData: make([]uint64, len(timeSeries)), memWorkSetData: make([]uint64, len(timeSeries)), } } dataMap[name].cpuData = append(dataMap[name].cpuData, data.CPUUsageInCores) dataMap[name].memUseData = append(dataMap[name].memUseData, data.MemoryUsageInBytes) dataMap[name].memWorkSetData = append(dataMap[name].memWorkSetData, data.MemoryWorkingSetInBytes) } } for _, v := range dataMap { sort.Float64s(v.cpuData) sort.Sort(uint64arr(v.memUseData)) sort.Sort(uint64arr(v.memWorkSetData)) } result := make(map[int]resourceUsagePerContainer) for _, perc := range percentilesToCompute { data := make(resourceUsagePerContainer) for k, v := range dataMap { percentileIndex := int(math.Ceil(float64(len(v.cpuData)*perc)/100)) - 1 data[k] = &containerResourceUsage{ Name: k, CPUUsageInCores: v.cpuData[percentileIndex], MemoryUsageInBytes: v.memUseData[percentileIndex], MemoryWorkingSetInBytes: v.memWorkSetData[percentileIndex], } } result[perc] = data } return result } func (g *containerResourceGatherer) getKubeSystemContainersResourceUsage(c *client.Client) (resourceUsagePerContainer, error) { pods, err := c.Pods("kube-system").List(api.ListOptions{}) if err != nil { return resourceUsagePerContainer{}, err } nodes, err := c.Nodes().List(api.ListOptions{}) if err != nil { return resourceUsagePerContainer{}, err } containerIDToNameMap := make(map[string]string) containerIDs := make([]string, 0) for _, pod := range pods.Items { for _, container := range pod.Status.ContainerStatuses { containerID := strings.TrimPrefix(container.ContainerID, "docker:/") containerIDToNameMap[containerID] = pod.Name + "/" + container.Name containerIDs = append(containerIDs, containerID) } } mutex := sync.Mutex{} wg := sync.WaitGroup{} wg.Add(len(nodes.Items)) errors := make([]error, 0) nameToUsageMap := make(resourceUsagePerContainer, len(containerIDToNameMap)) for _, node := range nodes.Items { go func(nodeName string) { defer utilruntime.HandleCrash() defer wg.Done() nodeUsage, err := getOneTimeResourceUsageOnNode(c, nodeName, 15*time.Second, func() []string { return containerIDs }, true) mutex.Lock() defer mutex.Unlock() if err != nil { errors = append(errors, err) return } for k, v := range nodeUsage { nameToUsageMap[containerIDToNameMap[k]] = v } }(node.Name) } wg.Wait() if len(errors) != 0 { return resourceUsagePerContainer{}, fmt.Errorf("Errors while gathering usage data: %v", errors) } return nameToUsageMap, nil }
apache-2.0