target | src_fm | src_fm_fc | src_fm_fc_co | src_fm_fc_ms | src_fm_fc_ms_ff |
---|---|---|---|---|---|
@Test public void doubleSortWithExchange() { ExternalSort es1 = new ExternalSort(OpProps.prototype(0, Long.MAX_VALUE).cloneWithMemoryExpensive(true).cloneWithMemoryFactor(options.getOption(SORT_FACTOR)).cloneWithBound(options.getOption(SORT_BOUNDED)), ARBTRIARY_LEAF, Collections.emptyList(), false); SingleSender ss = new SingleSender(OpProps.prototype(1, Long.MAX_VALUE).cloneWithMemoryFactor(options.getOption(SORT_FACTOR)).cloneWithBound(options.getOption(SORT_BOUNDED)), Mockito.mock(BatchSchema.class), es1, 0, MinorFragmentIndexEndpoint.newBuilder().setMinorFragmentId(0).build()); Fragment f1 = new Fragment(); f1.addOperator(ss); Wrapper w1 = new Wrapper(f1, 0); w1.overrideEndpoints(Collections.singletonList(N1)); UnorderedReceiver or = new UnorderedReceiver(OpProps.prototype(1, Long.MAX_VALUE), Mockito.mock(BatchSchema.class), 0, Collections.emptyList(), false); ExternalSort es2 = new ExternalSort(OpProps.prototype(0, Long.MAX_VALUE).cloneWithMemoryExpensive(true).cloneWithMemoryFactor(options.getOption(SORT_FACTOR)).cloneWithBound(options.getOption(SORT_BOUNDED)), or, Collections.emptyList(), false); Fragment f2 = new Fragment(); f2.addOperator(es2); Wrapper w2 = new Wrapper(f2, 0); w2.overrideEndpoints(Collections.singletonList(N1)); MemoryAllocationUtilities.setMemory(options, ImmutableMap.of(f1, w1, f2, w2), 10); assertEquals(3l, es1.getProps().getMemLimit()); assertEquals(3l, es2.getProps().getMemLimit()); } | @VisibleForTesting static void setMemory(final OptionManager optionManager, Map<Fragment, Wrapper> fragments, long maxMemoryPerNodePerQuery) { final ArrayListMultimap<NodeEndpoint, PhysicalOperator> consideredOps = ArrayListMultimap.create(); final ArrayListMultimap<NodeEndpoint, PhysicalOperator> nonConsideredOps = ArrayListMultimap.create(); long queryMaxAllocation = Long.MAX_VALUE; for(Entry<Fragment, Wrapper> entry: fragments.entrySet()) { FindConsideredOperators fco = new FindConsideredOperators(); entry.getKey().getRoot().accept(fco, null); for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { consideredOps.putAll(e, fco.getConsideredOperators()); } for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { nonConsideredOps.putAll(e, fco.getNonConsideredOperators()); } } for(NodeEndpoint ep : consideredOps.keySet()) { long outsideReserve = nonConsideredOps.get(ep).stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); List<PhysicalOperator> ops = consideredOps.get(ep); long consideredOpsReserve = ops.stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); if (outsideReserve + consideredOpsReserve > queryMaxAllocation) { throw UserException.resourceError() .message("Query was cancelled because the initial memory requirement (%s) is greater than the job memory limit set by the administrator (%s).", PrettyPrintUtils.bytePrint(outsideReserve + consideredOpsReserve, true), PrettyPrintUtils.bytePrint(queryMaxAllocation, true)) .build(logger); } final double totalWeights = ops.stream().mapToDouble(t -> t.getProps().getMemoryFactor()).sum(); final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true)) .build(logger); } final double baseWeight = memoryForHeavyOperations/totalWeights; ops.stream() .filter(op -> op.getProps().isMemoryBound()) .forEach(op -> { long targetValue = (long) (baseWeight * op.getProps().getMemoryFactor()); targetValue = Math.max(Math.min(targetValue, op.getProps().getMemLimit()), op.getProps().getMemReserve()); long lowLimit = op.getProps().getMemLowLimit(); long highLimit = op.getProps().getMemLimit(); op.getProps().setMemLimit(targetValue); if (targetValue < lowLimit) { op.getProps().setMemLimit(lowLimit); } if (targetValue > highLimit) { op.getProps().setMemLimit(highLimit); } }); } } | MemoryAllocationUtilities { @VisibleForTesting static void setMemory(final OptionManager optionManager, Map<Fragment, Wrapper> fragments, long maxMemoryPerNodePerQuery) { final ArrayListMultimap<NodeEndpoint, PhysicalOperator> consideredOps = ArrayListMultimap.create(); final ArrayListMultimap<NodeEndpoint, PhysicalOperator> nonConsideredOps = ArrayListMultimap.create(); long queryMaxAllocation = Long.MAX_VALUE; for(Entry<Fragment, Wrapper> entry: fragments.entrySet()) { FindConsideredOperators fco = new FindConsideredOperators(); entry.getKey().getRoot().accept(fco, null); for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { consideredOps.putAll(e, fco.getConsideredOperators()); } for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { nonConsideredOps.putAll(e, fco.getNonConsideredOperators()); } } for(NodeEndpoint ep : consideredOps.keySet()) { long outsideReserve = nonConsideredOps.get(ep).stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); List<PhysicalOperator> ops = consideredOps.get(ep); long consideredOpsReserve = ops.stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); if (outsideReserve + consideredOpsReserve > queryMaxAllocation) { throw UserException.resourceError() .message("Query was cancelled because the initial memory requirement (%s) is greater than the job memory limit set by the administrator (%s).", PrettyPrintUtils.bytePrint(outsideReserve + consideredOpsReserve, true), PrettyPrintUtils.bytePrint(queryMaxAllocation, true)) .build(logger); } final double totalWeights = ops.stream().mapToDouble(t -> t.getProps().getMemoryFactor()).sum(); final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true)) .build(logger); } final double baseWeight = memoryForHeavyOperations/totalWeights; ops.stream() .filter(op -> op.getProps().isMemoryBound()) .forEach(op -> { long targetValue = (long) (baseWeight * op.getProps().getMemoryFactor()); targetValue = Math.max(Math.min(targetValue, op.getProps().getMemLimit()), op.getProps().getMemReserve()); long lowLimit = op.getProps().getMemLowLimit(); long highLimit = op.getProps().getMemLimit(); op.getProps().setMemLimit(targetValue); if (targetValue < lowLimit) { op.getProps().setMemLimit(lowLimit); } if (targetValue > highLimit) { op.getProps().setMemLimit(highLimit); } }); } } } | MemoryAllocationUtilities { @VisibleForTesting static void setMemory(final OptionManager optionManager, Map<Fragment, Wrapper> fragments, long maxMemoryPerNodePerQuery) { final ArrayListMultimap<NodeEndpoint, PhysicalOperator> consideredOps = ArrayListMultimap.create(); final ArrayListMultimap<NodeEndpoint, PhysicalOperator> nonConsideredOps = ArrayListMultimap.create(); long queryMaxAllocation = Long.MAX_VALUE; for(Entry<Fragment, Wrapper> entry: fragments.entrySet()) { FindConsideredOperators fco = new FindConsideredOperators(); entry.getKey().getRoot().accept(fco, null); for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { consideredOps.putAll(e, fco.getConsideredOperators()); } for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { nonConsideredOps.putAll(e, fco.getNonConsideredOperators()); } } for(NodeEndpoint ep : consideredOps.keySet()) { long outsideReserve = nonConsideredOps.get(ep).stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); List<PhysicalOperator> ops = consideredOps.get(ep); long consideredOpsReserve = ops.stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); if (outsideReserve + consideredOpsReserve > queryMaxAllocation) { throw UserException.resourceError() .message("Query was cancelled because the initial memory requirement (%s) is greater than the job memory limit set by the administrator (%s).", PrettyPrintUtils.bytePrint(outsideReserve + consideredOpsReserve, true), PrettyPrintUtils.bytePrint(queryMaxAllocation, true)) .build(logger); } final double totalWeights = ops.stream().mapToDouble(t -> t.getProps().getMemoryFactor()).sum(); final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true)) .build(logger); } final double baseWeight = memoryForHeavyOperations/totalWeights; ops.stream() .filter(op -> op.getProps().isMemoryBound()) .forEach(op -> { long targetValue = (long) (baseWeight * op.getProps().getMemoryFactor()); targetValue = Math.max(Math.min(targetValue, op.getProps().getMemLimit()), op.getProps().getMemReserve()); long lowLimit = op.getProps().getMemLowLimit(); long highLimit = op.getProps().getMemLimit(); op.getProps().setMemLimit(targetValue); if (targetValue < lowLimit) { op.getProps().setMemLimit(lowLimit); } if (targetValue > highLimit) { op.getProps().setMemLimit(highLimit); } }); } } private MemoryAllocationUtilities(); } | MemoryAllocationUtilities { @VisibleForTesting static void setMemory(final OptionManager optionManager, Map<Fragment, Wrapper> fragments, long maxMemoryPerNodePerQuery) { final ArrayListMultimap<NodeEndpoint, PhysicalOperator> consideredOps = ArrayListMultimap.create(); final ArrayListMultimap<NodeEndpoint, PhysicalOperator> nonConsideredOps = ArrayListMultimap.create(); long queryMaxAllocation = Long.MAX_VALUE; for(Entry<Fragment, Wrapper> entry: fragments.entrySet()) { FindConsideredOperators fco = new FindConsideredOperators(); entry.getKey().getRoot().accept(fco, null); for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { consideredOps.putAll(e, fco.getConsideredOperators()); } for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { nonConsideredOps.putAll(e, fco.getNonConsideredOperators()); } } for(NodeEndpoint ep : consideredOps.keySet()) { long outsideReserve = nonConsideredOps.get(ep).stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); List<PhysicalOperator> ops = consideredOps.get(ep); long consideredOpsReserve = ops.stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); if (outsideReserve + consideredOpsReserve > queryMaxAllocation) { throw UserException.resourceError() .message("Query was cancelled because the initial memory requirement (%s) is greater than the job memory limit set by the administrator (%s).", PrettyPrintUtils.bytePrint(outsideReserve + consideredOpsReserve, true), PrettyPrintUtils.bytePrint(queryMaxAllocation, true)) .build(logger); } final double totalWeights = ops.stream().mapToDouble(t -> t.getProps().getMemoryFactor()).sum(); final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true)) .build(logger); } final double baseWeight = memoryForHeavyOperations/totalWeights; ops.stream() .filter(op -> op.getProps().isMemoryBound()) .forEach(op -> { long targetValue = (long) (baseWeight * op.getProps().getMemoryFactor()); targetValue = Math.max(Math.min(targetValue, op.getProps().getMemLimit()), op.getProps().getMemReserve()); long lowLimit = op.getProps().getMemLowLimit(); long highLimit = op.getProps().getMemLimit(); op.getProps().setMemLimit(targetValue); if (targetValue < lowLimit) { op.getProps().setMemLimit(lowLimit); } if (targetValue > highLimit) { op.getProps().setMemLimit(highLimit); } }); } } private MemoryAllocationUtilities(); static void setupBoundedMemoryAllocations(
final PhysicalPlan plan,
final OptionManager optionManager,
final GroupResourceInformation clusterInfo,
final PlanningSet planningSet,
final long allocatedMemoryPerQuery
); } | MemoryAllocationUtilities { @VisibleForTesting static void setMemory(final OptionManager optionManager, Map<Fragment, Wrapper> fragments, long maxMemoryPerNodePerQuery) { final ArrayListMultimap<NodeEndpoint, PhysicalOperator> consideredOps = ArrayListMultimap.create(); final ArrayListMultimap<NodeEndpoint, PhysicalOperator> nonConsideredOps = ArrayListMultimap.create(); long queryMaxAllocation = Long.MAX_VALUE; for(Entry<Fragment, Wrapper> entry: fragments.entrySet()) { FindConsideredOperators fco = new FindConsideredOperators(); entry.getKey().getRoot().accept(fco, null); for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { consideredOps.putAll(e, fco.getConsideredOperators()); } for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { nonConsideredOps.putAll(e, fco.getNonConsideredOperators()); } } for(NodeEndpoint ep : consideredOps.keySet()) { long outsideReserve = nonConsideredOps.get(ep).stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); List<PhysicalOperator> ops = consideredOps.get(ep); long consideredOpsReserve = ops.stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); if (outsideReserve + consideredOpsReserve > queryMaxAllocation) { throw UserException.resourceError() .message("Query was cancelled because the initial memory requirement (%s) is greater than the job memory limit set by the administrator (%s).", PrettyPrintUtils.bytePrint(outsideReserve + consideredOpsReserve, true), PrettyPrintUtils.bytePrint(queryMaxAllocation, true)) .build(logger); } final double totalWeights = ops.stream().mapToDouble(t -> t.getProps().getMemoryFactor()).sum(); final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true)) .build(logger); } final double baseWeight = memoryForHeavyOperations/totalWeights; ops.stream() .filter(op -> op.getProps().isMemoryBound()) .forEach(op -> { long targetValue = (long) (baseWeight * op.getProps().getMemoryFactor()); targetValue = Math.max(Math.min(targetValue, op.getProps().getMemLimit()), op.getProps().getMemReserve()); long lowLimit = op.getProps().getMemLowLimit(); long highLimit = op.getProps().getMemLimit(); op.getProps().setMemLimit(targetValue); if (targetValue < lowLimit) { op.getProps().setMemLimit(lowLimit); } if (targetValue > highLimit) { op.getProps().setMemLimit(highLimit); } }); } } private MemoryAllocationUtilities(); static void setupBoundedMemoryAllocations(
final PhysicalPlan plan,
final OptionManager optionManager,
final GroupResourceInformation clusterInfo,
final PlanningSet planningSet,
final long allocatedMemoryPerQuery
); } |
@Test public void doubleSortWithExchangeUnbalancedNodes() { ExternalSort es1 = new ExternalSort(OpProps.prototype(0, Long.MAX_VALUE).cloneWithMemoryExpensive(true).cloneWithMemoryFactor(options.getOption(SORT_FACTOR)).cloneWithBound(options.getOption(SORT_BOUNDED)), ARBTRIARY_LEAF, Collections.emptyList(), false); SingleSender ss = new SingleSender(OpProps.prototype(1, Long.MAX_VALUE).cloneWithMemoryFactor(options.getOption(SORT_FACTOR)).cloneWithBound(options.getOption(SORT_BOUNDED)), Mockito.mock(BatchSchema.class), es1, 0, MinorFragmentIndexEndpoint.newBuilder().setMinorFragmentId(0).build()); Fragment f1 = new Fragment(); f1.addOperator(ss); Wrapper w1 = new Wrapper(f1, 0); w1.overrideEndpoints(Arrays.asList(N1, N2)); UnorderedReceiver or = new UnorderedReceiver(OpProps.prototype(1, Long.MAX_VALUE), Mockito.mock(BatchSchema.class), 0, Collections.emptyList(), false); ExternalSort es2 = new ExternalSort(OpProps.prototype(0, Long.MAX_VALUE).cloneWithMemoryExpensive(true).cloneWithMemoryFactor(options.getOption(SORT_FACTOR)).cloneWithBound(options.getOption(SORT_BOUNDED)), or, Collections.emptyList(), false); Fragment f2 = new Fragment(); f2.addOperator(es2); Wrapper w2 = new Wrapper(f2, 0); w2.overrideEndpoints(Collections.singletonList(N1)); MemoryAllocationUtilities.setMemory(options, ImmutableMap.of(f1, w1, f2, w2), 10); assertEquals(3l, es1.getProps().getMemLimit()); assertEquals(3l, es2.getProps().getMemLimit()); } | @VisibleForTesting static void setMemory(final OptionManager optionManager, Map<Fragment, Wrapper> fragments, long maxMemoryPerNodePerQuery) { final ArrayListMultimap<NodeEndpoint, PhysicalOperator> consideredOps = ArrayListMultimap.create(); final ArrayListMultimap<NodeEndpoint, PhysicalOperator> nonConsideredOps = ArrayListMultimap.create(); long queryMaxAllocation = Long.MAX_VALUE; for(Entry<Fragment, Wrapper> entry: fragments.entrySet()) { FindConsideredOperators fco = new FindConsideredOperators(); entry.getKey().getRoot().accept(fco, null); for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { consideredOps.putAll(e, fco.getConsideredOperators()); } for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { nonConsideredOps.putAll(e, fco.getNonConsideredOperators()); } } for(NodeEndpoint ep : consideredOps.keySet()) { long outsideReserve = nonConsideredOps.get(ep).stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); List<PhysicalOperator> ops = consideredOps.get(ep); long consideredOpsReserve = ops.stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); if (outsideReserve + consideredOpsReserve > queryMaxAllocation) { throw UserException.resourceError() .message("Query was cancelled because the initial memory requirement (%s) is greater than the job memory limit set by the administrator (%s).", PrettyPrintUtils.bytePrint(outsideReserve + consideredOpsReserve, true), PrettyPrintUtils.bytePrint(queryMaxAllocation, true)) .build(logger); } final double totalWeights = ops.stream().mapToDouble(t -> t.getProps().getMemoryFactor()).sum(); final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true)) .build(logger); } final double baseWeight = memoryForHeavyOperations/totalWeights; ops.stream() .filter(op -> op.getProps().isMemoryBound()) .forEach(op -> { long targetValue = (long) (baseWeight * op.getProps().getMemoryFactor()); targetValue = Math.max(Math.min(targetValue, op.getProps().getMemLimit()), op.getProps().getMemReserve()); long lowLimit = op.getProps().getMemLowLimit(); long highLimit = op.getProps().getMemLimit(); op.getProps().setMemLimit(targetValue); if (targetValue < lowLimit) { op.getProps().setMemLimit(lowLimit); } if (targetValue > highLimit) { op.getProps().setMemLimit(highLimit); } }); } } | MemoryAllocationUtilities { @VisibleForTesting static void setMemory(final OptionManager optionManager, Map<Fragment, Wrapper> fragments, long maxMemoryPerNodePerQuery) { final ArrayListMultimap<NodeEndpoint, PhysicalOperator> consideredOps = ArrayListMultimap.create(); final ArrayListMultimap<NodeEndpoint, PhysicalOperator> nonConsideredOps = ArrayListMultimap.create(); long queryMaxAllocation = Long.MAX_VALUE; for(Entry<Fragment, Wrapper> entry: fragments.entrySet()) { FindConsideredOperators fco = new FindConsideredOperators(); entry.getKey().getRoot().accept(fco, null); for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { consideredOps.putAll(e, fco.getConsideredOperators()); } for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { nonConsideredOps.putAll(e, fco.getNonConsideredOperators()); } } for(NodeEndpoint ep : consideredOps.keySet()) { long outsideReserve = nonConsideredOps.get(ep).stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); List<PhysicalOperator> ops = consideredOps.get(ep); long consideredOpsReserve = ops.stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); if (outsideReserve + consideredOpsReserve > queryMaxAllocation) { throw UserException.resourceError() .message("Query was cancelled because the initial memory requirement (%s) is greater than the job memory limit set by the administrator (%s).", PrettyPrintUtils.bytePrint(outsideReserve + consideredOpsReserve, true), PrettyPrintUtils.bytePrint(queryMaxAllocation, true)) .build(logger); } final double totalWeights = ops.stream().mapToDouble(t -> t.getProps().getMemoryFactor()).sum(); final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true)) .build(logger); } final double baseWeight = memoryForHeavyOperations/totalWeights; ops.stream() .filter(op -> op.getProps().isMemoryBound()) .forEach(op -> { long targetValue = (long) (baseWeight * op.getProps().getMemoryFactor()); targetValue = Math.max(Math.min(targetValue, op.getProps().getMemLimit()), op.getProps().getMemReserve()); long lowLimit = op.getProps().getMemLowLimit(); long highLimit = op.getProps().getMemLimit(); op.getProps().setMemLimit(targetValue); if (targetValue < lowLimit) { op.getProps().setMemLimit(lowLimit); } if (targetValue > highLimit) { op.getProps().setMemLimit(highLimit); } }); } } } | MemoryAllocationUtilities { @VisibleForTesting static void setMemory(final OptionManager optionManager, Map<Fragment, Wrapper> fragments, long maxMemoryPerNodePerQuery) { final ArrayListMultimap<NodeEndpoint, PhysicalOperator> consideredOps = ArrayListMultimap.create(); final ArrayListMultimap<NodeEndpoint, PhysicalOperator> nonConsideredOps = ArrayListMultimap.create(); long queryMaxAllocation = Long.MAX_VALUE; for(Entry<Fragment, Wrapper> entry: fragments.entrySet()) { FindConsideredOperators fco = new FindConsideredOperators(); entry.getKey().getRoot().accept(fco, null); for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { consideredOps.putAll(e, fco.getConsideredOperators()); } for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { nonConsideredOps.putAll(e, fco.getNonConsideredOperators()); } } for(NodeEndpoint ep : consideredOps.keySet()) { long outsideReserve = nonConsideredOps.get(ep).stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); List<PhysicalOperator> ops = consideredOps.get(ep); long consideredOpsReserve = ops.stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); if (outsideReserve + consideredOpsReserve > queryMaxAllocation) { throw UserException.resourceError() .message("Query was cancelled because the initial memory requirement (%s) is greater than the job memory limit set by the administrator (%s).", PrettyPrintUtils.bytePrint(outsideReserve + consideredOpsReserve, true), PrettyPrintUtils.bytePrint(queryMaxAllocation, true)) .build(logger); } final double totalWeights = ops.stream().mapToDouble(t -> t.getProps().getMemoryFactor()).sum(); final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true)) .build(logger); } final double baseWeight = memoryForHeavyOperations/totalWeights; ops.stream() .filter(op -> op.getProps().isMemoryBound()) .forEach(op -> { long targetValue = (long) (baseWeight * op.getProps().getMemoryFactor()); targetValue = Math.max(Math.min(targetValue, op.getProps().getMemLimit()), op.getProps().getMemReserve()); long lowLimit = op.getProps().getMemLowLimit(); long highLimit = op.getProps().getMemLimit(); op.getProps().setMemLimit(targetValue); if (targetValue < lowLimit) { op.getProps().setMemLimit(lowLimit); } if (targetValue > highLimit) { op.getProps().setMemLimit(highLimit); } }); } } private MemoryAllocationUtilities(); } | MemoryAllocationUtilities { @VisibleForTesting static void setMemory(final OptionManager optionManager, Map<Fragment, Wrapper> fragments, long maxMemoryPerNodePerQuery) { final ArrayListMultimap<NodeEndpoint, PhysicalOperator> consideredOps = ArrayListMultimap.create(); final ArrayListMultimap<NodeEndpoint, PhysicalOperator> nonConsideredOps = ArrayListMultimap.create(); long queryMaxAllocation = Long.MAX_VALUE; for(Entry<Fragment, Wrapper> entry: fragments.entrySet()) { FindConsideredOperators fco = new FindConsideredOperators(); entry.getKey().getRoot().accept(fco, null); for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { consideredOps.putAll(e, fco.getConsideredOperators()); } for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { nonConsideredOps.putAll(e, fco.getNonConsideredOperators()); } } for(NodeEndpoint ep : consideredOps.keySet()) { long outsideReserve = nonConsideredOps.get(ep).stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); List<PhysicalOperator> ops = consideredOps.get(ep); long consideredOpsReserve = ops.stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); if (outsideReserve + consideredOpsReserve > queryMaxAllocation) { throw UserException.resourceError() .message("Query was cancelled because the initial memory requirement (%s) is greater than the job memory limit set by the administrator (%s).", PrettyPrintUtils.bytePrint(outsideReserve + consideredOpsReserve, true), PrettyPrintUtils.bytePrint(queryMaxAllocation, true)) .build(logger); } final double totalWeights = ops.stream().mapToDouble(t -> t.getProps().getMemoryFactor()).sum(); final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true)) .build(logger); } final double baseWeight = memoryForHeavyOperations/totalWeights; ops.stream() .filter(op -> op.getProps().isMemoryBound()) .forEach(op -> { long targetValue = (long) (baseWeight * op.getProps().getMemoryFactor()); targetValue = Math.max(Math.min(targetValue, op.getProps().getMemLimit()), op.getProps().getMemReserve()); long lowLimit = op.getProps().getMemLowLimit(); long highLimit = op.getProps().getMemLimit(); op.getProps().setMemLimit(targetValue); if (targetValue < lowLimit) { op.getProps().setMemLimit(lowLimit); } if (targetValue > highLimit) { op.getProps().setMemLimit(highLimit); } }); } } private MemoryAllocationUtilities(); static void setupBoundedMemoryAllocations(
final PhysicalPlan plan,
final OptionManager optionManager,
final GroupResourceInformation clusterInfo,
final PlanningSet planningSet,
final long allocatedMemoryPerQuery
); } | MemoryAllocationUtilities { @VisibleForTesting static void setMemory(final OptionManager optionManager, Map<Fragment, Wrapper> fragments, long maxMemoryPerNodePerQuery) { final ArrayListMultimap<NodeEndpoint, PhysicalOperator> consideredOps = ArrayListMultimap.create(); final ArrayListMultimap<NodeEndpoint, PhysicalOperator> nonConsideredOps = ArrayListMultimap.create(); long queryMaxAllocation = Long.MAX_VALUE; for(Entry<Fragment, Wrapper> entry: fragments.entrySet()) { FindConsideredOperators fco = new FindConsideredOperators(); entry.getKey().getRoot().accept(fco, null); for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { consideredOps.putAll(e, fco.getConsideredOperators()); } for(NodeEndpoint e : entry.getValue().getAssignedEndpoints()) { nonConsideredOps.putAll(e, fco.getNonConsideredOperators()); } } for(NodeEndpoint ep : consideredOps.keySet()) { long outsideReserve = nonConsideredOps.get(ep).stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); List<PhysicalOperator> ops = consideredOps.get(ep); long consideredOpsReserve = ops.stream().mapToLong(t -> t.getProps().getMemReserve()).sum(); if (outsideReserve + consideredOpsReserve > queryMaxAllocation) { throw UserException.resourceError() .message("Query was cancelled because the initial memory requirement (%s) is greater than the job memory limit set by the administrator (%s).", PrettyPrintUtils.bytePrint(outsideReserve + consideredOpsReserve, true), PrettyPrintUtils.bytePrint(queryMaxAllocation, true)) .build(logger); } final double totalWeights = ops.stream().mapToDouble(t -> t.getProps().getMemoryFactor()).sum(); final long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve; if(memoryForHeavyOperations < 1) { throw UserException.memoryError() .message("Query was cancelled because it exceeded the memory limits set by the administrator. Expected at least %s bytes, but only had %s available.", PrettyPrintUtils.bytePrint(outsideReserve, true), PrettyPrintUtils.bytePrint(maxMemoryPerNodePerQuery, true)) .build(logger); } final double baseWeight = memoryForHeavyOperations/totalWeights; ops.stream() .filter(op -> op.getProps().isMemoryBound()) .forEach(op -> { long targetValue = (long) (baseWeight * op.getProps().getMemoryFactor()); targetValue = Math.max(Math.min(targetValue, op.getProps().getMemLimit()), op.getProps().getMemReserve()); long lowLimit = op.getProps().getMemLowLimit(); long highLimit = op.getProps().getMemLimit(); op.getProps().setMemLimit(targetValue); if (targetValue < lowLimit) { op.getProps().setMemLimit(lowLimit); } if (targetValue > highLimit) { op.getProps().setMemLimit(highLimit); } }); } } private MemoryAllocationUtilities(); static void setupBoundedMemoryAllocations(
final PhysicalPlan plan,
final OptionManager optionManager,
final GroupResourceInformation clusterInfo,
final PlanningSet planningSet,
final long allocatedMemoryPerQuery
); } |
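The two sort tests above both assert a limit of 3 because setMemory splits the per-node budget left after fixed reserves among the memory-bound operators in proportion to their memory factors, then clamps each share. The sketch below isolates that computation for one operator; the inputs (outsideReserve = 4, totalWeights = 2) are illustrative stand-ins chosen to land on the asserted value, not numbers read out of the test fixtures, and the OpProps accessors are replaced by plain locals.

```java
// A minimal sketch of the per-operator share computed inside setMemory.
public class MemoryShareSketch {
    public static void main(String[] args) {
        long maxMemoryPerNodePerQuery = 10;
        long outsideReserve = 4;       // sum of reserves of non-considered ops on this node
        double totalWeights = 2.0;     // sum of memory factors of the considered ops
        double memoryFactor = 1.0;     // this operator's factor
        long memReserve = 0, memLowLimit = 0, memLimit = Long.MAX_VALUE;

        long memoryForHeavyOperations = maxMemoryPerNodePerQuery - outsideReserve;
        double baseWeight = memoryForHeavyOperations / totalWeights;
        long target = (long) (baseWeight * memoryFactor);
        target = Math.max(Math.min(target, memLimit), memReserve); // clamp to [reserve, limit]
        if (target < memLowLimit) { target = memLowLimit; }        // low-limit override
        if (target > memLimit)    { target = memLimit; }           // high-limit override
        System.out.println(target); // prints 3 with these illustrative inputs
    }
}
```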
@Test public void testNullRuntimeFilterInfoObj() { List<RuntimeFilterProbeTarget> probeTargets = RuntimeFilterProbeTarget.getProbeTargets(null); assertTrue(probeTargets.isEmpty()); } | public static List<RuntimeFilterProbeTarget> getProbeTargets(RuntimeFilterInfo runtimeFilterInfo) { final List<RuntimeFilterProbeTarget> targets = new ArrayList<>(); try { if (runtimeFilterInfo==null) { return targets; } for (RuntimeFilterEntry entry : runtimeFilterInfo.getPartitionJoinColumns()) { RuntimeFilterProbeTarget probeTarget = findOrCreateNew(targets, entry); probeTarget.addPartitionKey(entry.getBuildFieldName(), entry.getProbeFieldName()); } for (RuntimeFilterEntry entry : runtimeFilterInfo.getNonPartitionJoinColumns()) { RuntimeFilterProbeTarget probeTarget = findOrCreateNew(targets, entry); probeTarget.addNonPartitionKey(entry.getBuildFieldName(), entry.getProbeFieldName()); } } catch (RuntimeException e) { logger.error("Error while establishing probe scan targets from RuntimeFilterInfo", e); } return targets; } | RuntimeFilterProbeTarget { public static List<RuntimeFilterProbeTarget> getProbeTargets(RuntimeFilterInfo runtimeFilterInfo) { final List<RuntimeFilterProbeTarget> targets = new ArrayList<>(); try { if (runtimeFilterInfo==null) { return targets; } for (RuntimeFilterEntry entry : runtimeFilterInfo.getPartitionJoinColumns()) { RuntimeFilterProbeTarget probeTarget = findOrCreateNew(targets, entry); probeTarget.addPartitionKey(entry.getBuildFieldName(), entry.getProbeFieldName()); } for (RuntimeFilterEntry entry : runtimeFilterInfo.getNonPartitionJoinColumns()) { RuntimeFilterProbeTarget probeTarget = findOrCreateNew(targets, entry); probeTarget.addNonPartitionKey(entry.getBuildFieldName(), entry.getProbeFieldName()); } } catch (RuntimeException e) { logger.error("Error while establishing probe scan targets from RuntimeFilterInfo", e); } return targets; } } | RuntimeFilterProbeTarget { public static List<RuntimeFilterProbeTarget> getProbeTargets(RuntimeFilterInfo runtimeFilterInfo) { final List<RuntimeFilterProbeTarget> targets = new ArrayList<>(); try { if (runtimeFilterInfo==null) { return targets; } for (RuntimeFilterEntry entry : runtimeFilterInfo.getPartitionJoinColumns()) { RuntimeFilterProbeTarget probeTarget = findOrCreateNew(targets, entry); probeTarget.addPartitionKey(entry.getBuildFieldName(), entry.getProbeFieldName()); } for (RuntimeFilterEntry entry : runtimeFilterInfo.getNonPartitionJoinColumns()) { RuntimeFilterProbeTarget probeTarget = findOrCreateNew(targets, entry); probeTarget.addNonPartitionKey(entry.getBuildFieldName(), entry.getProbeFieldName()); } } catch (RuntimeException e) { logger.error("Error while establishing probe scan targets from RuntimeFilterInfo", e); } return targets; } RuntimeFilterProbeTarget(int probeScanMajorFragmentId, int probeScanOperatorId); } | RuntimeFilterProbeTarget { public static List<RuntimeFilterProbeTarget> getProbeTargets(RuntimeFilterInfo runtimeFilterInfo) { final List<RuntimeFilterProbeTarget> targets = new ArrayList<>(); try { if (runtimeFilterInfo==null) { return targets; } for (RuntimeFilterEntry entry : runtimeFilterInfo.getPartitionJoinColumns()) { RuntimeFilterProbeTarget probeTarget = findOrCreateNew(targets, entry); probeTarget.addPartitionKey(entry.getBuildFieldName(), entry.getProbeFieldName()); } for (RuntimeFilterEntry entry : runtimeFilterInfo.getNonPartitionJoinColumns()) { RuntimeFilterProbeTarget probeTarget = findOrCreateNew(targets, entry); probeTarget.addNonPartitionKey(entry.getBuildFieldName(), 
entry.getProbeFieldName()); } } catch (RuntimeException e) { logger.error("Error while establishing probe scan targets from RuntimeFilterInfo", e); } return targets; } RuntimeFilterProbeTarget(int probeScanMajorFragmentId, int probeScanOperatorId); boolean isSameProbeCoordinate(int majorFragmentId, int operatorId); List<String> getPartitionBuildTableKeys(); List<String> getPartitionProbeTableKeys(); List<String> getNonPartitionBuildTableKeys(); List<String> getNonPartitionProbeTableKeys(); int getProbeScanMajorFragmentId(); int getProbeScanOperatorId(); @Override String toString(); String toTargetIdString(); static List<RuntimeFilterProbeTarget> getProbeTargets(RuntimeFilterInfo runtimeFilterInfo); } | RuntimeFilterProbeTarget { public static List<RuntimeFilterProbeTarget> getProbeTargets(RuntimeFilterInfo runtimeFilterInfo) { final List<RuntimeFilterProbeTarget> targets = new ArrayList<>(); try { if (runtimeFilterInfo==null) { return targets; } for (RuntimeFilterEntry entry : runtimeFilterInfo.getPartitionJoinColumns()) { RuntimeFilterProbeTarget probeTarget = findOrCreateNew(targets, entry); probeTarget.addPartitionKey(entry.getBuildFieldName(), entry.getProbeFieldName()); } for (RuntimeFilterEntry entry : runtimeFilterInfo.getNonPartitionJoinColumns()) { RuntimeFilterProbeTarget probeTarget = findOrCreateNew(targets, entry); probeTarget.addNonPartitionKey(entry.getBuildFieldName(), entry.getProbeFieldName()); } } catch (RuntimeException e) { logger.error("Error while establishing probe scan targets from RuntimeFilterInfo", e); } return targets; } RuntimeFilterProbeTarget(int probeScanMajorFragmentId, int probeScanOperatorId); boolean isSameProbeCoordinate(int majorFragmentId, int operatorId); List<String> getPartitionBuildTableKeys(); List<String> getPartitionProbeTableKeys(); List<String> getNonPartitionBuildTableKeys(); List<String> getNonPartitionProbeTableKeys(); int getProbeScanMajorFragmentId(); int getProbeScanOperatorId(); @Override String toString(); String toTargetIdString(); static List<RuntimeFilterProbeTarget> getProbeTargets(RuntimeFilterInfo runtimeFilterInfo); } |
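getProbeTargets leans on a private findOrCreateNew helper that this excerpt does not show. One plausible shape for it, built only from the constructor and isSameProbeCoordinate method listed in the cell above, is sketched here: entries that share a probe scan's coordinates accumulate their keys on a single target.

```java
// Hypothetical sketch of findOrCreateNew: reuse the target whose probe
// coordinates match, otherwise create and register a new one. The coordinate
// pair is assumed to come from the RuntimeFilterEntry being processed.
static RuntimeFilterProbeTarget findOrCreateNewSketch(
        java.util.List<RuntimeFilterProbeTarget> targets, int majorFragmentId, int operatorId) {
    for (RuntimeFilterProbeTarget t : targets) {
        if (t.isSameProbeCoordinate(majorFragmentId, operatorId)) {
            return t; // existing target for this probe scan
        }
    }
    RuntimeFilterProbeTarget t = new RuntimeFilterProbeTarget(majorFragmentId, operatorId);
    targets.add(t);
    return t;
}
```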
@Test public void testParseJsonPath4() throws Exception { JsonPath p = JSONElementLocator.parsePath("value[0].a[1]"); assertEquals(p.toString(), 3, p.size()); assertEquals(new JsonPath(new ArrayJsonPathElement(0), new ObjectJsonPathElement("a"), new ArrayJsonPathElement(1)), p); } | public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } | JSONElementLocator { public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } } | JSONElementLocator { public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } JSONElementLocator(String text); } | JSONElementLocator { public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } JSONElementLocator(String text); static JsonPath parsePath(String path); Interval locatePath(JsonPath searchedPath); JsonSelection locate(int selectionStart, int selectionEnd); } | JSONElementLocator { public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } JSONElementLocator(String text); static JsonPath parsePath(String path); Interval locatePath(JsonPath searchedPath); JsonSelection locate(int selectionStart, int selectionEnd); } |
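parsePath only strips a leading placeholder before handing the remainder to the JsonPath constructor, which is why "value[0].a[1]" yields the three elements the test asserts. A minimal sketch of that guard, assuming VALUE_PLACEHOLDER is the literal "value" (consistent with the error message, but not stated in this excerpt):

```java
// Hedged sketch of the placeholder check in JSONElementLocator.parsePath.
static String stripValuePlaceholder(String path) {
    final String VALUE_PLACEHOLDER = "value"; // assumed value of the constant
    if (path.startsWith(VALUE_PLACEHOLDER)) {
        return path.substring(VALUE_PLACEHOLDER.length()); // "value[0].a[1]" -> "[0].a[1]"
    }
    throw new IllegalArgumentException(path + " must start with 'value'");
}
```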
@Test public void testSetup() { try (final BloomFilter bloomFilter = new BloomFilter(bfTestAllocator, TEST_NAME, 40)) { bloomFilter.setup(); assertEquals(bloomFilter.getDataBuffer().capacity(), bloomFilter.getSizeInBytes()); String expectedName = TEST_NAME.substring(TEST_NAME.length() - 24); assertEquals("BoomFilter.getName() is incorrect", expectedName, bloomFilter.getName()); byte[] nameBytes = new byte[24]; bloomFilter.getDataBuffer().getBytes(bloomFilter.getSizeInBytes() - 32, nameBytes); assertEquals("Name in meta bytes not set correctly.", expectedName, new String(nameBytes, StandardCharsets.UTF_8)); assertEquals("Reader index not set correctly", 0, bloomFilter.getDataBuffer().readerIndex()); assertEquals("Writer index not set correctly", bloomFilter.getSizeInBytes(), bloomFilter.getDataBuffer().writerIndex()); for (long i = 0; i < bloomFilter.getSizeInBytes() - 32; i += 8) { long block = bloomFilter.getDataBuffer().getLong(i); assertEquals("Found unclean buffer state", 0L, block); } } } | public void setup() { checkNotNull(this.allocator, "Setup not required for deserialized objects."); this.dataBuffer = this.allocator.buffer(this.sizeInBytes + META_BYTES_CNT); setup(dataBuffer); dataBuffer.writerIndex(0); for (int i = 0; i < sizeInBytes; i += 8) { dataBuffer.writeLong(0l); } byte[] metaBytes = new byte[24]; byte[] nameBytesAll = name.getBytes(StandardCharsets.UTF_8); System.arraycopy(name.getBytes(StandardCharsets.UTF_8), Math.max(0, nameBytesAll.length - 24), metaBytes, 0, Math.min(24, nameBytesAll.length)); this.name = new String(metaBytes, StandardCharsets.UTF_8); this.dataBuffer.writeBytes(metaBytes); this.dataBuffer.writeLong(0L); this.dataBuffer.readerIndex(0); this.numBitsSetLoc = dataBuffer.memoryAddress() + sizeInBytes + META_BYTES_CNT - 8; logger.debug("Bloomfilter {} set up completed.", this.name); } | BloomFilter implements AutoCloseable { public void setup() { checkNotNull(this.allocator, "Setup not required for deserialized objects."); this.dataBuffer = this.allocator.buffer(this.sizeInBytes + META_BYTES_CNT); setup(dataBuffer); dataBuffer.writerIndex(0); for (int i = 0; i < sizeInBytes; i += 8) { dataBuffer.writeLong(0l); } byte[] metaBytes = new byte[24]; byte[] nameBytesAll = name.getBytes(StandardCharsets.UTF_8); System.arraycopy(name.getBytes(StandardCharsets.UTF_8), Math.max(0, nameBytesAll.length - 24), metaBytes, 0, Math.min(24, nameBytesAll.length)); this.name = new String(metaBytes, StandardCharsets.UTF_8); this.dataBuffer.writeBytes(metaBytes); this.dataBuffer.writeLong(0L); this.dataBuffer.readerIndex(0); this.numBitsSetLoc = dataBuffer.memoryAddress() + sizeInBytes + META_BYTES_CNT - 8; logger.debug("Bloomfilter {} set up completed.", this.name); } } | BloomFilter implements AutoCloseable { public void setup() { checkNotNull(this.allocator, "Setup not required for deserialized objects."); this.dataBuffer = this.allocator.buffer(this.sizeInBytes + META_BYTES_CNT); setup(dataBuffer); dataBuffer.writerIndex(0); for (int i = 0; i < sizeInBytes; i += 8) { dataBuffer.writeLong(0l); } byte[] metaBytes = new byte[24]; byte[] nameBytesAll = name.getBytes(StandardCharsets.UTF_8); System.arraycopy(name.getBytes(StandardCharsets.UTF_8), Math.max(0, nameBytesAll.length - 24), metaBytes, 0, Math.min(24, nameBytesAll.length)); this.name = new String(metaBytes, StandardCharsets.UTF_8); this.dataBuffer.writeBytes(metaBytes); this.dataBuffer.writeLong(0L); this.dataBuffer.readerIndex(0); this.numBitsSetLoc = dataBuffer.memoryAddress() + sizeInBytes + META_BYTES_CNT - 8; 
logger.debug("Bloomfilter {} set up completed.", this.name); } BloomFilter(BufferAllocator bufferAllocator, String name, long minSizeBytes); private BloomFilter(ArrowBuf dataBuffer); } | BloomFilter implements AutoCloseable { public void setup() { checkNotNull(this.allocator, "Setup not required for deserialized objects."); this.dataBuffer = this.allocator.buffer(this.sizeInBytes + META_BYTES_CNT); setup(dataBuffer); dataBuffer.writerIndex(0); for (int i = 0; i < sizeInBytes; i += 8) { dataBuffer.writeLong(0l); } byte[] metaBytes = new byte[24]; byte[] nameBytesAll = name.getBytes(StandardCharsets.UTF_8); System.arraycopy(name.getBytes(StandardCharsets.UTF_8), Math.max(0, nameBytesAll.length - 24), metaBytes, 0, Math.min(24, nameBytesAll.length)); this.name = new String(metaBytes, StandardCharsets.UTF_8); this.dataBuffer.writeBytes(metaBytes); this.dataBuffer.writeLong(0L); this.dataBuffer.readerIndex(0); this.numBitsSetLoc = dataBuffer.memoryAddress() + sizeInBytes + META_BYTES_CNT - 8; logger.debug("Bloomfilter {} set up completed.", this.name); } BloomFilter(BufferAllocator bufferAllocator, String name, long minSizeBytes); private BloomFilter(ArrowBuf dataBuffer); void setup(); String getName(); long getSizeInBytes(); static BloomFilter prepareFrom(ArrowBuf dataBuffer); ArrowBuf getDataBuffer(); boolean mightContain(ArrowBuf bloomFilterKey, int length); boolean put(ArrowBuf bloomFilterKey, int length); double getExpectedFPP(); boolean isCrossingMaxFPP(); long getOptimalInsertions(); static long getOptimalSize(long expectedInsertions); void merge(BloomFilter that); @VisibleForTesting long getNumBitsSet(); @Override String toString(); @Override void close(); } | BloomFilter implements AutoCloseable { public void setup() { checkNotNull(this.allocator, "Setup not required for deserialized objects."); this.dataBuffer = this.allocator.buffer(this.sizeInBytes + META_BYTES_CNT); setup(dataBuffer); dataBuffer.writerIndex(0); for (int i = 0; i < sizeInBytes; i += 8) { dataBuffer.writeLong(0l); } byte[] metaBytes = new byte[24]; byte[] nameBytesAll = name.getBytes(StandardCharsets.UTF_8); System.arraycopy(name.getBytes(StandardCharsets.UTF_8), Math.max(0, nameBytesAll.length - 24), metaBytes, 0, Math.min(24, nameBytesAll.length)); this.name = new String(metaBytes, StandardCharsets.UTF_8); this.dataBuffer.writeBytes(metaBytes); this.dataBuffer.writeLong(0L); this.dataBuffer.readerIndex(0); this.numBitsSetLoc = dataBuffer.memoryAddress() + sizeInBytes + META_BYTES_CNT - 8; logger.debug("Bloomfilter {} set up completed.", this.name); } BloomFilter(BufferAllocator bufferAllocator, String name, long minSizeBytes); private BloomFilter(ArrowBuf dataBuffer); void setup(); String getName(); long getSizeInBytes(); static BloomFilter prepareFrom(ArrowBuf dataBuffer); ArrowBuf getDataBuffer(); boolean mightContain(ArrowBuf bloomFilterKey, int length); boolean put(ArrowBuf bloomFilterKey, int length); double getExpectedFPP(); boolean isCrossingMaxFPP(); long getOptimalInsertions(); static long getOptimalSize(long expectedInsertions); void merge(BloomFilter that); @VisibleForTesting long getNumBitsSet(); @Override String toString(); @Override void close(); } |
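testSetup reads the name at getSizeInBytes() - 32 and scans for zeroed longs up to that same boundary, which implies the layout sketched below: the bit array first, then 24 bytes of right-truncated name, then an 8-byte counter of set bits. META_BYTES_CNT = 32 is an inference from those offsets rather than a value shown in this excerpt.

```java
// Buffer layout implied by BloomFilter.setup() and the reads in testSetup:
// [ bit array ][ 24-byte name ][ 8-byte numBitsSet ]
public class BloomLayoutSketch {
    public static void main(String[] args) {
        long total = 40;                 // bloomFilter.getSizeInBytes() == buffer capacity
        long dataBytes = total - 32;     // bit-array region checked for a clean (all-zero) state
        long nameOffset = total - 32;    // start of the 24 name bytes
        long counterOffset = total - 8;  // trailing set-bit counter, initialised to 0
        System.out.println(dataBytes + " " + nameOffset + " " + counterOffset); // 8 8 32
    }
}
```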
@Test public void testIsCrossingMaxFpp() { try (final BloomFilter bloomFilter = new BloomFilter(bfTestAllocator, TEST_NAME, 64); final ArrowBuf keyBuf = bfTestAllocator.buffer(36)) { bloomFilter.setup(); for (int i = 0; i < 1_000_000; i++) { bloomFilter.put(writeKey(keyBuf, UUID.randomUUID().toString()), 36); if (bloomFilter.getExpectedFPP() > 0.05) { break; } } assertTrue(bloomFilter.isCrossingMaxFPP()); } } | public boolean isCrossingMaxFPP() { return getExpectedFPP() > (5 * FPP); } | BloomFilter implements AutoCloseable { public boolean isCrossingMaxFPP() { return getExpectedFPP() > (5 * FPP); } } | BloomFilter implements AutoCloseable { public boolean isCrossingMaxFPP() { return getExpectedFPP() > (5 * FPP); } BloomFilter(BufferAllocator bufferAllocator, String name, long minSizeBytes); private BloomFilter(ArrowBuf dataBuffer); } | BloomFilter implements AutoCloseable { public boolean isCrossingMaxFPP() { return getExpectedFPP() > (5 * FPP); } BloomFilter(BufferAllocator bufferAllocator, String name, long minSizeBytes); private BloomFilter(ArrowBuf dataBuffer); void setup(); String getName(); long getSizeInBytes(); static BloomFilter prepareFrom(ArrowBuf dataBuffer); ArrowBuf getDataBuffer(); boolean mightContain(ArrowBuf bloomFilterKey, int length); boolean put(ArrowBuf bloomFilterKey, int length); double getExpectedFPP(); boolean isCrossingMaxFPP(); long getOptimalInsertions(); static long getOptimalSize(long expectedInsertions); void merge(BloomFilter that); @VisibleForTesting long getNumBitsSet(); @Override String toString(); @Override void close(); } | BloomFilter implements AutoCloseable { public boolean isCrossingMaxFPP() { return getExpectedFPP() > (5 * FPP); } BloomFilter(BufferAllocator bufferAllocator, String name, long minSizeBytes); private BloomFilter(ArrowBuf dataBuffer); void setup(); String getName(); long getSizeInBytes(); static BloomFilter prepareFrom(ArrowBuf dataBuffer); ArrowBuf getDataBuffer(); boolean mightContain(ArrowBuf bloomFilterKey, int length); boolean put(ArrowBuf bloomFilterKey, int length); double getExpectedFPP(); boolean isCrossingMaxFPP(); long getOptimalInsertions(); static long getOptimalSize(long expectedInsertions); void merge(BloomFilter that); @VisibleForTesting long getNumBitsSet(); @Override String toString(); @Override void close(); } |
@Test public void testGetOptimalSize() { assertEquals(40, BloomFilter.getOptimalSize(1)); assertEquals(40, BloomFilter.getOptimalSize(4)); assertEquals(152, BloomFilter.getOptimalSize(100)); assertEquals(1_232, BloomFilter.getOptimalSize(1_000)); assertEquals(1_198_168, BloomFilter.getOptimalSize(1_000_000)); assertEquals(1_198_132_336, BloomFilter.getOptimalSize(1_000_000_000)); } | public static long getOptimalSize(long expectedInsertions) { checkArgument(expectedInsertions > 0); long optimalSize = (long) (-expectedInsertions * Math.log(FPP) / (Math.log(2) * Math.log(2))) / 8; optimalSize = ((optimalSize + 8) / 8) * 8; return optimalSize + META_BYTES_CNT; } | BloomFilter implements AutoCloseable { public static long getOptimalSize(long expectedInsertions) { checkArgument(expectedInsertions > 0); long optimalSize = (long) (-expectedInsertions * Math.log(FPP) / (Math.log(2) * Math.log(2))) / 8; optimalSize = ((optimalSize + 8) / 8) * 8; return optimalSize + META_BYTES_CNT; } } | BloomFilter implements AutoCloseable { public static long getOptimalSize(long expectedInsertions) { checkArgument(expectedInsertions > 0); long optimalSize = (long) (-expectedInsertions * Math.log(FPP) / (Math.log(2) * Math.log(2))) / 8; optimalSize = ((optimalSize + 8) / 8) * 8; return optimalSize + META_BYTES_CNT; } BloomFilter(BufferAllocator bufferAllocator, String name, long minSizeBytes); private BloomFilter(ArrowBuf dataBuffer); } | BloomFilter implements AutoCloseable { public static long getOptimalSize(long expectedInsertions) { checkArgument(expectedInsertions > 0); long optimalSize = (long) (-expectedInsertions * Math.log(FPP) / (Math.log(2) * Math.log(2))) / 8; optimalSize = ((optimalSize + 8) / 8) * 8; return optimalSize + META_BYTES_CNT; } BloomFilter(BufferAllocator bufferAllocator, String name, long minSizeBytes); private BloomFilter(ArrowBuf dataBuffer); void setup(); String getName(); long getSizeInBytes(); static BloomFilter prepareFrom(ArrowBuf dataBuffer); ArrowBuf getDataBuffer(); boolean mightContain(ArrowBuf bloomFilterKey, int length); boolean put(ArrowBuf bloomFilterKey, int length); double getExpectedFPP(); boolean isCrossingMaxFPP(); long getOptimalInsertions(); static long getOptimalSize(long expectedInsertions); void merge(BloomFilter that); @VisibleForTesting long getNumBitsSet(); @Override String toString(); @Override void close(); } | BloomFilter implements AutoCloseable { public static long getOptimalSize(long expectedInsertions) { checkArgument(expectedInsertions > 0); long optimalSize = (long) (-expectedInsertions * Math.log(FPP) / (Math.log(2) * Math.log(2))) / 8; optimalSize = ((optimalSize + 8) / 8) * 8; return optimalSize + META_BYTES_CNT; } BloomFilter(BufferAllocator bufferAllocator, String name, long minSizeBytes); private BloomFilter(ArrowBuf dataBuffer); void setup(); String getName(); long getSizeInBytes(); static BloomFilter prepareFrom(ArrowBuf dataBuffer); ArrowBuf getDataBuffer(); boolean mightContain(ArrowBuf bloomFilterKey, int length); boolean put(ArrowBuf bloomFilterKey, int length); double getExpectedFPP(); boolean isCrossingMaxFPP(); long getOptimalInsertions(); static long getOptimalSize(long expectedInsertions); void merge(BloomFilter that); @VisibleForTesting long getNumBitsSet(); @Override String toString(); @Override void close(); } |
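getOptimalSize is the classic Bloom-filter sizing formula m = -n * ln(p) / (ln 2)^2 converted to bytes, rounded up to an 8-byte boundary, plus the metadata tail. The asserted values are reproduced exactly with FPP = 0.01 and META_BYTES_CNT = 32; both constants are inferred from the test numbers, since neither definition appears in this excerpt. (The same FPP also explains testIsCrossingMaxFpp above: isCrossingMaxFPP trips at 5 * FPP = 0.05, the threshold the test loop drives toward.)

```java
// Reproduces the getOptimalSize arithmetic under the inferred constants.
public class OptimalSizeSketch {
    static final double FPP = 0.01;       // inferred, not shown in the excerpt
    static final int META_BYTES_CNT = 32; // inferred, not shown in the excerpt
    static long getOptimalSize(long n) {
        long bytes = (long) (-n * Math.log(FPP) / (Math.log(2) * Math.log(2))) / 8;
        bytes = ((bytes + 8) / 8) * 8;    // round up to an 8-byte boundary
        return bytes + META_BYTES_CNT;
    }
    public static void main(String[] args) {
        System.out.println(getOptimalSize(1));         // 40
        System.out.println(getOptimalSize(100));       // 152
        System.out.println(getOptimalSize(1_000_000)); // 1198168
    }
}
```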
@Test public void testClose() { try (final BloomFilter f1 = new BloomFilter(bfTestAllocator, TEST_NAME, 64)) { f1.setup(); f1.getDataBuffer().retain(); assertEquals(2, f1.getDataBuffer().refCnt()); f1.close(); assertEquals(1, f1.getDataBuffer().refCnt()); } } | @Override public void close() { logger.debug("Closing bloomfilter {}'s data buffer. RefCount {}", this.name, dataBuffer.refCnt()); try { dataBuffer.close(); } catch (Exception e) { logger.error("Error while closing bloomfilter " + this.name, e); } } | BloomFilter implements AutoCloseable { @Override public void close() { logger.debug("Closing bloomfilter {}'s data buffer. RefCount {}", this.name, dataBuffer.refCnt()); try { dataBuffer.close(); } catch (Exception e) { logger.error("Error while closing bloomfilter " + this.name, e); } } } | BloomFilter implements AutoCloseable { @Override public void close() { logger.debug("Closing bloomfilter {}'s data buffer. RefCount {}", this.name, dataBuffer.refCnt()); try { dataBuffer.close(); } catch (Exception e) { logger.error("Error while closing bloomfilter " + this.name, e); } } BloomFilter(BufferAllocator bufferAllocator, String name, long minSizeBytes); private BloomFilter(ArrowBuf dataBuffer); } | BloomFilter implements AutoCloseable { @Override public void close() { logger.debug("Closing bloomfilter {}'s data buffer. RefCount {}", this.name, dataBuffer.refCnt()); try { dataBuffer.close(); } catch (Exception e) { logger.error("Error while closing bloomfilter " + this.name, e); } } BloomFilter(BufferAllocator bufferAllocator, String name, long minSizeBytes); private BloomFilter(ArrowBuf dataBuffer); void setup(); String getName(); long getSizeInBytes(); static BloomFilter prepareFrom(ArrowBuf dataBuffer); ArrowBuf getDataBuffer(); boolean mightContain(ArrowBuf bloomFilterKey, int length); boolean put(ArrowBuf bloomFilterKey, int length); double getExpectedFPP(); boolean isCrossingMaxFPP(); long getOptimalInsertions(); static long getOptimalSize(long expectedInsertions); void merge(BloomFilter that); @VisibleForTesting long getNumBitsSet(); @Override String toString(); @Override void close(); } | BloomFilter implements AutoCloseable { @Override public void close() { logger.debug("Closing bloomfilter {}'s data buffer. RefCount {}", this.name, dataBuffer.refCnt()); try { dataBuffer.close(); } catch (Exception e) { logger.error("Error while closing bloomfilter " + this.name, e); } } BloomFilter(BufferAllocator bufferAllocator, String name, long minSizeBytes); private BloomFilter(ArrowBuf dataBuffer); void setup(); String getName(); long getSizeInBytes(); static BloomFilter prepareFrom(ArrowBuf dataBuffer); ArrowBuf getDataBuffer(); boolean mightContain(ArrowBuf bloomFilterKey, int length); boolean put(ArrowBuf bloomFilterKey, int length); double getExpectedFPP(); boolean isCrossingMaxFPP(); long getOptimalInsertions(); static long getOptimalSize(long expectedInsertions); void merge(BloomFilter that); @VisibleForTesting long getNumBitsSet(); @Override String toString(); @Override void close(); } |
@Test public void testCopyUtf8() throws Exception { testCopyUtf8Helper(new byte[] {'g', 'o', 'o', 'd', 'v', 'a', 'l'}, "goodval"); testCopyUtf8Helper(new byte[] {'b', 'a', 'd', (byte)0xff, 'v', 'a', 'l'}, "badval"); testCopyUtf8Helper(new byte[] {(byte)0xf9, 'g', 'o', 'o', 'd', ' ', 'p', 'a', 'r', 't'}, "good part"); testCopyUtf8Helper(new byte[] {'t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'o', 'k', (byte)0xfe}, "this is ok"); testCopyUtf8Helper(new byte[] {'f', 'a', 'k', 'e', ' ', (byte) 0xC0, '2', 'B', ' ', 's', 'e', 'q', }, "fake 2B seq"); } | public static int copyUtf8(ByteBuf in, final int start, final int end, ArrowBuf out) { int i = 0; int errBytes = 0; while (start + i < end) { byte b = in.getByte(start + i); if (b >= 0) { out.setByte(i - errBytes, b); i++; continue; } int seqLen = utf8CharLenNoThrow(in, start + i); if (seqLen == 0 || (start + i + seqLen) > end || !GuavaUtf8.isUtf8(in, start + i, start + i + seqLen)) { errBytes++; i++; } else { for (int j = i; j < i + seqLen; j++) { out.setByte(j - errBytes, in.getByte(start + j)); } i += seqLen; } } return end - start - errBytes; } | StringFunctionUtil { public static int copyUtf8(ByteBuf in, final int start, final int end, ArrowBuf out) { int i = 0; int errBytes = 0; while (start + i < end) { byte b = in.getByte(start + i); if (b >= 0) { out.setByte(i - errBytes, b); i++; continue; } int seqLen = utf8CharLenNoThrow(in, start + i); if (seqLen == 0 || (start + i + seqLen) > end || !GuavaUtf8.isUtf8(in, start + i, start + i + seqLen)) { errBytes++; i++; } else { for (int j = i; j < i + seqLen; j++) { out.setByte(j - errBytes, in.getByte(start + j)); } i += seqLen; } } return end - start - errBytes; } } | StringFunctionUtil { public static int copyUtf8(ByteBuf in, final int start, final int end, ArrowBuf out) { int i = 0; int errBytes = 0; while (start + i < end) { byte b = in.getByte(start + i); if (b >= 0) { out.setByte(i - errBytes, b); i++; continue; } int seqLen = utf8CharLenNoThrow(in, start + i); if (seqLen == 0 || (start + i + seqLen) > end || !GuavaUtf8.isUtf8(in, start + i, start + i + seqLen)) { errBytes++; i++; } else { for (int j = i; j < i + seqLen; j++) { out.setByte(j - errBytes, in.getByte(start + j)); } i += seqLen; } } return end - start - errBytes; } } | StringFunctionUtil { public static int copyUtf8(ByteBuf in, final int start, final int end, ArrowBuf out) { int i = 0; int errBytes = 0; while (start + i < end) { byte b = in.getByte(start + i); if (b >= 0) { out.setByte(i - errBytes, b); i++; continue; } int seqLen = utf8CharLenNoThrow(in, start + i); if (seqLen == 0 || (start + i + seqLen) > end || !GuavaUtf8.isUtf8(in, start + i, start + i + seqLen)) { errBytes++; i++; } else { for (int j = i; j < i + seqLen; j++) { out.setByte(j - errBytes, in.getByte(start + j)); } i += seqLen; } } return end - start - errBytes; } static int getUTF8CharLength(ByteBuf buffer, int start, int end, final FunctionErrorContext errCtx); static int getUTF8CharPosition(ByteBuf buffer, int start, int end, int charLength, final FunctionErrorContext errCtx); static Pattern compilePattern(String regex, FunctionErrorContext errCtx); static Pattern compilePattern(String regex, int flags, FunctionErrorContext errCtx); static int stringLeftMatchUTF8(ByteBuf str, int strStart, int strEnd, ByteBuf substr, int subStart, int subEnd); static int stringLeftMatchUTF8(ByteBuf str, int strStart, int strEnd, ByteBuf substr, int subStart, int subEnd, int offset); static int parseBinaryStringNoFormat(ByteBuf str, int strStart, int strEnd, ByteBuf out, FunctionErrorContext errCtx); static int utf8CharLen(ByteBuf buffer, int idx, final FunctionErrorContext errCtx); static int copyUtf8(ByteBuf in, final int start, final int end, ArrowBuf out); static int copyReplaceUtf8(ByteBuf in, final int start, final int end, ByteBuf out, byte replacement); } | StringFunctionUtil { public static int copyUtf8(ByteBuf in, final int start, final int end, ArrowBuf out) { int i = 0; int errBytes = 0; while (start + i < end) { byte b = in.getByte(start + i); if (b >= 0) { out.setByte(i - errBytes, b); i++; continue; } int seqLen = utf8CharLenNoThrow(in, start + i); if (seqLen == 0 || (start + i + seqLen) > end || !GuavaUtf8.isUtf8(in, start + i, start + i + seqLen)) { errBytes++; i++; } else { for (int j = i; j < i + seqLen; j++) { out.setByte(j - errBytes, in.getByte(start + j)); } i += seqLen; } } return end - start - errBytes; } static int getUTF8CharLength(ByteBuf buffer, int start, int end, final FunctionErrorContext errCtx); static int getUTF8CharPosition(ByteBuf buffer, int start, int end, int charLength, final FunctionErrorContext errCtx); static Pattern compilePattern(String regex, FunctionErrorContext errCtx); static Pattern compilePattern(String regex, int flags, FunctionErrorContext errCtx); static int stringLeftMatchUTF8(ByteBuf str, int strStart, int strEnd, ByteBuf substr, int subStart, int subEnd); static int stringLeftMatchUTF8(ByteBuf str, int strStart, int strEnd, ByteBuf substr, int subStart, int subEnd, int offset); static int parseBinaryStringNoFormat(ByteBuf str, int strStart, int strEnd, ByteBuf out, FunctionErrorContext errCtx); static int utf8CharLen(ByteBuf buffer, int idx, final FunctionErrorContext errCtx); static int copyUtf8(ByteBuf in, final int start, final int end, ArrowBuf out); static int copyReplaceUtf8(ByteBuf in, final int start, final int end, ByteBuf out, byte replacement); } |
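What the helper cases above pin down: ASCII bytes pass through, a structurally valid multi-byte sequence is copied whole, and any byte that cannot begin or complete a valid sequence (0xff, 0xf9, 0xfe, the overlong lead 0xC0) is silently dropped, shrinking the output by one byte each time. A self-contained sketch of that compaction over byte[] (a simplification of the ByteBuf/ArrowBuf code above; it checks lead and continuation bytes but not every overlong/surrogate corner the Guava validator covers):

import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;

public final class Utf8Compact {
  // Expected sequence length from the lead byte; 0 marks an invalid lead
  // (0x80..0xC1 and 0xF5..0xFF can never start a well-formed sequence).
  static int seqLen(byte b) {
    int u = b & 0xFF;
    if (u < 0x80) return 1;
    if (u >= 0xC2 && u <= 0xDF) return 2;
    if (u >= 0xE0 && u <= 0xEF) return 3;
    if (u >= 0xF0 && u <= 0xF4) return 4;
    return 0;
  }

  static boolean isContinuation(byte b) { return (b & 0xC0) == 0x80; }

  // Copy valid sequences through, drop offending bytes one at a time.
  public static byte[] dropInvalid(byte[] in) {
    ByteArrayOutputStream out = new ByteArrayOutputStream(in.length);
    int i = 0;
    while (i < in.length) {
      int len = seqLen(in[i]);
      boolean ok = len > 0 && i + len <= in.length;
      for (int j = 1; ok && j < len; j++) ok = isContinuation(in[i + j]);
      if (ok) { out.write(in, i, len); i += len; } else { i++; }
    }
    return out.toByteArray();
  }

  public static void main(String[] args) {
    byte[] bad = {'b', 'a', 'd', (byte) 0xff, 'v', 'a', 'l'};
    System.out.println(new String(dropInvalid(bad), StandardCharsets.UTF_8)); // badval
  }
}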
@Test public void testCopyReplaceUtf8() throws Exception { testReplaceUtf8Helper(new byte[] {'g', 'o', 'o', 'd', 'v', 'a', 'l'}, (byte)'?', "goodval"); testReplaceUtf8Helper(new byte[] {'b', 'a', 'd', (byte)0xff, 'v', 'a', 'l'}, (byte)'?', "bad?val"); testReplaceUtf8Helper(new byte[] {(byte)0xf9, 'g', 'o', 'o', 'd', ' ', 'p', 'a', 'r', 't'}, (byte)'X', "Xgood part"); testReplaceUtf8Helper(new byte[] {'t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'o', 'k', (byte)0xfe}, (byte)'|', "this is ok|"); testReplaceUtf8Helper(new byte[] {'f', 'a', 'k', 'e', ' ', (byte) 0xC0, '2', 'B', ' ', 's', 'e', 'q', }, (byte)'?', "fake ?2B seq"); } | public static int copyReplaceUtf8(ByteBuf in, final int start, final int end, ByteBuf out, byte replacement) { int i = 0; while (start + i < end) { byte b = in.getByte(start + i); if (b >= 0) { out.setByte(i, b); i++; continue; } int seqLen = utf8CharLenNoThrow(in, start + i); if (seqLen == 0 || (start + i + seqLen) > end || !GuavaUtf8.isUtf8(in, start + i, start + i + seqLen)) { out.setByte(i, replacement); i++; } else { for (int j = i; j < i + seqLen; j++) { out.setByte(j, in.getByte(start + j)); } i += seqLen; } } return end - start; } | StringFunctionUtil { public static int copyReplaceUtf8(ByteBuf in, final int start, final int end, ByteBuf out, byte replacement) { int i = 0; while (start + i < end) { byte b = in.getByte(start + i); if (b >= 0) { out.setByte(i, b); i++; continue; } int seqLen = utf8CharLenNoThrow(in, start + i); if (seqLen == 0 || (start + i + seqLen) > end || !GuavaUtf8.isUtf8(in, start + i, start + i + seqLen)) { out.setByte(i, replacement); i++; } else { for (int j = i; j < i + seqLen; j++) { out.setByte(j, in.getByte(start + j)); } i += seqLen; } } return end - start; } } | StringFunctionUtil { public static int copyReplaceUtf8(ByteBuf in, final int start, final int end, ByteBuf out, byte replacement) { int i = 0; while (start + i < end) { byte b = in.getByte(start + i); if (b >= 0) { out.setByte(i, b); i++; continue; } int seqLen = utf8CharLenNoThrow(in, start + i); if (seqLen == 0 || (start + i + seqLen) > end || !GuavaUtf8.isUtf8(in, start + i, start + i + seqLen)) { out.setByte(i, replacement); i++; } else { for (int j = i; j < i + seqLen; j++) { out.setByte(j, in.getByte(start + j)); } i += seqLen; } } return end - start; } } | StringFunctionUtil { public static int copyReplaceUtf8(ByteBuf in, final int start, final int end, ByteBuf out, byte replacement) { int i = 0; while (start + i < end) { byte b = in.getByte(start + i); if (b >= 0) { out.setByte(i, b); i++; continue; } int seqLen = utf8CharLenNoThrow(in, start + i); if (seqLen == 0 || (start + i + seqLen) > end || !GuavaUtf8.isUtf8(in, start + i, start + i + seqLen)) { out.setByte(i, replacement); i++; } else { for (int j = i; j < i + seqLen; j++) { out.setByte(j, in.getByte(start + j)); } i += seqLen; } } return end - start; } static int getUTF8CharLength(ByteBuf buffer, int start, int end, final FunctionErrorContext errCtx); static int getUTF8CharPosition(ByteBuf buffer, int start, int end, int charLength, final FunctionErrorContext errCtx); static Pattern compilePattern(String regex, FunctionErrorContext errCtx); static Pattern compilePattern(String regex, int flags, FunctionErrorContext errCtx); static int stringLeftMatchUTF8(ByteBuf str, int strStart, int strEnd, ByteBuf substr, int subStart, int subEnd); static int stringLeftMatchUTF8(ByteBuf str, int strStart, int strEnd, ByteBuf substr, int subStart, int subEnd, int offset); static int parseBinaryStringNoFormat(ByteBuf str, int strStart, int strEnd, ByteBuf out, FunctionErrorContext errCtx); static int utf8CharLen(ByteBuf buffer, int idx, final FunctionErrorContext errCtx); static int copyUtf8(ByteBuf in, final int start, final int end, ArrowBuf out); static int copyReplaceUtf8(ByteBuf in, final int start, final int end, ByteBuf out, byte replacement); } | StringFunctionUtil { public static int copyReplaceUtf8(ByteBuf in, final int start, final int end, ByteBuf out, byte replacement) { int i = 0; while (start + i < end) { byte b = in.getByte(start + i); if (b >= 0) { out.setByte(i, b); i++; continue; } int seqLen = utf8CharLenNoThrow(in, start + i); if (seqLen == 0 || (start + i + seqLen) > end || !GuavaUtf8.isUtf8(in, start + i, start + i + seqLen)) { out.setByte(i, replacement); i++; } else { for (int j = i; j < i + seqLen; j++) { out.setByte(j, in.getByte(start + j)); } i += seqLen; } } return end - start; } static int getUTF8CharLength(ByteBuf buffer, int start, int end, final FunctionErrorContext errCtx); static int getUTF8CharPosition(ByteBuf buffer, int start, int end, int charLength, final FunctionErrorContext errCtx); static Pattern compilePattern(String regex, FunctionErrorContext errCtx); static Pattern compilePattern(String regex, int flags, FunctionErrorContext errCtx); static int stringLeftMatchUTF8(ByteBuf str, int strStart, int strEnd, ByteBuf substr, int subStart, int subEnd); static int stringLeftMatchUTF8(ByteBuf str, int strStart, int strEnd, ByteBuf substr, int subStart, int subEnd, int offset); static int parseBinaryStringNoFormat(ByteBuf str, int strStart, int strEnd, ByteBuf out, FunctionErrorContext errCtx); static int utf8CharLen(ByteBuf buffer, int idx, final FunctionErrorContext errCtx); static int copyUtf8(ByteBuf in, final int start, final int end, ArrowBuf out); static int copyReplaceUtf8(ByteBuf in, final int start, final int end, ByteBuf out, byte replacement); } |
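copyReplaceUtf8 differs from copyUtf8 only in the error branch: the offending byte is overwritten with a caller-supplied replacement instead of being dropped, so the return value is always end - start. The same delta applied to the byte[] sketch above (assumes it is compiled in the same package as the hypothetical Utf8Compact helper so its package-private methods are visible):

public final class Utf8Replace {
  // Same scan as Utf8Compact.dropInvalid, but the error branch writes the
  // replacement byte instead of skipping, so output length == input length.
  public static byte[] replaceInvalid(byte[] in, byte replacement) {
    java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream(in.length);
    int i = 0;
    while (i < in.length) {
      int len = Utf8Compact.seqLen(in[i]);
      boolean ok = len > 0 && i + len <= in.length;
      for (int j = 1; ok && j < len; j++) ok = Utf8Compact.isContinuation(in[i + j]);
      if (ok) { out.write(in, i, len); i += len; } else { out.write(replacement); i++; }
    }
    return out.toByteArray();
  }

  public static void main(String[] args) {
    byte[] bad = {'b', 'a', 'd', (byte) 0xff, 'v', 'a', 'l'};
    System.out.println(new String(replaceInvalid(bad, (byte) '?'),
        java.nio.charset.StandardCharsets.UTF_8)); // bad?val
  }
}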
@Test public void testIntSpec() throws Exception{ String columnName = "i"; Integer expectedValue = 12322; PartitionSpec partitionSpec = PartitionSpec .builderFor(schema) .identity(columnName) .build(); IcebergPartitionData icebergPartitionData = new IcebergPartitionData(partitionSpec.partitionType()); icebergPartitionData.setInteger(0, expectedValue); verifyPartitionValue(partitionSpec, icebergPartitionData, columnName, Integer.class, expectedValue); } | public void setInteger(int position, Integer value) { set(position, value); } | IcebergPartitionData implements StructLike, Serializable { public void setInteger(int position, Integer value) { set(position, value); } } | IcebergPartitionData implements StructLike, Serializable { public void setInteger(int position, Integer value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); } | IcebergPartitionData implements StructLike, Serializable { public void setInteger(int position, Integer value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); Type getType(int pos); void clear(); @Override int size(); @Override @SuppressWarnings("unchecked") T get(int pos, Class<T> javaClass); Object get(int pos); @Override void set(int pos, T value); @Override String toString(); IcebergPartitionData copy(); @Override boolean equals(Object o); @Override int hashCode(); static Object[] copyData(Types.StructType type, Object[] data); void setInteger(int position, Integer value); void setLong(int position, Long value); void setFloat(int position, Float value); void setDouble(int position, Double value); void setBoolean(int position, Boolean value); void setString(int position, String value); void setBytes(int position, byte[] value); void setBigDecimal(int position, BigDecimal value); void set(int position, CompleteType type, ValueVector vector, int offset); } | IcebergPartitionData implements StructLike, Serializable { public void setInteger(int position, Integer value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); Type getType(int pos); void clear(); @Override int size(); @Override @SuppressWarnings("unchecked") T get(int pos, Class<T> javaClass); Object get(int pos); @Override void set(int pos, T value); @Override String toString(); IcebergPartitionData copy(); @Override boolean equals(Object o); @Override int hashCode(); static Object[] copyData(Types.StructType type, Object[] data); void setInteger(int position, Integer value); void setLong(int position, Long value); void setFloat(int position, Float value); void setDouble(int position, Double value); void setBoolean(int position, Boolean value); void setString(int position, String value); void setBytes(int position, byte[] value); void setBigDecimal(int position, BigDecimal value); void set(int position, CompleteType type, ValueVector vector, int offset); } |
@Test public void testParseJsonPath5() throws Exception { JsonPath p = JSONElementLocator.parsePath("value.a[0].b"); assertEquals(p.toString(), 3, p.size()); assertEquals(new JsonPath(new ObjectJsonPathElement("a"), new ArrayJsonPathElement(0), new ObjectJsonPathElement("b")), p); } | public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } | JSONElementLocator { public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } } | JSONElementLocator { public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } JSONElementLocator(String text); } | JSONElementLocator { public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } JSONElementLocator(String text); static JsonPath parsePath(String path); Interval locatePath(JsonPath searchedPath); JsonSelection locate(int selectionStart, int selectionEnd); } | JSONElementLocator { public static JsonPath parsePath(String path) { if (path.startsWith(VALUE_PLACEHOLDER)) { return new JsonPath(path.substring(VALUE_PLACEHOLDER.length())); } throw new IllegalArgumentException(path + " must start with 'value'"); } JSONElementLocator(String text); static JsonPath parsePath(String path); Interval locatePath(JsonPath searchedPath); JsonSelection locate(int selectionStart, int selectionEnd); } |
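parsePath does no real parsing itself: it only enforces that the expression is rooted at the value placeholder and hands the remainder (".a[0].b" in the test) to the JsonPath constructor. A sketch of just that guard, mirroring the focal method (the literal "value" for VALUE_PLACEHOLDER is an assumption inferred from the test inputs):

// Strip the mandatory "value" root; the remainder is what JsonPath tokenizes.
static String stripValuePrefix(String path) {
  final String VALUE_PLACEHOLDER = "value"; // assumed constant value
  if (path.startsWith(VALUE_PLACEHOLDER)) {
    return path.substring(VALUE_PLACEHOLDER.length()); // "value.a[0].b" -> ".a[0].b"
  }
  throw new IllegalArgumentException(path + " must start with 'value'");
}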
@Test public void testStringSpec() throws Exception{ String columnName = "data"; String expectedValue = "abc"; PartitionSpec partitionSpec = PartitionSpec .builderFor(schema) .identity(columnName) .build(); IcebergPartitionData icebergPartitionData = new IcebergPartitionData(partitionSpec.partitionType()); icebergPartitionData.setString(0, expectedValue); verifyPartitionValue(partitionSpec, icebergPartitionData, columnName, String.class, expectedValue); } | public void setString(int position, String value) { set(position, value); } | IcebergPartitionData implements StructLike, Serializable { public void setString(int position, String value) { set(position, value); } } | IcebergPartitionData implements StructLike, Serializable { public void setString(int position, String value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); } | IcebergPartitionData implements StructLike, Serializable { public void setString(int position, String value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); Type getType(int pos); void clear(); @Override int size(); @Override @SuppressWarnings("unchecked") T get(int pos, Class<T> javaClass); Object get(int pos); @Override void set(int pos, T value); @Override String toString(); IcebergPartitionData copy(); @Override boolean equals(Object o); @Override int hashCode(); static Object[] copyData(Types.StructType type, Object[] data); void setInteger(int position, Integer value); void setLong(int position, Long value); void setFloat(int position, Float value); void setDouble(int position, Double value); void setBoolean(int position, Boolean value); void setString(int position, String value); void setBytes(int position, byte[] value); void setBigDecimal(int position, BigDecimal value); void set(int position, CompleteType type, ValueVector vector, int offset); } | IcebergPartitionData implements StructLike, Serializable { public void setString(int position, String value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); Type getType(int pos); void clear(); @Override int size(); @Override @SuppressWarnings("unchecked") T get(int pos, Class<T> javaClass); Object get(int pos); @Override void set(int pos, T value); @Override String toString(); IcebergPartitionData copy(); @Override boolean equals(Object o); @Override int hashCode(); static Object[] copyData(Types.StructType type, Object[] data); void setInteger(int position, Integer value); void setLong(int position, Long value); void setFloat(int position, Float value); void setDouble(int position, Double value); void setBoolean(int position, Boolean value); void setString(int position, String value); void setBytes(int position, byte[] value); void setBigDecimal(int position, BigDecimal value); void set(int position, CompleteType type, ValueVector vector, int offset); } |
@Test public void testLongSpec() throws Exception{ String columnName = "id"; Long expectedValue = 123L; PartitionSpec partitionSpec = PartitionSpec .builderFor(schema) .identity(columnName) .build(); IcebergPartitionData icebergPartitionData = new IcebergPartitionData(partitionSpec.partitionType()); icebergPartitionData.setLong(0, expectedValue); verifyPartitionValue(partitionSpec, icebergPartitionData, columnName, Long.class, expectedValue); } | public void setLong(int position, Long value) { set(position, value); } | IcebergPartitionData implements StructLike, Serializable { public void setLong(int position, Long value) { set(position, value); } } | IcebergPartitionData implements StructLike, Serializable { public void setLong(int position, Long value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); } | IcebergPartitionData implements StructLike, Serializable { public void setLong(int position, Long value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); Type getType(int pos); void clear(); @Override int size(); @Override @SuppressWarnings("unchecked") T get(int pos, Class<T> javaClass); Object get(int pos); @Override void set(int pos, T value); @Override String toString(); IcebergPartitionData copy(); @Override boolean equals(Object o); @Override int hashCode(); static Object[] copyData(Types.StructType type, Object[] data); void setInteger(int position, Integer value); void setLong(int position, Long value); void setFloat(int position, Float value); void setDouble(int position, Double value); void setBoolean(int position, Boolean value); void setString(int position, String value); void setBytes(int position, byte[] value); void setBigDecimal(int position, BigDecimal value); void set(int position, CompleteType type, ValueVector vector, int offset); } | IcebergPartitionData implements StructLike, Serializable { public void setLong(int position, Long value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); Type getType(int pos); void clear(); @Override int size(); @Override @SuppressWarnings("unchecked") T get(int pos, Class<T> javaClass); Object get(int pos); @Override void set(int pos, T value); @Override String toString(); IcebergPartitionData copy(); @Override boolean equals(Object o); @Override int hashCode(); static Object[] copyData(Types.StructType type, Object[] data); void setInteger(int position, Integer value); void setLong(int position, Long value); void setFloat(int position, Float value); void setDouble(int position, Double value); void setBoolean(int position, Boolean value); void setString(int position, String value); void setBytes(int position, byte[] value); void setBigDecimal(int position, BigDecimal value); void set(int position, CompleteType type, ValueVector vector, int offset); } |
@Test public void testBigDecimalSpec() throws Exception{ String columnName = "dec_9_0"; BigDecimal expectedValue = new BigDecimal(234); PartitionSpec partitionSpec = PartitionSpec .builderFor(schema) .identity(columnName) .build(); IcebergPartitionData icebergPartitionData = new IcebergPartitionData(partitionSpec.partitionType()); icebergPartitionData.setBigDecimal(0, expectedValue); verifyPartitionValue(partitionSpec, icebergPartitionData, columnName, BigDecimal.class, expectedValue); } | public void setBigDecimal(int position, BigDecimal value) { set(position, value); } | IcebergPartitionData implements StructLike, Serializable { public void setBigDecimal(int position, BigDecimal value) { set(position, value); } } | IcebergPartitionData implements StructLike, Serializable { public void setBigDecimal(int position, BigDecimal value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); } | IcebergPartitionData implements StructLike, Serializable { public void setBigDecimal(int position, BigDecimal value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); Type getType(int pos); void clear(); @Override int size(); @Override @SuppressWarnings("unchecked") T get(int pos, Class<T> javaClass); Object get(int pos); @Override void set(int pos, T value); @Override String toString(); IcebergPartitionData copy(); @Override boolean equals(Object o); @Override int hashCode(); static Object[] copyData(Types.StructType type, Object[] data); void setInteger(int position, Integer value); void setLong(int position, Long value); void setFloat(int position, Float value); void setDouble(int position, Double value); void setBoolean(int position, Boolean value); void setString(int position, String value); void setBytes(int position, byte[] value); void setBigDecimal(int position, BigDecimal value); void set(int position, CompleteType type, ValueVector vector, int offset); } | IcebergPartitionData implements StructLike, Serializable { public void setBigDecimal(int position, BigDecimal value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); Type getType(int pos); void clear(); @Override int size(); @Override @SuppressWarnings("unchecked") T get(int pos, Class<T> javaClass); Object get(int pos); @Override void set(int pos, T value); @Override String toString(); IcebergPartitionData copy(); @Override boolean equals(Object o); @Override int hashCode(); static Object[] copyData(Types.StructType type, Object[] data); void setInteger(int position, Integer value); void setLong(int position, Long value); void setFloat(int position, Float value); void setDouble(int position, Double value); void setBoolean(int position, Boolean value); void setString(int position, String value); void setBytes(int position, byte[] value); void setBigDecimal(int position, BigDecimal value); void set(int position, CompleteType type, ValueVector vector, int offset); } |
@Test public void testFloatSpec() throws Exception{ String columnName = "f"; Float expectedValue = 1.23f; PartitionSpec partitionSpec = PartitionSpec .builderFor(schema) .identity(columnName) .build(); IcebergPartitionData icebergPartitionData = new IcebergPartitionData(partitionSpec.partitionType()); icebergPartitionData.setFloat(0, expectedValue); verifyPartitionValue(partitionSpec, icebergPartitionData, columnName, Float.class, expectedValue); } | public void setFloat(int position, Float value) { set(position, value); } | IcebergPartitionData implements StructLike, Serializable { public void setFloat(int position, Float value) { set(position, value); } } | IcebergPartitionData implements StructLike, Serializable { public void setFloat(int position, Float value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); } | IcebergPartitionData implements StructLike, Serializable { public void setFloat(int position, Float value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); Type getType(int pos); void clear(); @Override int size(); @Override @SuppressWarnings("unchecked") T get(int pos, Class<T> javaClass); Object get(int pos); @Override void set(int pos, T value); @Override String toString(); IcebergPartitionData copy(); @Override boolean equals(Object o); @Override int hashCode(); static Object[] copyData(Types.StructType type, Object[] data); void setInteger(int position, Integer value); void setLong(int position, Long value); void setFloat(int position, Float value); void setDouble(int position, Double value); void setBoolean(int position, Boolean value); void setString(int position, String value); void setBytes(int position, byte[] value); void setBigDecimal(int position, BigDecimal value); void set(int position, CompleteType type, ValueVector vector, int offset); } | IcebergPartitionData implements StructLike, Serializable { public void setFloat(int position, Float value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); Type getType(int pos); void clear(); @Override int size(); @Override @SuppressWarnings("unchecked") T get(int pos, Class<T> javaClass); Object get(int pos); @Override void set(int pos, T value); @Override String toString(); IcebergPartitionData copy(); @Override boolean equals(Object o); @Override int hashCode(); static Object[] copyData(Types.StructType type, Object[] data); void setInteger(int position, Integer value); void setLong(int position, Long value); void setFloat(int position, Float value); void setDouble(int position, Double value); void setBoolean(int position, Boolean value); void setString(int position, String value); void setBytes(int position, byte[] value); void setBigDecimal(int position, BigDecimal value); void set(int position, CompleteType type, ValueVector vector, int offset); } |
@Test public void testDoubleSpec() throws Exception{ String columnName = "d"; Double expectedValue = Double.valueOf(1.23f); PartitionSpec partitionSpec = PartitionSpec .builderFor(schema) .identity(columnName) .build(); IcebergPartitionData icebergPartitionData = new IcebergPartitionData(partitionSpec.partitionType()); icebergPartitionData.setDouble(0, expectedValue); verifyPartitionValue(partitionSpec, icebergPartitionData, columnName, Double.class, expectedValue); } | public void setDouble(int position, Double value) { set(position, value); } | IcebergPartitionData implements StructLike, Serializable { public void setDouble(int position, Double value) { set(position, value); } } | IcebergPartitionData implements StructLike, Serializable { public void setDouble(int position, Double value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); } | IcebergPartitionData implements StructLike, Serializable { public void setDouble(int position, Double value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); Type getType(int pos); void clear(); @Override int size(); @Override @SuppressWarnings("unchecked") T get(int pos, Class<T> javaClass); Object get(int pos); @Override void set(int pos, T value); @Override String toString(); IcebergPartitionData copy(); @Override boolean equals(Object o); @Override int hashCode(); static Object[] copyData(Types.StructType type, Object[] data); void setInteger(int position, Integer value); void setLong(int position, Long value); void setFloat(int position, Float value); void setDouble(int position, Double value); void setBoolean(int position, Boolean value); void setString(int position, String value); void setBytes(int position, byte[] value); void setBigDecimal(int position, BigDecimal value); void set(int position, CompleteType type, ValueVector vector, int offset); } | IcebergPartitionData implements StructLike, Serializable { public void setDouble(int position, Double value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); Type getType(int pos); void clear(); @Override int size(); @Override @SuppressWarnings("unchecked") T get(int pos, Class<T> javaClass); Object get(int pos); @Override void set(int pos, T value); @Override String toString(); IcebergPartitionData copy(); @Override boolean equals(Object o); @Override int hashCode(); static Object[] copyData(Types.StructType type, Object[] data); void setInteger(int position, Integer value); void setLong(int position, Long value); void setFloat(int position, Float value); void setDouble(int position, Double value); void setBoolean(int position, Boolean value); void setString(int position, String value); void setBytes(int position, byte[] value); void setBigDecimal(int position, BigDecimal value); void set(int position, CompleteType type, ValueVector vector, int offset); } |
@Test public void testBooleanSpec() throws Exception{ String columnName = "b"; Boolean expectedValue = true; PartitionSpec partitionSpec = PartitionSpec .builderFor(schema) .identity(columnName) .build(); IcebergPartitionData icebergPartitionData = new IcebergPartitionData(partitionSpec.partitionType()); icebergPartitionData.setBoolean(0, expectedValue); verifyPartitionValue(partitionSpec, icebergPartitionData, columnName, Boolean.class, expectedValue); } | public void setBoolean(int position, Boolean value) { set(position, value); } | IcebergPartitionData implements StructLike, Serializable { public void setBoolean(int position, Boolean value) { set(position, value); } } | IcebergPartitionData implements StructLike, Serializable { public void setBoolean(int position, Boolean value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); } | IcebergPartitionData implements StructLike, Serializable { public void setBoolean(int position, Boolean value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); Type getType(int pos); void clear(); @Override int size(); @Override @SuppressWarnings("unchecked") T get(int pos, Class<T> javaClass); Object get(int pos); @Override void set(int pos, T value); @Override String toString(); IcebergPartitionData copy(); @Override boolean equals(Object o); @Override int hashCode(); static Object[] copyData(Types.StructType type, Object[] data); void setInteger(int position, Integer value); void setLong(int position, Long value); void setFloat(int position, Float value); void setDouble(int position, Double value); void setBoolean(int position, Boolean value); void setString(int position, String value); void setBytes(int position, byte[] value); void setBigDecimal(int position, BigDecimal value); void set(int position, CompleteType type, ValueVector vector, int offset); } | IcebergPartitionData implements StructLike, Serializable { public void setBoolean(int position, Boolean value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); Type getType(int pos); void clear(); @Override int size(); @Override @SuppressWarnings("unchecked") T get(int pos, Class<T> javaClass); Object get(int pos); @Override void set(int pos, T value); @Override String toString(); IcebergPartitionData copy(); @Override boolean equals(Object o); @Override int hashCode(); static Object[] copyData(Types.StructType type, Object[] data); void setInteger(int position, Integer value); void setLong(int position, Long value); void setFloat(int position, Float value); void setDouble(int position, Double value); void setBoolean(int position, Boolean value); void setString(int position, String value); void setBytes(int position, byte[] value); void setBigDecimal(int position, BigDecimal value); void set(int position, CompleteType type, ValueVector vector, int offset); } |
@Test public void testBinarySpec() throws Exception{ String columnName = "bytes"; byte[] expectedValue = "test".getBytes(); PartitionSpec partitionSpec = PartitionSpec .builderFor(schema) .identity(columnName) .build(); IcebergPartitionData icebergPartitionData = new IcebergPartitionData(partitionSpec.partitionType()); icebergPartitionData.setBytes(0, expectedValue); verifyPartitionValue(partitionSpec, icebergPartitionData, columnName, ByteBuffer.class, expectedValue); } | public void setBytes(int position, byte[] value) { set(position, value); } | IcebergPartitionData implements StructLike, Serializable { public void setBytes(int position, byte[] value) { set(position, value); } } | IcebergPartitionData implements StructLike, Serializable { public void setBytes(int position, byte[] value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); } | IcebergPartitionData implements StructLike, Serializable { public void setBytes(int position, byte[] value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); Type getType(int pos); void clear(); @Override int size(); @Override @SuppressWarnings("unchecked") T get(int pos, Class<T> javaClass); Object get(int pos); @Override void set(int pos, T value); @Override String toString(); IcebergPartitionData copy(); @Override boolean equals(Object o); @Override int hashCode(); static Object[] copyData(Types.StructType type, Object[] data); void setInteger(int position, Integer value); void setLong(int position, Long value); void setFloat(int position, Float value); void setDouble(int position, Double value); void setBoolean(int position, Boolean value); void setString(int position, String value); void setBytes(int position, byte[] value); void setBigDecimal(int position, BigDecimal value); void set(int position, CompleteType type, ValueVector vector, int offset); } | IcebergPartitionData implements StructLike, Serializable { public void setBytes(int position, byte[] value) { set(position, value); } IcebergPartitionData(Types.StructType partitionType); private IcebergPartitionData(IcebergPartitionData toCopy); Type getType(int pos); void clear(); @Override int size(); @Override @SuppressWarnings("unchecked") T get(int pos, Class<T> javaClass); Object get(int pos); @Override void set(int pos, T value); @Override String toString(); IcebergPartitionData copy(); @Override boolean equals(Object o); @Override int hashCode(); static Object[] copyData(Types.StructType type, Object[] data); void setInteger(int position, Integer value); void setLong(int position, Long value); void setFloat(int position, Float value); void setDouble(int position, Double value); void setBoolean(int position, Boolean value); void setString(int position, String value); void setBytes(int position, byte[] value); void setBigDecimal(int position, BigDecimal value); void set(int position, CompleteType type, ValueVector vector, int offset); } |
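All of the IcebergPartitionData setter tests above share one shape: a single-column identity partition spec, a partition-data record sized from spec.partitionType(), and a typed write at position 0 that verifyPartitionValue then reads back through get(pos, javaClass). A condensed sketch of that pattern (the one-column schema and column name are assumptions drawn from the tests; IcebergPartitionData is the Dremio class shown in these rows, so its import is omitted):

import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.types.Types;

public class PartitionDataSketch {
  public static void main(String[] args) {
    // One bigint column, identity-partitioned, mirroring testLongSpec.
    Schema schema = new Schema(Types.NestedField.optional(1, "id", Types.LongType.get()));
    PartitionSpec spec = PartitionSpec.builderFor(schema).identity("id").build();
    IcebergPartitionData data = new IcebergPartitionData(spec.partitionType());
    data.setLong(0, 123L);                        // position 0 = the only partition field
    System.out.println(data.get(0, Long.class));  // 123
  }
}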
@Test public void match() throws Exception { IcebergFormatMatcher matcher = new IcebergFormatMatcher(null); FileSystem fs = HadoopFileSystem.getLocal(new Configuration()); File root = tempDir.newFolder(); FileSelection fileSelection = FileSelection.create(fs, Path.of(root.toURI())); boolean matched; assertFalse(matcher.matches(fs, fileSelection, null)); File metadata = new File(root, "metadata"); metadata.mkdir(); assertFalse(matcher.matches(fs, fileSelection, null)); File versionHint = new File(metadata, "version-hint.text"); versionHint.createNewFile(); File metadataJsonNoDot = new File(metadata, "v9metadata.json"); metadataJsonNoDot.createNewFile(); assertFalse(matcher.matches(fs, fileSelection, null)); File metadataJson = new File(metadata, "v9.metadata.json"); metadataJson.createNewFile(); matched = matcher.matches(fs, fileSelection, null); assertTrue(matched); } | @Override public boolean matches(FileSystem fs, FileSelection fileSelection, CompressionCodecFactory codecFactory) throws IOException { Path rootDir = Path.of(fileSelection.getSelectionRoot()); Path metaDir = rootDir.resolve(METADATA_DIR_NAME); if (!fs.isDirectory(rootDir) || !fs.exists(metaDir) || !fs.isDirectory(metaDir)) { return false; } Path versionHintPath = metaDir.resolve(VERSION_HINT_FILE_NAME); if (!fs.exists(versionHintPath) || !fs.isFile(versionHintPath)) { return false; } for (FileAttributes file : fs.list(metaDir)) { if (METADATA_FILE_PATTERN.matcher(file.getPath().getName()).matches()) { return true; } } return false; } | IcebergFormatMatcher extends FormatMatcher { @Override public boolean matches(FileSystem fs, FileSelection fileSelection, CompressionCodecFactory codecFactory) throws IOException { Path rootDir = Path.of(fileSelection.getSelectionRoot()); Path metaDir = rootDir.resolve(METADATA_DIR_NAME); if (!fs.isDirectory(rootDir) || !fs.exists(metaDir) || !fs.isDirectory(metaDir)) { return false; } Path versionHintPath = metaDir.resolve(VERSION_HINT_FILE_NAME); if (!fs.exists(versionHintPath) || !fs.isFile(versionHintPath)) { return false; } for (FileAttributes file : fs.list(metaDir)) { if (METADATA_FILE_PATTERN.matcher(file.getPath().getName()).matches()) { return true; } } return false; } } | IcebergFormatMatcher extends FormatMatcher { @Override public boolean matches(FileSystem fs, FileSelection fileSelection, CompressionCodecFactory codecFactory) throws IOException { Path rootDir = Path.of(fileSelection.getSelectionRoot()); Path metaDir = rootDir.resolve(METADATA_DIR_NAME); if (!fs.isDirectory(rootDir) || !fs.exists(metaDir) || !fs.isDirectory(metaDir)) { return false; } Path versionHintPath = metaDir.resolve(VERSION_HINT_FILE_NAME); if (!fs.exists(versionHintPath) || !fs.isFile(versionHintPath)) { return false; } for (FileAttributes file : fs.list(metaDir)) { if (METADATA_FILE_PATTERN.matcher(file.getPath().getName()).matches()) { return true; } } return false; } IcebergFormatMatcher(FormatPlugin plugin); } | IcebergFormatMatcher extends FormatMatcher { @Override public boolean matches(FileSystem fs, FileSelection fileSelection, CompressionCodecFactory codecFactory) throws IOException { Path rootDir = Path.of(fileSelection.getSelectionRoot()); Path metaDir = rootDir.resolve(METADATA_DIR_NAME); if (!fs.isDirectory(rootDir) || !fs.exists(metaDir) || !fs.isDirectory(metaDir)) { return false; } Path versionHintPath = metaDir.resolve(VERSION_HINT_FILE_NAME); if (!fs.exists(versionHintPath) || !fs.isFile(versionHintPath)) { return false; } for (FileAttributes file : fs.list(metaDir)) { if (METADATA_FILE_PATTERN.matcher(file.getPath().getName()).matches()) { return true; } } return false; } IcebergFormatMatcher(FormatPlugin plugin); @Override FormatPlugin getFormatPlugin(); @Override boolean matches(FileSystem fs, FileSelection fileSelection, CompressionCodecFactory codecFactory); } | IcebergFormatMatcher extends FormatMatcher { @Override public boolean matches(FileSystem fs, FileSelection fileSelection, CompressionCodecFactory codecFactory) throws IOException { Path rootDir = Path.of(fileSelection.getSelectionRoot()); Path metaDir = rootDir.resolve(METADATA_DIR_NAME); if (!fs.isDirectory(rootDir) || !fs.exists(metaDir) || !fs.isDirectory(metaDir)) { return false; } Path versionHintPath = metaDir.resolve(VERSION_HINT_FILE_NAME); if (!fs.exists(versionHintPath) || !fs.isFile(versionHintPath)) { return false; } for (FileAttributes file : fs.list(metaDir)) { if (METADATA_FILE_PATTERN.matcher(file.getPath().getName()).matches()) { return true; } } return false; } IcebergFormatMatcher(FormatPlugin plugin); @Override FormatPlugin getFormatPlugin(); @Override boolean matches(FileSystem fs, FileSelection fileSelection, CompressionCodecFactory codecFactory); static final String METADATA_DIR_NAME; } |
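The matcher boils down to three filesystem checks, all visible in the test: the selection root must be a directory containing metadata/, metadata/ must hold version-hint.text, and at least one sibling must match the v<N>.metadata.json pattern (v9metadata.json, with no dot, deliberately does not). A throwaway sketch that lays out the minimal matching directory (the /tmp path is illustrative):

import java.io.File;
import java.io.IOException;

public class IcebergLayoutSketch {
  public static void main(String[] args) throws IOException {
    File root = new File("/tmp/iceberg-table-sketch");       // stand-in for the table root
    File metadata = new File(root, "metadata");              // METADATA_DIR_NAME
    metadata.mkdirs();
    new File(metadata, "version-hint.text").createNewFile(); // VERSION_HINT_FILE_NAME
    new File(metadata, "v9metadata.json").createNewFile();   // missing dot: not matched
    new File(metadata, "v9.metadata.json").createNewFile();  // matches v<N>.metadata.json
  }
}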
@Test public void missingArrowTypes() { org.apache.iceberg.Schema icebergSchema = new org.apache.iceberg.Schema( NestedField.optional(1, "uuid", Types.UUIDType.get()) ); BatchSchema schema = BatchSchema.newBuilder() .addField(new CompleteType(new FixedSizeBinary(16)).toField("uuid")) .build(); BatchSchema result = schemaConverter.fromIceberg(icebergSchema); assertEquals(result, schema); } | public BatchSchema fromIceberg(org.apache.iceberg.Schema icebergSchema) { return new BatchSchema(icebergSchema .columns() .stream() .map(SchemaConverter::fromIcebergColumn) .filter(Objects::nonNull) .collect(Collectors.toList())); } | SchemaConverter { public BatchSchema fromIceberg(org.apache.iceberg.Schema icebergSchema) { return new BatchSchema(icebergSchema .columns() .stream() .map(SchemaConverter::fromIcebergColumn) .filter(Objects::nonNull) .collect(Collectors.toList())); } } | SchemaConverter { public BatchSchema fromIceberg(org.apache.iceberg.Schema icebergSchema) { return new BatchSchema(icebergSchema .columns() .stream() .map(SchemaConverter::fromIcebergColumn) .filter(Objects::nonNull) .collect(Collectors.toList())); } SchemaConverter(); } | SchemaConverter { public BatchSchema fromIceberg(org.apache.iceberg.Schema icebergSchema) { return new BatchSchema(icebergSchema .columns() .stream() .map(SchemaConverter::fromIcebergColumn) .filter(Objects::nonNull) .collect(Collectors.toList())); } SchemaConverter(); BatchSchema fromIceberg(org.apache.iceberg.Schema icebergSchema); static Field fromIcebergColumn(NestedField field); static CompleteType fromIcebergType(Type type); static CompleteType fromIcebergPrimitiveType(PrimitiveType type); org.apache.iceberg.Schema toIceberg(BatchSchema schema); static NestedField toIcebergColumn(Field field); static Schema getChildSchemaForStruct(Schema schema, String structName); static Schema getChildSchemaForList(Schema schema, String listName); } | SchemaConverter { public BatchSchema fromIceberg(org.apache.iceberg.Schema icebergSchema) { return new BatchSchema(icebergSchema .columns() .stream() .map(SchemaConverter::fromIcebergColumn) .filter(Objects::nonNull) .collect(Collectors.toList())); } SchemaConverter(); BatchSchema fromIceberg(org.apache.iceberg.Schema icebergSchema); static Field fromIcebergColumn(NestedField field); static CompleteType fromIcebergType(Type type); static CompleteType fromIcebergPrimitiveType(PrimitiveType type); org.apache.iceberg.Schema toIceberg(BatchSchema schema); static NestedField toIcebergColumn(Field field); static Schema getChildSchemaForStruct(Schema schema, String structName); static Schema getChildSchemaForList(Schema schema, String listName); } |
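The test above pins down one lossy corner of the Iceberg-to-Arrow mapping: UUID has no native Arrow equivalent, so it round-trips as a 16-byte fixed-size binary. A small sketch of building that stand-in field with stock Arrow pojo types:

import org.apache.arrow.vector.types.pojo.ArrowType;
import org.apache.arrow.vector.types.pojo.Field;
import org.apache.arrow.vector.types.pojo.FieldType;

public class UuidStandInSketch {
  public static void main(String[] args) {
    // A UUID is 128 bits, so the Arrow-side stand-in is FixedSizeBinary(16).
    Field uuid = new Field("uuid",
        FieldType.nullable(new ArrowType.FixedSizeBinary(16)), null);
    System.out.println(uuid);
  }
}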
@Test public void mixed() throws Exception { BatchSchema schema = BatchSchema.newBuilder() .addField(CompleteType.INT.toField("rownum")) .addField(CompleteType.VARCHAR.toField("name")) .addField(CompleteType.INT.toField("age")) .addField(CompleteType.FLOAT.toField("gpa")) .addField(CompleteType.BIGINT.toField("studentnum")) .addField(CompleteType.TIMESTAMP.toField("create_time")) .addField(CompleteType.VARCHAR.asList().toField("interests")) .addField(CompleteType.struct( CompleteType.VARCHAR.toField("color"), CompleteType.VARCHAR.toField("sport"), CompleteType.VARCHAR.toField("food") ).toField("favorites")) .build(); org.apache.iceberg.Schema expectedSchema = new org.apache.iceberg.Schema( NestedField.optional(1, "rownum", Types.IntegerType.get()), NestedField.optional(2, "name", Types.StringType.get()), NestedField.optional(3, "age", Types.IntegerType.get()), NestedField.optional(4, "gpa", Types.FloatType.get()), NestedField.optional(5, "studentnum", Types.LongType.get()), NestedField.optional(6, "create_time", Types.TimestampType.withZone()), NestedField.optional(7, "interests", Types.ListType.ofOptional(9, Types.StringType.get())), NestedField.optional(8, "favorites", Types.StructType.of( NestedField.optional(10, "color", Types.StringType.get()), NestedField.optional(11, "sport", Types.StringType.get()), NestedField.optional(12, "food", Types.StringType.get()) )) ); org.apache.iceberg.Schema icebergResult = schemaConverter.toIceberg(schema); assertEquals(expectedSchema.toString(), icebergResult.toString()); TemporaryFolder folder = new TemporaryFolder(); folder.create(); String rootPath = folder.getRoot().toString(); Configuration conf = new Configuration(); IcebergCatalog catalog = new IcebergCatalog(rootPath, conf); catalog.beginCreateTable(schema, Collections.emptyList()); catalog.endCreateTable(); Table table = new HadoopTables(conf).load(rootPath); assertEquals(expectedSchema.toString(), table.schema().toString()); } | public org.apache.iceberg.Schema toIceberg(BatchSchema schema) { org.apache.iceberg.Schema icebergSchema = new org.apache.iceberg.Schema(schema .getFields() .stream() .filter(x -> !x.getName().equalsIgnoreCase(WriterPrel.PARTITION_COMPARATOR_FIELD)) .map(x -> toIcebergColumn(x)) .collect(Collectors.toList())); return TypeUtil.assignIncreasingFreshIds(icebergSchema); } | SchemaConverter { public org.apache.iceberg.Schema toIceberg(BatchSchema schema) { org.apache.iceberg.Schema icebergSchema = new org.apache.iceberg.Schema(schema .getFields() .stream() .filter(x -> !x.getName().equalsIgnoreCase(WriterPrel.PARTITION_COMPARATOR_FIELD)) .map(x -> toIcebergColumn(x)) .collect(Collectors.toList())); return TypeUtil.assignIncreasingFreshIds(icebergSchema); } } | SchemaConverter { public org.apache.iceberg.Schema toIceberg(BatchSchema schema) { org.apache.iceberg.Schema icebergSchema = new org.apache.iceberg.Schema(schema .getFields() .stream() .filter(x -> !x.getName().equalsIgnoreCase(WriterPrel.PARTITION_COMPARATOR_FIELD)) .map(x -> toIcebergColumn(x)) .collect(Collectors.toList())); return TypeUtil.assignIncreasingFreshIds(icebergSchema); } SchemaConverter(); } | SchemaConverter { public org.apache.iceberg.Schema toIceberg(BatchSchema schema) { org.apache.iceberg.Schema icebergSchema = new org.apache.iceberg.Schema(schema .getFields() .stream() .filter(x -> !x.getName().equalsIgnoreCase(WriterPrel.PARTITION_COMPARATOR_FIELD)) .map(x -> toIcebergColumn(x)) .collect(Collectors.toList())); return TypeUtil.assignIncreasingFreshIds(icebergSchema); } SchemaConverter(); BatchSchema fromIceberg(org.apache.iceberg.Schema icebergSchema); static Field fromIcebergColumn(NestedField field); static CompleteType fromIcebergType(Type type); static CompleteType fromIcebergPrimitiveType(PrimitiveType type); org.apache.iceberg.Schema toIceberg(BatchSchema schema); static NestedField toIcebergColumn(Field field); static Schema getChildSchemaForStruct(Schema schema, String structName); static Schema getChildSchemaForList(Schema schema, String listName); } | SchemaConverter { public org.apache.iceberg.Schema toIceberg(BatchSchema schema) { org.apache.iceberg.Schema icebergSchema = new org.apache.iceberg.Schema(schema .getFields() .stream() .filter(x -> !x.getName().equalsIgnoreCase(WriterPrel.PARTITION_COMPARATOR_FIELD)) .map(x -> toIcebergColumn(x)) .collect(Collectors.toList())); return TypeUtil.assignIncreasingFreshIds(icebergSchema); } SchemaConverter(); BatchSchema fromIceberg(org.apache.iceberg.Schema icebergSchema); static Field fromIcebergColumn(NestedField field); static CompleteType fromIcebergType(Type type); static CompleteType fromIcebergPrimitiveType(PrimitiveType type); org.apache.iceberg.Schema toIceberg(BatchSchema schema); static NestedField toIcebergColumn(Field field); static Schema getChildSchemaForStruct(Schema schema, String structName); static Schema getChildSchemaForList(Schema schema, String listName); } |
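The id layout in the mixed-schema test (1-8 for top-level fields, 9 for the list element, 10-12 for the struct children) is exactly what TypeUtil.assignIncreasingFreshIds produces: every field of a struct is renumbered before the visitor descends into nested types. A small demonstration with stock Iceberg types:

import org.apache.iceberg.Schema;
import org.apache.iceberg.types.TypeUtil;
import org.apache.iceberg.types.Types;

public class FreshIdsSketch {
  public static void main(String[] args) {
    Schema s = new Schema(
        Types.NestedField.optional(100, "interests",
            Types.ListType.ofOptional(200, Types.StringType.get())),
        Types.NestedField.optional(300, "name", Types.StringType.get()));
    // interests -> 1, name -> 2, then the list element -> 3
    System.out.println(TypeUtil.assignIncreasingFreshIds(s));
  }
}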
@Test public void testBrokenVDSEditOriginalSQL() throws Exception { Dataset parentVDS = createVDS(Arrays.asList("dsvTest", "badVDSParent"),"select version, commit_id from sys.version"); parentVDS = expectSuccess(getBuilder(getPublicAPI(3).path("catalog")).buildPost(Entity.json(parentVDS)), new GenericType<Dataset>() {}); Dataset newVDS = createVDS(Arrays.asList("dsvTest", "badVDS"),"select version from dsvTest.badVDSParent"); newVDS = expectSuccess(getBuilder(getPublicAPI(3).path("catalog")).buildPost(Entity.json(newVDS)), new GenericType<Dataset>() {}); Dataset updatedParentVDS = new Dataset( parentVDS.getId(), Dataset.DatasetType.VIRTUAL_DATASET, parentVDS.getPath(), null, null, parentVDS.getTag(), parentVDS.getAccelerationRefreshPolicy(), "select commit_id from sys.version", parentVDS.getSqlContext(), parentVDS.getFormat(), null ); expectSuccess(getBuilder(getPublicAPI(3).path("catalog").path(updatedParentVDS.getId())).buildPut(Entity.json(updatedParentVDS)), new GenericType<Dataset>() {}); String dsPath = String.join(".", newVDS.getPath()); DatasetVersion datasetVersion = DatasetVersion.newVersion(); WebTarget target = getAPIv2() .path("datasets") .path("new_untitled") .queryParam("parentDataset", dsPath) .queryParam("newVersion", datasetVersion) .queryParam("limit", 120); ApiErrorModel apiErrorModel = expectStatus(Response.Status.BAD_REQUEST, getBuilder(target).buildPost(Entity.json(null)), new GenericType<ApiErrorModel<InvalidQueryException.Details>>() {}); InvalidQueryException.Details details = (InvalidQueryException.Details) apiErrorModel.getDetails(); target = getAPIv2() .path("dataset") .path(dsPath) .path("version") .path(details.getDatasetSummary().getDatasetVersion().getVersion()) .path("preview") .queryParam("view", "explore") .queryParam("limit", "0"); InitialPreviewResponse initialPreviewResponse = expectSuccess(getBuilder(target).buildGet(), new GenericType<InitialPreviewResponse>() {}); assertEquals(newVDS.getSql(), initialPreviewResponse.getDataset().getSql()); } | @GET @Produces(APPLICATION_JSON) public Dataset getDataset() throws DatasetVersionNotFoundException, DatasetNotFoundException, NamespaceException { return getCurrentDataset(); } | DatasetVersionResource extends BaseResourceWithAllocator { @GET @Produces(APPLICATION_JSON) public Dataset getDataset() throws DatasetVersionNotFoundException, DatasetNotFoundException, NamespaceException { return getCurrentDataset(); } } | DatasetVersionResource extends BaseResourceWithAllocator { @GET @Produces(APPLICATION_JSON) public Dataset getDataset() throws DatasetVersionNotFoundException, DatasetNotFoundException, NamespaceException { return getCurrentDataset(); } @Inject DatasetVersionResource( SabotContext context, QueryExecutor executor, DatasetVersionMutator datasetService, JobsService jobsService, NamespaceService namespaceService, JoinRecommender joinRecommender, @Context SecurityContext securityContext, @PathParam("cpath") DatasetPath datasetPath, @PathParam("version") DatasetVersion version, BufferAllocatorFactory allocatorFactory ); DatasetVersionResource( QueryExecutor executor, DatasetVersionMutator datasetService, JobsService jobsService, Recommenders recommenders, Transformer transformer, JoinRecommender joinRecommender, DatasetTool datasetTool, HistogramGenerator histograms, SecurityContext securityContext, DatasetPath datasetPath, DatasetVersion version, BufferAllocator allocator ); protected DatasetVersionResource( QueryExecutor executor, DatasetVersionMutator datasetService, JobsService jobsService, Recommenders recommenders, Transformer transformer, JoinRecommender joinRecommender, DatasetTool datasetTool, HistogramGenerator histograms, SecurityContext securityContext, DatasetPath datasetPath, DatasetVersion version, BufferAllocatorFactory allocatorFactory ); } | DatasetVersionResource extends BaseResourceWithAllocator { @GET @Produces(APPLICATION_JSON) public Dataset getDataset() throws DatasetVersionNotFoundException, DatasetNotFoundException, NamespaceException { return getCurrentDataset(); } @Inject DatasetVersionResource( SabotContext context, QueryExecutor executor, DatasetVersionMutator datasetService, JobsService jobsService, NamespaceService namespaceService, JoinRecommender joinRecommender, @Context SecurityContext securityContext, @PathParam("cpath") DatasetPath datasetPath, @PathParam("version") DatasetVersion version, BufferAllocatorFactory allocatorFactory ); DatasetVersionResource( QueryExecutor executor, DatasetVersionMutator datasetService, JobsService jobsService, Recommenders recommenders, Transformer transformer, JoinRecommender joinRecommender, DatasetTool datasetTool, HistogramGenerator histograms, SecurityContext securityContext, DatasetPath datasetPath, DatasetVersion version, BufferAllocator allocator ); protected DatasetVersionResource( QueryExecutor executor, DatasetVersionMutator datasetService, JobsService jobsService, Recommenders recommenders, Transformer transformer, JoinRecommender joinRecommender, DatasetTool datasetTool, HistogramGenerator histograms, SecurityContext securityContext, DatasetPath datasetPath, DatasetVersion version, BufferAllocatorFactory allocatorFactory ); @GET @Produces(APPLICATION_JSON) Dataset getDataset(); @GET @Path("preview") @Produces(APPLICATION_JSON) InitialPreviewResponse getDatasetForVersion( @QueryParam("tipVersion") DatasetVersion tipVersion, @QueryParam("limit") Integer limit); @GET @Path("review") @Produces(APPLICATION_JSON) InitialPreviewResponse reviewDatasetVersion( @QueryParam("jobId") String jobId, @QueryParam("tipVersion") DatasetVersion tipVersion, @QueryParam("limit") Integer limit); @POST @Path("transformAndPreview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) InitialPreviewResponse transformAndPreview( /* Body */ TransformBase transform, @QueryParam("newVersion") DatasetVersion newVersion, @QueryParam("limit") @DefaultValue("50") int limit); @POST @Path("transformAndRun") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) InitialTransformAndRunResponse transformAndRun( /* Body */ TransformBase transform, @QueryParam("newVersion") DatasetVersion newVersion ); @GET @Path("run") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) InitialRunResponse run(@QueryParam("tipVersion") DatasetVersion tipVersion); @POST @Path("transformPeek") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) InitialPendingTransformResponse transformDataSetPreview( /* Body */ TransformBase transform, @QueryParam("newVersion") DatasetVersion newVersion, @QueryParam("limit") @DefaultValue("50") int limit); @POST @Path("save") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) DatasetUIWithHistory saveAsDataSet( @QueryParam("as") DatasetPath asDatasetPath, @QueryParam("savedTag") String savedTag /* null for the first save */ ); DatasetUI save(VirtualDatasetUI vds, DatasetPath asDatasetPath, String savedTag, NamespaceAttribute... attributes); @POST @Path("extract") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Cards<ExtractRule> getExtractCards( /* Body */ Selection selection); @POST @Path("extract_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ExtractRule> getExtractCard( /* Body */ PreviewReq<ExtractRule, Selection> req); @POST @Path("extract_map") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Cards<ExtractMapRule> getExtractMapCards( /* Body */ MapSelection mapSelection); @POST @Path("extract_map_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ExtractMapRule> getExtractMapCard( /* Body */ PreviewReq<ExtractMapRule, MapSelection> req); @POST @Path("extract_list") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Cards<ExtractListRule> getExtractListCards( /* Body */ Selection selection); @POST @Path("extract_list_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ExtractListRule> getExtractListCard( /* Body */ PreviewReq<ExtractListRule, Selection> req); @POST @Path("split") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Cards<SplitRule> getSplitCards( /* Body */ Selection selection); @POST @Path("split_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<SplitRule> getSplitCard( /* Body */ PreviewReq<SplitRule, Selection> req); @POST @Path("/editOriginalSql") @Produces(APPLICATION_JSON) InitialPreviewResponse reapplyDatasetAndPreview(); @POST @Path("/reapplyAndSave") @Produces(APPLICATION_JSON) DatasetUIWithHistory reapplySave( @QueryParam("as") DatasetPath asDatasetPath ); @POST @Path("replace") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceCards getReplaceCards( /* Body */ Selection selection); @POST @Path("replace_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ReplacePatternRule> getReplaceCard( /* Body */ PreviewReq<ReplacePatternRule, Selection> req); @POST @Path("replace_values_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceValuesCard getReplaceValuesCard( /* Body */ ReplaceValuesPreviewReq req); @POST @Path("keeponly") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceCards getKeeponlyCards( /* Body */ Selection selection); @POST @Path("keeponly_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ReplacePatternRule> getKeeponlyCard( /* Body */ PreviewReq<ReplacePatternRule, Selection> req); @POST @Path("keeponly_values_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceValuesCard getKeeponlyValuesCard( /* Body */ ReplaceValuesPreviewReq req); @POST @Path("exclude") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceCards getExcludeCards( /* Body */ Selection selection); @POST @Path("exclude_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ReplacePatternRule> getExcludeCard( /* Body */ PreviewReq<ReplacePatternRule, Selection> req); @POST @Path("exclude_values_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceValuesCard getExcludeValuesCard( /* Body */ ReplaceValuesPreviewReq req); @GET @Path("history") @Produces(APPLICATION_JSON) History getHistory(@QueryParam("tipVersion") DatasetVersion tipVersion); @POST @Path("clean") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) CleanDataCard getCleanDataCard( ColumnForCleaning col); @GET @Path("join_recs") @Produces(APPLICATION_JSON) JoinRecommendations getJoinRecommendations(); @GET @Path("parents") @Produces(APPLICATION_JSON) List<ParentDatasetUI> getParents(); } | DatasetVersionResource extends BaseResourceWithAllocator { @GET @Produces(APPLICATION_JSON) public Dataset getDataset() throws DatasetVersionNotFoundException, DatasetNotFoundException, NamespaceException { return getCurrentDataset(); } @Inject DatasetVersionResource( SabotContext context, QueryExecutor executor, DatasetVersionMutator datasetService, JobsService jobsService, NamespaceService namespaceService, JoinRecommender joinRecommender, @Context SecurityContext securityContext, @PathParam("cpath") DatasetPath datasetPath, @PathParam("version") DatasetVersion version, BufferAllocatorFactory allocatorFactory ); DatasetVersionResource( QueryExecutor executor, DatasetVersionMutator datasetService, JobsService jobsService, Recommenders recommenders, Transformer transformer, JoinRecommender joinRecommender, DatasetTool datasetTool, HistogramGenerator histograms, SecurityContext securityContext, DatasetPath datasetPath, DatasetVersion version, BufferAllocator allocator ); protected DatasetVersionResource( QueryExecutor executor, DatasetVersionMutator datasetService, JobsService jobsService, Recommenders recommenders, Transformer transformer, JoinRecommender joinRecommender, DatasetTool datasetTool, HistogramGenerator histograms, SecurityContext securityContext, DatasetPath datasetPath, DatasetVersion version, BufferAllocatorFactory allocatorFactory ); @GET @Produces(APPLICATION_JSON) Dataset getDataset(); @GET @Path("preview") @Produces(APPLICATION_JSON) InitialPreviewResponse getDatasetForVersion( @QueryParam("tipVersion") DatasetVersion tipVersion, @QueryParam("limit") Integer limit); @GET @Path("review") @Produces(APPLICATION_JSON) InitialPreviewResponse reviewDatasetVersion( @QueryParam("jobId") String jobId, @QueryParam("tipVersion") DatasetVersion tipVersion, @QueryParam("limit") Integer limit); @POST @Path("transformAndPreview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) InitialPreviewResponse transformAndPreview( /* Body */ TransformBase transform, @QueryParam("newVersion") DatasetVersion newVersion, @QueryParam("limit") @DefaultValue("50") int limit); @POST @Path("transformAndRun") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) InitialTransformAndRunResponse transformAndRun( /* Body */ TransformBase transform, @QueryParam("newVersion") DatasetVersion newVersion ); @GET @Path("run") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) InitialRunResponse run(@QueryParam("tipVersion") DatasetVersion tipVersion); @POST @Path("transformPeek") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) InitialPendingTransformResponse transformDataSetPreview( /* Body */ TransformBase transform, @QueryParam("newVersion") DatasetVersion newVersion, @QueryParam("limit") @DefaultValue("50") int limit); @POST @Path("save") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) DatasetUIWithHistory saveAsDataSet( @QueryParam("as") DatasetPath asDatasetPath, @QueryParam("savedTag") String savedTag /* null for the first save */ ); DatasetUI save(VirtualDatasetUI vds, DatasetPath asDatasetPath, String savedTag, NamespaceAttribute... attributes); @POST @Path("extract") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Cards<ExtractRule> getExtractCards( /* Body */ Selection selection); @POST @Path("extract_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ExtractRule> getExtractCard( /* Body */ PreviewReq<ExtractRule, Selection> req); @POST @Path("extract_map") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Cards<ExtractMapRule> getExtractMapCards( /* Body */ MapSelection mapSelection); @POST @Path("extract_map_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ExtractMapRule> getExtractMapCard( /* Body */ PreviewReq<ExtractMapRule, MapSelection> req); @POST @Path("extract_list") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Cards<ExtractListRule> getExtractListCards(
/* Body */ Selection selection); @POST @Path("extract_list_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ExtractListRule> getExtractListCard(
/* Body */ PreviewReq<ExtractListRule, Selection> req); @POST @Path("split") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Cards<SplitRule> getSplitCards(
/* Body */ Selection selection); @POST @Path("split_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<SplitRule> getSplitCard(
/* Body */ PreviewReq<SplitRule, Selection> req); @POST @Path("/editOriginalSql") @Produces(APPLICATION_JSON) InitialPreviewResponse reapplyDatasetAndPreview(); @POST @Path("/reapplyAndSave") @Produces(APPLICATION_JSON) DatasetUIWithHistory reapplySave(
@QueryParam("as") DatasetPath asDatasetPath
); @POST @Path("replace") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceCards getReplaceCards(
/* Body */ Selection selection); @POST @Path("replace_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ReplacePatternRule> getReplaceCard(
/* Body */ PreviewReq<ReplacePatternRule, Selection> req); @POST @Path("replace_values_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceValuesCard getReplaceValuesCard(
/* Body */ ReplaceValuesPreviewReq req); @POST @Path("keeponly") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceCards getKeeponlyCards(
/* Body */ Selection selection); @POST @Path("keeponly_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ReplacePatternRule> getKeeponlyCard(
/* Body */ PreviewReq<ReplacePatternRule, Selection> req); @POST @Path("keeponly_values_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceValuesCard getKeeponlyValuesCard(
/* Body */ ReplaceValuesPreviewReq req); @POST @Path("exclude") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceCards getExcludeCards(
/* Body */ Selection selection); @POST @Path("exclude_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ReplacePatternRule> getExcludeCard(
/* Body */ PreviewReq<ReplacePatternRule, Selection> req); @POST @Path("exclude_values_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceValuesCard getExcludeValuesCard(
/* Body */ ReplaceValuesPreviewReq req); @GET @Path("history") @Produces(APPLICATION_JSON) History getHistory(@QueryParam("tipVersion") DatasetVersion tipVersion); @POST @Path("clean") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) CleanDataCard getCleanDataCard(
ColumnForCleaning col); @GET @Path("join_recs") @Produces(APPLICATION_JSON) JoinRecommendations getJoinRecommendations(); @GET @Path("parents") @Produces(APPLICATION_JSON) List<ParentDatasetUI> getParents(); static final List<DataType> AVAILABLE_TYPES_FOR_CLEANING; } |
@Test public void testPartitionComparatorField() { BatchSchema inputschema = BatchSchema.newBuilder() .addField(CompleteType.BIT.toField("boolean")) .addField(CompleteType.INT.toField("int")) .addField(CompleteType.BIT.toField(WriterPrel.PARTITION_COMPARATOR_FIELD)) .build(); org.apache.iceberg.Schema expectedSchema = new org.apache.iceberg.Schema( NestedField.optional(1, "boolean", Types.BooleanType.get()), NestedField.optional(2, "int", Types.IntegerType.get())); SchemaConverter convert = new SchemaConverter(); assertEquals(convert.toIceberg(inputschema).toString(), expectedSchema.toString()); } | public org.apache.iceberg.Schema toIceberg(BatchSchema schema) { org.apache.iceberg.Schema icebergSchema = new org.apache.iceberg.Schema(schema .getFields() .stream() .filter(x -> !x.getName().equalsIgnoreCase(WriterPrel.PARTITION_COMPARATOR_FIELD)) .map(x -> toIcebergColumn(x)) .collect(Collectors.toList())); return TypeUtil.assignIncreasingFreshIds(icebergSchema); } | SchemaConverter { public org.apache.iceberg.Schema toIceberg(BatchSchema schema) { org.apache.iceberg.Schema icebergSchema = new org.apache.iceberg.Schema(schema .getFields() .stream() .filter(x -> !x.getName().equalsIgnoreCase(WriterPrel.PARTITION_COMPARATOR_FIELD)) .map(x -> toIcebergColumn(x)) .collect(Collectors.toList())); return TypeUtil.assignIncreasingFreshIds(icebergSchema); } } | SchemaConverter { public org.apache.iceberg.Schema toIceberg(BatchSchema schema) { org.apache.iceberg.Schema icebergSchema = new org.apache.iceberg.Schema(schema .getFields() .stream() .filter(x -> !x.getName().equalsIgnoreCase(WriterPrel.PARTITION_COMPARATOR_FIELD)) .map(x -> toIcebergColumn(x)) .collect(Collectors.toList())); return TypeUtil.assignIncreasingFreshIds(icebergSchema); } SchemaConverter(); } | SchemaConverter { public org.apache.iceberg.Schema toIceberg(BatchSchema schema) { org.apache.iceberg.Schema icebergSchema = new org.apache.iceberg.Schema(schema .getFields() .stream() .filter(x -> !x.getName().equalsIgnoreCase(WriterPrel.PARTITION_COMPARATOR_FIELD)) .map(x -> toIcebergColumn(x)) .collect(Collectors.toList())); return TypeUtil.assignIncreasingFreshIds(icebergSchema); } SchemaConverter(); BatchSchema fromIceberg(org.apache.iceberg.Schema icebergSchema); static Field fromIcebergColumn(NestedField field); static CompleteType fromIcebergType(Type type); static CompleteType fromIcebergPrimitiveType(PrimitiveType type); org.apache.iceberg.Schema toIceberg(BatchSchema schema); static NestedField toIcebergColumn(Field field); static Schema getChildSchemaForStruct(Schema schema, String structName); static Schema getChildSchemaForList(Schema schema, String listName); } | SchemaConverter { public org.apache.iceberg.Schema toIceberg(BatchSchema schema) { org.apache.iceberg.Schema icebergSchema = new org.apache.iceberg.Schema(schema .getFields() .stream() .filter(x -> !x.getName().equalsIgnoreCase(WriterPrel.PARTITION_COMPARATOR_FIELD)) .map(x -> toIcebergColumn(x)) .collect(Collectors.toList())); return TypeUtil.assignIncreasingFreshIds(icebergSchema); } SchemaConverter(); BatchSchema fromIceberg(org.apache.iceberg.Schema icebergSchema); static Field fromIcebergColumn(NestedField field); static CompleteType fromIcebergType(Type type); static CompleteType fromIcebergPrimitiveType(PrimitiveType type); org.apache.iceberg.Schema toIceberg(BatchSchema schema); static NestedField toIcebergColumn(Field field); static Schema getChildSchemaForStruct(Schema schema, String structName); static Schema getChildSchemaForList(Schema schema, String listName); } |
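
The testPartitionComparatorField row above pins down that toIceberg drops the WriterPrel.PARTITION_COMPARATOR_FIELD column before building the Iceberg schema. A minimal, self-contained sketch of that filtering step, using plain strings in place of Arrow fields and a made-up marker value:

import java.util.List;
import java.util.stream.Collectors;

public class PartitionFieldFilterSketch {
  // Drops any field whose name equals the comparator marker (case-insensitively),
  // mirroring the filter inside SchemaConverter.toIceberg above.
  static List<String> dropComparator(List<String> fieldNames, String comparatorField) {
    return fieldNames.stream()
        .filter(n -> !n.equalsIgnoreCase(comparatorField))
        .collect(Collectors.toList());
  }

  public static void main(String[] args) {
    // "$partition$" is a hypothetical marker value used only for illustration.
    System.out.println(dropComparator(List.of("boolean", "int", "$partition$"), "$partition$"));
    // prints: [boolean, int]
  }
}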
@Test public void unsupportedArrowTypes() { BatchSchema inputSchema = BatchSchema.newBuilder() .addField(CompleteType.union( CompleteType.INT.toField("int_field"), CompleteType.BIGINT.toField("bigint_field") ).toField("union_field")) .build(); expectedEx.expect(UserException.class); expectedEx.expectMessage("conversion from arrow type to iceberg type failed for field union_field"); SchemaConverter convert = new SchemaConverter(); convert.toIceberg(inputSchema); } | public org.apache.iceberg.Schema toIceberg(BatchSchema schema) { org.apache.iceberg.Schema icebergSchema = new org.apache.iceberg.Schema(schema .getFields() .stream() .filter(x -> !x.getName().equalsIgnoreCase(WriterPrel.PARTITION_COMPARATOR_FIELD)) .map(x -> toIcebergColumn(x)) .collect(Collectors.toList())); return TypeUtil.assignIncreasingFreshIds(icebergSchema); } | SchemaConverter { public org.apache.iceberg.Schema toIceberg(BatchSchema schema) { org.apache.iceberg.Schema icebergSchema = new org.apache.iceberg.Schema(schema .getFields() .stream() .filter(x -> !x.getName().equalsIgnoreCase(WriterPrel.PARTITION_COMPARATOR_FIELD)) .map(x -> toIcebergColumn(x)) .collect(Collectors.toList())); return TypeUtil.assignIncreasingFreshIds(icebergSchema); } } | SchemaConverter { public org.apache.iceberg.Schema toIceberg(BatchSchema schema) { org.apache.iceberg.Schema icebergSchema = new org.apache.iceberg.Schema(schema .getFields() .stream() .filter(x -> !x.getName().equalsIgnoreCase(WriterPrel.PARTITION_COMPARATOR_FIELD)) .map(x -> toIcebergColumn(x)) .collect(Collectors.toList())); return TypeUtil.assignIncreasingFreshIds(icebergSchema); } SchemaConverter(); } | SchemaConverter { public org.apache.iceberg.Schema toIceberg(BatchSchema schema) { org.apache.iceberg.Schema icebergSchema = new org.apache.iceberg.Schema(schema .getFields() .stream() .filter(x -> !x.getName().equalsIgnoreCase(WriterPrel.PARTITION_COMPARATOR_FIELD)) .map(x -> toIcebergColumn(x)) .collect(Collectors.toList())); return TypeUtil.assignIncreasingFreshIds(icebergSchema); } SchemaConverter(); BatchSchema fromIceberg(org.apache.iceberg.Schema icebergSchema); static Field fromIcebergColumn(NestedField field); static CompleteType fromIcebergType(Type type); static CompleteType fromIcebergPrimitiveType(PrimitiveType type); org.apache.iceberg.Schema toIceberg(BatchSchema schema); static NestedField toIcebergColumn(Field field); static Schema getChildSchemaForStruct(Schema schema, String structName); static Schema getChildSchemaForList(Schema schema, String listName); } | SchemaConverter { public org.apache.iceberg.Schema toIceberg(BatchSchema schema) { org.apache.iceberg.Schema icebergSchema = new org.apache.iceberg.Schema(schema .getFields() .stream() .filter(x -> !x.getName().equalsIgnoreCase(WriterPrel.PARTITION_COMPARATOR_FIELD)) .map(x -> toIcebergColumn(x)) .collect(Collectors.toList())); return TypeUtil.assignIncreasingFreshIds(icebergSchema); } SchemaConverter(); BatchSchema fromIceberg(org.apache.iceberg.Schema icebergSchema); static Field fromIcebergColumn(NestedField field); static CompleteType fromIcebergType(Type type); static CompleteType fromIcebergPrimitiveType(PrimitiveType type); org.apache.iceberg.Schema toIceberg(BatchSchema schema); static NestedField toIcebergColumn(Field field); static Schema getChildSchemaForStruct(Schema schema, String structName); static Schema getChildSchemaForList(Schema schema, String listName); } |
@Test public void unsupportedIcebergTypes() { org.apache.iceberg.Schema schema = new org.apache.iceberg.Schema( NestedField.optional(1, "timestamp_nozone_field", Types.TimestampType.withoutZone()) ); expectedEx.expect(UserException.class); expectedEx.expectMessage("conversion from iceberg type to arrow type failed for field timestamp_nozone_field"); SchemaConverter convert = new SchemaConverter(); convert.fromIceberg(schema); } | public BatchSchema fromIceberg(org.apache.iceberg.Schema icebergSchema) { return new BatchSchema(icebergSchema .columns() .stream() .map(SchemaConverter::fromIcebergColumn) .filter(Objects::nonNull) .collect(Collectors.toList())); } | SchemaConverter { public BatchSchema fromIceberg(org.apache.iceberg.Schema icebergSchema) { return new BatchSchema(icebergSchema .columns() .stream() .map(SchemaConverter::fromIcebergColumn) .filter(Objects::nonNull) .collect(Collectors.toList())); } } | SchemaConverter { public BatchSchema fromIceberg(org.apache.iceberg.Schema icebergSchema) { return new BatchSchema(icebergSchema .columns() .stream() .map(SchemaConverter::fromIcebergColumn) .filter(Objects::nonNull) .collect(Collectors.toList())); } SchemaConverter(); } | SchemaConverter { public BatchSchema fromIceberg(org.apache.iceberg.Schema icebergSchema) { return new BatchSchema(icebergSchema .columns() .stream() .map(SchemaConverter::fromIcebergColumn) .filter(Objects::nonNull) .collect(Collectors.toList())); } SchemaConverter(); BatchSchema fromIceberg(org.apache.iceberg.Schema icebergSchema); static Field fromIcebergColumn(NestedField field); static CompleteType fromIcebergType(Type type); static CompleteType fromIcebergPrimitiveType(PrimitiveType type); org.apache.iceberg.Schema toIceberg(BatchSchema schema); static NestedField toIcebergColumn(Field field); static Schema getChildSchemaForStruct(Schema schema, String structName); static Schema getChildSchemaForList(Schema schema, String listName); } | SchemaConverter { public BatchSchema fromIceberg(org.apache.iceberg.Schema icebergSchema) { return new BatchSchema(icebergSchema .columns() .stream() .map(SchemaConverter::fromIcebergColumn) .filter(Objects::nonNull) .collect(Collectors.toList())); } SchemaConverter(); BatchSchema fromIceberg(org.apache.iceberg.Schema icebergSchema); static Field fromIcebergColumn(NestedField field); static CompleteType fromIcebergType(Type type); static CompleteType fromIcebergPrimitiveType(PrimitiveType type); org.apache.iceberg.Schema toIceberg(BatchSchema schema); static NestedField toIcebergColumn(Field field); static Schema getChildSchemaForStruct(Schema schema, String structName); static Schema getChildSchemaForList(Schema schema, String listName); } |
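
The unsupportedArrowTypes and unsupportedIcebergTypes rows above exercise the converter's failure path in both directions: an unconvertible field is reported by name instead of surfacing a raw cast error. A small sketch of that reporting pattern, with a plain RuntimeException standing in for Dremio's UserException:

public class ConversionErrorSketch {
  // Builds the kind of message the two tests assert on; the method name and
  // exception type are illustrative stand-ins, not the library's API.
  static RuntimeException unsupported(String direction, String fieldName) {
    return new RuntimeException(String.format(
        "conversion from %s failed for field %s", direction, fieldName));
  }

  public static void main(String[] args) {
    try {
      throw unsupported("arrow type to iceberg type", "union_field");
    } catch (RuntimeException e) {
      System.out.println(e.getMessage());
      // prints: conversion from arrow type to iceberg type failed for field union_field
    }
  }
}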
@Test public void simpleOneFileOnAHost() throws Exception { final List<CompleteWork> workUnits = asList( newWork("/10.0.0.1/table/foo1", 1024, ENDPOINT_1_1, 1.00) ); ListMultimap<Integer, CompleteWork> mappings; List<NodeEndpoint> endpoints; endpoints = asList(ENDPOINT_1_1); mappings = INSTANCE.getMappings(endpoints, workUnits); verifyAssignments(mappings, endpoints, workUnits); endpoints = asList(ENDPOINT_1_2); mappings = INSTANCE.getMappings(endpoints, workUnits); verifyAssignments(mappings, endpoints, workUnits); verifyAssignmentFails(workUnits, ENDPOINT_2_1); verifyAssignmentFails(workUnits, ENDPOINT_1_1, ENDPOINT_1_2); verifyAssignmentFails(workUnits, ENDPOINT_1_1, ENDPOINT_2_2); } | public <T extends CompleteWork> ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units) throws PhysicalOperatorSetupException { verify(endpoints, units, units.size() >= endpoints.size(), "There should be at least one work unit for each hard affinity node."); final ListMultimap<String, Integer> endpointsOnHostMap = ArrayListMultimap.create(); int index = 0; for(NodeEndpoint incoming : endpoints) { endpointsOnHostMap.put(incoming.getAddress(), index); index++; } final Map<String, Iterator<Integer>> endpointIteratorOnHostMap = Maps.newHashMap(); for(Map.Entry<String, Collection<Integer>> entry: endpointsOnHostMap.asMap().entrySet()) { endpointIteratorOnHostMap.put(entry.getKey(), Iterables.cycle(entry.getValue()).iterator()); } final ListMultimap<Integer, T> mappings = ArrayListMultimap.create(); for(T unit: units) { final List<EndpointAffinity> affinities = unit.getAffinity(); verify(endpoints, units, affinities.size() == 1, "Expected the hard affinity work unit to have affinity to only one endpoint"); final EndpointAffinity endpointAffinity = affinities.get(0); final String host = endpointAffinity.getEndpoint().getAddress(); final Iterator<Integer> endpointsOnHost = endpointIteratorOnHostMap.get(host); if (endpointsOnHost == null) { verify(endpoints, units, false, "There are no endpoints in assigned list running on host %s", host); } final int endpointId = endpointIteratorOnHostMap.get(host).next(); mappings.put(endpointId, unit); } for(int i = 0; i < endpoints.size(); i++) { if (!mappings.containsKey(i)) { verify(endpoints, units, false, "Endpoint %s has no assigned work.", endpoints.get(i)); } } return mappings; } | HardAssignmentCreator { public <T extends CompleteWork> ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units) throws PhysicalOperatorSetupException { verify(endpoints, units, units.size() >= endpoints.size(), "There should be at least one work unit for each hard affinity node."); final ListMultimap<String, Integer> endpointsOnHostMap = ArrayListMultimap.create(); int index = 0; for(NodeEndpoint incoming : endpoints) { endpointsOnHostMap.put(incoming.getAddress(), index); index++; } final Map<String, Iterator<Integer>> endpointIteratorOnHostMap = Maps.newHashMap(); for(Map.Entry<String, Collection<Integer>> entry: endpointsOnHostMap.asMap().entrySet()) { endpointIteratorOnHostMap.put(entry.getKey(), Iterables.cycle(entry.getValue()).iterator()); } final ListMultimap<Integer, T> mappings = ArrayListMultimap.create(); for(T unit: units) { final List<EndpointAffinity> affinities = unit.getAffinity(); verify(endpoints, units, affinities.size() == 1, "Expected the hard affinity work unit to have affinity to only one endpoint"); final EndpointAffinity endpointAffinity = affinities.get(0); final String host = endpointAffinity.getEndpoint().getAddress(); final Iterator<Integer> endpointsOnHost = endpointIteratorOnHostMap.get(host); if (endpointsOnHost == null) { verify(endpoints, units, false, "There are no endpoints in assigned list running on host %s", host); } final int endpointId = endpointIteratorOnHostMap.get(host).next(); mappings.put(endpointId, unit); } for(int i = 0; i < endpoints.size(); i++) { if (!mappings.containsKey(i)) { verify(endpoints, units, false, "Endpoint %s has no assigned work.", endpoints.get(i)); } } return mappings; } } | HardAssignmentCreator { public <T extends CompleteWork> ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units) throws PhysicalOperatorSetupException { verify(endpoints, units, units.size() >= endpoints.size(), "There should be at least one work unit for each hard affinity node."); final ListMultimap<String, Integer> endpointsOnHostMap = ArrayListMultimap.create(); int index = 0; for(NodeEndpoint incoming : endpoints) { endpointsOnHostMap.put(incoming.getAddress(), index); index++; } final Map<String, Iterator<Integer>> endpointIteratorOnHostMap = Maps.newHashMap(); for(Map.Entry<String, Collection<Integer>> entry: endpointsOnHostMap.asMap().entrySet()) { endpointIteratorOnHostMap.put(entry.getKey(), Iterables.cycle(entry.getValue()).iterator()); } final ListMultimap<Integer, T> mappings = ArrayListMultimap.create(); for(T unit: units) { final List<EndpointAffinity> affinities = unit.getAffinity(); verify(endpoints, units, affinities.size() == 1, "Expected the hard affinity work unit to have affinity to only one endpoint"); final EndpointAffinity endpointAffinity = affinities.get(0); final String host = endpointAffinity.getEndpoint().getAddress(); final Iterator<Integer> endpointsOnHost = endpointIteratorOnHostMap.get(host); if (endpointsOnHost == null) { verify(endpoints, units, false, "There are no endpoints in assigned list running on host %s", host); } final int endpointId = endpointIteratorOnHostMap.get(host).next(); mappings.put(endpointId, unit); } for(int i = 0; i < endpoints.size(); i++) { if (!mappings.containsKey(i)) { verify(endpoints, units, false, "Endpoint %s has no assigned work.", endpoints.get(i)); } } return mappings; } ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units); } | HardAssignmentCreator { public <T extends CompleteWork> ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units) throws PhysicalOperatorSetupException { verify(endpoints, units, units.size() >= endpoints.size(), "There should be at least one work unit for each hard affinity node."); final ListMultimap<String, Integer> endpointsOnHostMap = ArrayListMultimap.create(); int index = 0; for(NodeEndpoint incoming : endpoints) { endpointsOnHostMap.put(incoming.getAddress(), index); index++; } final Map<String, Iterator<Integer>> endpointIteratorOnHostMap = Maps.newHashMap(); for(Map.Entry<String, Collection<Integer>> entry: endpointsOnHostMap.asMap().entrySet()) { endpointIteratorOnHostMap.put(entry.getKey(), Iterables.cycle(entry.getValue()).iterator()); } final ListMultimap<Integer, T> mappings = ArrayListMultimap.create(); for(T unit: units) { final List<EndpointAffinity> affinities = unit.getAffinity(); verify(endpoints, units, affinities.size() == 1, "Expected the hard affinity work unit to have affinity to only one endpoint"); final EndpointAffinity endpointAffinity = affinities.get(0); final String host = endpointAffinity.getEndpoint().getAddress(); final Iterator<Integer> endpointsOnHost = endpointIteratorOnHostMap.get(host); if (endpointsOnHost == null) { verify(endpoints, units, false, "There are no endpoints in assigned list running on host %s", host); } final int endpointId = endpointIteratorOnHostMap.get(host).next(); mappings.put(endpointId, unit); } for(int i = 0; i < endpoints.size(); i++) { if (!mappings.containsKey(i)) { verify(endpoints, units, false, "Endpoint %s has no assigned work.", endpoints.get(i)); } } return mappings; } ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units); static HardAssignmentCreator INSTANCE; } |
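
The simpleOneFileOnAHost row, together with the getMappings body it targets, shows the core rule of HardAssignmentCreator: endpoints are grouped by host address, and each work unit with hard affinity is handed round-robin to the endpoints of its host. The self-contained sketch below reproduces that distribution with plain strings in place of NodeEndpoint and CompleteWork; it omits the verify preconditions the real method enforces.

import java.util.*;

public class RoundRobinByHostSketch {
  static Map<Integer, List<String>> assign(List<String> endpointHosts, List<String> unitHosts) {
    // host -> indices of the endpoints running on that host
    Map<String, List<Integer>> byHost = new HashMap<>();
    for (int i = 0; i < endpointHosts.size(); i++) {
      byHost.computeIfAbsent(endpointHosts.get(i), h -> new ArrayList<>()).add(i);
    }
    Map<String, Integer> cursor = new HashMap<>();   // round-robin position per host
    Map<Integer, List<String>> mappings = new HashMap<>();
    for (String host : unitHosts) {
      List<Integer> ids = byHost.get(host);
      if (ids == null) {
        throw new IllegalStateException("There are no endpoints in assigned list running on host " + host);
      }
      int next = cursor.merge(host, 1, Integer::sum) - 1;  // advance the cursor
      int endpointId = ids.get(next % ids.size());         // cycle over that host's endpoints
      mappings.computeIfAbsent(endpointId, k -> new ArrayList<>()).add("work@" + host);
    }
    return mappings;
  }

  public static void main(String[] args) {
    System.out.println(assign(List.of("10.0.0.1", "10.0.0.1"),
        List.of("10.0.0.1", "10.0.0.1", "10.0.0.1")));
    // e.g. {0=[work@10.0.0.1, work@10.0.0.1], 1=[work@10.0.0.1]}
  }
}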
@Test public void simpleTwoFileOneOnEachHost() throws Exception { final List<CompleteWork> workUnits = asList( newWork("/10.0.0.1/table/foo1", 1024, ENDPOINT_1_1, 0.33), newWork("/10.0.0.2/table/foo2", 2048, ENDPOINT_2_2, 0.66) ); ListMultimap<Integer, CompleteWork> mappings; List<NodeEndpoint> endpoints; endpoints = asList(ENDPOINT_1_1, ENDPOINT_2_2); mappings = INSTANCE.getMappings(endpoints, workUnits); verifyAssignments(mappings, endpoints, workUnits); endpoints = asList(ENDPOINT_1_2, ENDPOINT_2_1); mappings = INSTANCE.getMappings(endpoints, workUnits); verifyAssignments(mappings, endpoints, workUnits); endpoints = asList(ENDPOINT_1_1, ENDPOINT_2_1); mappings = INSTANCE.getMappings(endpoints, workUnits); verifyAssignments(mappings, endpoints, workUnits); endpoints = asList(ENDPOINT_1_2, ENDPOINT_2_2); mappings = INSTANCE.getMappings(endpoints, workUnits); verifyAssignments(mappings, endpoints, workUnits); verifyAssignmentFails(workUnits, ENDPOINT_1_1, ENDPOINT_1_2); verifyAssignmentFails(workUnits, ENDPOINT_1_1); verifyAssignmentFails(workUnits, ENDPOINT_1_1, ENDPOINT_1_2, ENDPOINT_2_1); } | public <T extends CompleteWork> ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units) throws PhysicalOperatorSetupException { verify(endpoints, units, units.size() >= endpoints.size(), "There should be at least one work unit for each hard affinity node."); final ListMultimap<String, Integer> endpointsOnHostMap = ArrayListMultimap.create(); int index = 0; for(NodeEndpoint incoming : endpoints) { endpointsOnHostMap.put(incoming.getAddress(), index); index++; } final Map<String, Iterator<Integer>> endpointIteratorOnHostMap = Maps.newHashMap(); for(Map.Entry<String, Collection<Integer>> entry: endpointsOnHostMap.asMap().entrySet()) { endpointIteratorOnHostMap.put(entry.getKey(), Iterables.cycle(entry.getValue()).iterator()); } final ListMultimap<Integer, T> mappings = ArrayListMultimap.create(); for(T unit: units) { final List<EndpointAffinity> affinities = unit.getAffinity(); verify(endpoints, units, affinities.size() == 1, "Expected the hard affinity work unit to have affinity to only one endpoint"); final EndpointAffinity endpointAffinity = affinities.get(0); final String host = endpointAffinity.getEndpoint().getAddress(); final Iterator<Integer> endpointsOnHost = endpointIteratorOnHostMap.get(host); if (endpointsOnHost == null) { verify(endpoints, units, false, "There are no endpoints in assigned list running on host %s", host); } final int endpointId = endpointIteratorOnHostMap.get(host).next(); mappings.put(endpointId, unit); } for(int i = 0; i < endpoints.size(); i++) { if (!mappings.containsKey(i)) { verify(endpoints, units, false, "Endpoint %s has no assigned work.", endpoints.get(i)); } } return mappings; } | HardAssignmentCreator { public <T extends CompleteWork> ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units) throws PhysicalOperatorSetupException { verify(endpoints, units, units.size() >= endpoints.size(), "There should be at least one work unit for each hard affinity node."); final ListMultimap<String, Integer> endpointsOnHostMap = ArrayListMultimap.create(); int index = 0; for(NodeEndpoint incoming : endpoints) { endpointsOnHostMap.put(incoming.getAddress(), index); index++; } final Map<String, Iterator<Integer>> endpointIteratorOnHostMap = Maps.newHashMap(); for(Map.Entry<String, Collection<Integer>> entry: endpointsOnHostMap.asMap().entrySet()) { endpointIteratorOnHostMap.put(entry.getKey(), Iterables.cycle(entry.getValue()).iterator()); } final ListMultimap<Integer, T> mappings = ArrayListMultimap.create(); for(T unit: units) { final List<EndpointAffinity> affinities = unit.getAffinity(); verify(endpoints, units, affinities.size() == 1, "Expected the hard affinity work unit to have affinity to only one endpoint"); final EndpointAffinity endpointAffinity = affinities.get(0); final String host = endpointAffinity.getEndpoint().getAddress(); final Iterator<Integer> endpointsOnHost = endpointIteratorOnHostMap.get(host); if (endpointsOnHost == null) { verify(endpoints, units, false, "There are no endpoints in assigned list running on host %s", host); } final int endpointId = endpointIteratorOnHostMap.get(host).next(); mappings.put(endpointId, unit); } for(int i = 0; i < endpoints.size(); i++) { if (!mappings.containsKey(i)) { verify(endpoints, units, false, "Endpoint %s has no assigned work.", endpoints.get(i)); } } return mappings; } } | HardAssignmentCreator { public <T extends CompleteWork> ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units) throws PhysicalOperatorSetupException { verify(endpoints, units, units.size() >= endpoints.size(), "There should be at least one work unit for each hard affinity node."); final ListMultimap<String, Integer> endpointsOnHostMap = ArrayListMultimap.create(); int index = 0; for(NodeEndpoint incoming : endpoints) { endpointsOnHostMap.put(incoming.getAddress(), index); index++; } final Map<String, Iterator<Integer>> endpointIteratorOnHostMap = Maps.newHashMap(); for(Map.Entry<String, Collection<Integer>> entry: endpointsOnHostMap.asMap().entrySet()) { endpointIteratorOnHostMap.put(entry.getKey(), Iterables.cycle(entry.getValue()).iterator()); } final ListMultimap<Integer, T> mappings = ArrayListMultimap.create(); for(T unit: units) { final List<EndpointAffinity> affinities = unit.getAffinity(); verify(endpoints, units, affinities.size() == 1, "Expected the hard affinity work unit to have affinity to only one endpoint"); final EndpointAffinity endpointAffinity = affinities.get(0); final String host = endpointAffinity.getEndpoint().getAddress(); final Iterator<Integer> endpointsOnHost = endpointIteratorOnHostMap.get(host); if (endpointsOnHost == null) { verify(endpoints, units, false, "There are no endpoints in assigned list running on host %s", host); } final int endpointId = endpointIteratorOnHostMap.get(host).next(); mappings.put(endpointId, unit); } for(int i = 0; i < endpoints.size(); i++) { if (!mappings.containsKey(i)) { verify(endpoints, units, false, "Endpoint %s has no assigned work.", endpoints.get(i)); } } return mappings; } ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units); } | HardAssignmentCreator { public <T extends CompleteWork> ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units) throws PhysicalOperatorSetupException { verify(endpoints, units, units.size() >= endpoints.size(), "There should be at least one work unit for each hard affinity node."); final ListMultimap<String, Integer> endpointsOnHostMap = ArrayListMultimap.create(); int index = 0; for(NodeEndpoint incoming : endpoints) { endpointsOnHostMap.put(incoming.getAddress(), index); index++; } final Map<String, Iterator<Integer>> endpointIteratorOnHostMap = Maps.newHashMap(); for(Map.Entry<String, Collection<Integer>> entry: endpointsOnHostMap.asMap().entrySet()) { endpointIteratorOnHostMap.put(entry.getKey(), Iterables.cycle(entry.getValue()).iterator()); } final ListMultimap<Integer, T> mappings = ArrayListMultimap.create(); for(T unit: units) { final List<EndpointAffinity> affinities = unit.getAffinity(); verify(endpoints, units, affinities.size() == 1, "Expected the hard affinity work unit to have affinity to only one endpoint"); final EndpointAffinity endpointAffinity = affinities.get(0); final String host = endpointAffinity.getEndpoint().getAddress(); final Iterator<Integer> endpointsOnHost = endpointIteratorOnHostMap.get(host); if (endpointsOnHost == null) { verify(endpoints, units, false, "There are no endpoints in assigned list running on host %s", host); } final int endpointId = endpointIteratorOnHostMap.get(host).next(); mappings.put(endpointId, unit); } for(int i = 0; i < endpoints.size(); i++) { if (!mappings.containsKey(i)) { verify(endpoints, units, false, "Endpoint %s has no assigned work.", endpoints.get(i)); } } return mappings; } ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units); static HardAssignmentCreator INSTANCE; } |
@Test public void twoFilesOnSameHost() throws Exception { final List<CompleteWork> workUnits = asList( newWork("/10.0.0.1/table/foo1", 1024, ENDPOINT_1_1, 0.33), newWork("/10.0.0.1/table/foo2", 2048, ENDPOINT_1_2, 0.66) ); ListMultimap<Integer, CompleteWork> mappings; List<NodeEndpoint> endpoints; endpoints = asList(ENDPOINT_1_1, ENDPOINT_1_2); mappings = INSTANCE.getMappings(endpoints, workUnits); verifyAssignments(mappings, endpoints, workUnits); endpoints = asList(ENDPOINT_1_2); mappings = INSTANCE.getMappings(endpoints, workUnits); verifyAssignments(mappings, endpoints, workUnits); verifyAssignmentFails(workUnits, ENDPOINT_1_1, ENDPOINT_2_1); } | public <T extends CompleteWork> ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units) throws PhysicalOperatorSetupException { verify(endpoints, units, units.size() >= endpoints.size(), "There should be at least one work unit for each hard affinity node."); final ListMultimap<String, Integer> endpointsOnHostMap = ArrayListMultimap.create(); int index = 0; for(NodeEndpoint incoming : endpoints) { endpointsOnHostMap.put(incoming.getAddress(), index); index++; } final Map<String, Iterator<Integer>> endpointIteratorOnHostMap = Maps.newHashMap(); for(Map.Entry<String, Collection<Integer>> entry: endpointsOnHostMap.asMap().entrySet()) { endpointIteratorOnHostMap.put(entry.getKey(), Iterables.cycle(entry.getValue()).iterator()); } final ListMultimap<Integer, T> mappings = ArrayListMultimap.create(); for(T unit: units) { final List<EndpointAffinity> affinities = unit.getAffinity(); verify(endpoints, units, affinities.size() == 1, "Expected the hard affinity work unit to have affinity to only one endpoint"); final EndpointAffinity endpointAffinity = affinities.get(0); final String host = endpointAffinity.getEndpoint().getAddress(); final Iterator<Integer> endpointsOnHost = endpointIteratorOnHostMap.get(host); if (endpointsOnHost == null) { verify(endpoints, units, false, "There are no endpoints in assigned list running on host %s", host); } final int endpointId = endpointIteratorOnHostMap.get(host).next(); mappings.put(endpointId, unit); } for(int i = 0; i < endpoints.size(); i++) { if (!mappings.containsKey(i)) { verify(endpoints, units, false, "Endpoint %s has no assigned work.", endpoints.get(i)); } } return mappings; } | HardAssignmentCreator { public <T extends CompleteWork> ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units) throws PhysicalOperatorSetupException { verify(endpoints, units, units.size() >= endpoints.size(), "There should be at least one work unit for each hard affinity node."); final ListMultimap<String, Integer> endpointsOnHostMap = ArrayListMultimap.create(); int index = 0; for(NodeEndpoint incoming : endpoints) { endpointsOnHostMap.put(incoming.getAddress(), index); index++; } final Map<String, Iterator<Integer>> endpointIteratorOnHostMap = Maps.newHashMap(); for(Map.Entry<String, Collection<Integer>> entry: endpointsOnHostMap.asMap().entrySet()) { endpointIteratorOnHostMap.put(entry.getKey(), Iterables.cycle(entry.getValue()).iterator()); } final ListMultimap<Integer, T> mappings = ArrayListMultimap.create(); for(T unit: units) { final List<EndpointAffinity> affinities = unit.getAffinity(); verify(endpoints, units, affinities.size() == 1, "Expected the hard affinity work unit to have affinity to only one endpoint"); final EndpointAffinity endpointAffinity = affinities.get(0); final String host = endpointAffinity.getEndpoint().getAddress(); final Iterator<Integer> endpointsOnHost = endpointIteratorOnHostMap.get(host); if (endpointsOnHost == null) { verify(endpoints, units, false, "There are no endpoints in assigned list running on host %s", host); } final int endpointId = endpointIteratorOnHostMap.get(host).next(); mappings.put(endpointId, unit); } for(int i = 0; i < endpoints.size(); i++) { if (!mappings.containsKey(i)) { verify(endpoints, units, false, "Endpoint %s has no assigned work.", endpoints.get(i)); } } return mappings; } } | HardAssignmentCreator { public <T extends CompleteWork> ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units) throws PhysicalOperatorSetupException { verify(endpoints, units, units.size() >= endpoints.size(), "There should be at least one work unit for each hard affinity node."); final ListMultimap<String, Integer> endpointsOnHostMap = ArrayListMultimap.create(); int index = 0; for(NodeEndpoint incoming : endpoints) { endpointsOnHostMap.put(incoming.getAddress(), index); index++; } final Map<String, Iterator<Integer>> endpointIteratorOnHostMap = Maps.newHashMap(); for(Map.Entry<String, Collection<Integer>> entry: endpointsOnHostMap.asMap().entrySet()) { endpointIteratorOnHostMap.put(entry.getKey(), Iterables.cycle(entry.getValue()).iterator()); } final ListMultimap<Integer, T> mappings = ArrayListMultimap.create(); for(T unit: units) { final List<EndpointAffinity> affinities = unit.getAffinity(); verify(endpoints, units, affinities.size() == 1, "Expected the hard affinity work unit to have affinity to only one endpoint"); final EndpointAffinity endpointAffinity = affinities.get(0); final String host = endpointAffinity.getEndpoint().getAddress(); final Iterator<Integer> endpointsOnHost = endpointIteratorOnHostMap.get(host); if (endpointsOnHost == null) { verify(endpoints, units, false, "There are no endpoints in assigned list running on host %s", host); } final int endpointId = endpointIteratorOnHostMap.get(host).next(); mappings.put(endpointId, unit); } for(int i = 0; i < endpoints.size(); i++) { if (!mappings.containsKey(i)) { verify(endpoints, units, false, "Endpoint %s has no assigned work.", endpoints.get(i)); } } return mappings; } ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units); } | HardAssignmentCreator { public <T extends CompleteWork> ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units) throws PhysicalOperatorSetupException { verify(endpoints, units, units.size() >= endpoints.size(), "There should be at least one work unit for each hard affinity node."); final ListMultimap<String, Integer> endpointsOnHostMap = ArrayListMultimap.create(); int index = 0; for(NodeEndpoint incoming : endpoints) { endpointsOnHostMap.put(incoming.getAddress(), index); index++; } final Map<String, Iterator<Integer>> endpointIteratorOnHostMap = Maps.newHashMap(); for(Map.Entry<String, Collection<Integer>> entry: endpointsOnHostMap.asMap().entrySet()) { endpointIteratorOnHostMap.put(entry.getKey(), Iterables.cycle(entry.getValue()).iterator()); } final ListMultimap<Integer, T> mappings = ArrayListMultimap.create(); for(T unit: units) { final List<EndpointAffinity> affinities = unit.getAffinity(); verify(endpoints, units, affinities.size() == 1, "Expected the hard affinity work unit to have affinity to only one endpoint"); final EndpointAffinity endpointAffinity = affinities.get(0); final String host = endpointAffinity.getEndpoint().getAddress(); final Iterator<Integer> endpointsOnHost = endpointIteratorOnHostMap.get(host); if (endpointsOnHost == null) { verify(endpoints, units, false, "There are no endpoints in assigned list running on host %s", host); } final int endpointId = endpointIteratorOnHostMap.get(host).next(); mappings.put(endpointId, unit); } for(int i = 0; i < endpoints.size(); i++) { if (!mappings.containsKey(i)) { verify(endpoints, units, false, "Endpoint %s has no assigned work.", endpoints.get(i)); } } return mappings; } ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units); static HardAssignmentCreator INSTANCE; } |
@Test public void oneOrMoreFilesOnEachHost() throws Exception { final List<CompleteWork> workUnits = asList( newWork("/10.0.0.1/table/foo", 1024, ENDPOINT_1_1, 1024f/48124f), newWork("/10.0.0.1/table/bar", 4096, ENDPOINT_1_2, 4096f/48124f), newWork("/10.0.0.1/table/fb", 8192, ENDPOINT_1_2, 8192f/48124f), newWork("/10.0.0.2/table/foo", 2048, ENDPOINT_2_2, 2048f/48124f), newWork("/10.0.0.2/table/bar", 4096, ENDPOINT_2_1, 4096f/48124f), newWork("/10.0.0.3/table/foo", 16384, ENDPOINT_3_1, 16384f/48124f), newWork("/10.0.0.3/table/bar", 2046, ENDPOINT_3_2, 2046f/48124f), newWork("/10.0.0.3/table/bar2", 6144, ENDPOINT_3_2, 6144f/48124f), newWork("/10.0.0.3/table/bar3", 2046, ENDPOINT_3_2, 2046f/48124f), newWork("/10.0.0.4/table/bar", 2046, ENDPOINT_4_1, 2046f/48124f) ); ListMultimap<Integer, CompleteWork> mappings; List<NodeEndpoint> endpoints; endpoints = ENDPOINTS.subList(0, 7); mappings = INSTANCE.getMappings(endpoints, workUnits); verifyAssignments(mappings, endpoints, workUnits); endpoints = asList(ENDPOINT_1_1, ENDPOINT_2_1, ENDPOINT_3_1, ENDPOINT_4_1); mappings = INSTANCE.getMappings(endpoints, workUnits); verifyAssignments(mappings, endpoints, workUnits); verifyAssignmentFails(workUnits, ENDPOINT_1_1, ENDPOINT_2_1, ENDPOINT_3_1); verifyAssignmentFails(workUnits, ENDPOINT_1_1, ENDPOINT_2_1, ENDPOINT_3_1, ENDPOINT_4_1, ENDPOINT_4_2); } | public <T extends CompleteWork> ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units) throws PhysicalOperatorSetupException { verify(endpoints, units, units.size() >= endpoints.size(), "There should be at least one work unit for each hard affinity node."); final ListMultimap<String, Integer> endpointsOnHostMap = ArrayListMultimap.create(); int index = 0; for(NodeEndpoint incoming : endpoints) { endpointsOnHostMap.put(incoming.getAddress(), index); index++; } final Map<String, Iterator<Integer>> endpointIteratorOnHostMap = Maps.newHashMap(); for(Map.Entry<String, Collection<Integer>> entry: endpointsOnHostMap.asMap().entrySet()) { endpointIteratorOnHostMap.put(entry.getKey(), Iterables.cycle(entry.getValue()).iterator()); } final ListMultimap<Integer, T> mappings = ArrayListMultimap.create(); for(T unit: units) { final List<EndpointAffinity> affinities = unit.getAffinity(); verify(endpoints, units, affinities.size() == 1, "Expected the hard affinity work unit to have affinity to only one endpoint"); final EndpointAffinity endpointAffinity = affinities.get(0); final String host = endpointAffinity.getEndpoint().getAddress(); final Iterator<Integer> endpointsOnHost = endpointIteratorOnHostMap.get(host); if (endpointsOnHost == null) { verify(endpoints, units, false, "There are no endpoints in assigned list running on host %s", host); } final int endpointId = endpointIteratorOnHostMap.get(host).next(); mappings.put(endpointId, unit); } for(int i = 0; i < endpoints.size(); i++) { if (!mappings.containsKey(i)) { verify(endpoints, units, false, "Endpoint %s has no assigned work.", endpoints.get(i)); } } return mappings; } | HardAssignmentCreator { public <T extends CompleteWork> ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units) throws PhysicalOperatorSetupException { verify(endpoints, units, units.size() >= endpoints.size(), "There should be at least one work unit for each hard affinity node."); final ListMultimap<String, Integer> endpointsOnHostMap = ArrayListMultimap.create(); int index = 0; for(NodeEndpoint incoming : endpoints) { endpointsOnHostMap.put(incoming.getAddress(), index); index++; } final Map<String, Iterator<Integer>> endpointIteratorOnHostMap = Maps.newHashMap(); for(Map.Entry<String, Collection<Integer>> entry: endpointsOnHostMap.asMap().entrySet()) { endpointIteratorOnHostMap.put(entry.getKey(), Iterables.cycle(entry.getValue()).iterator()); } final ListMultimap<Integer, T> mappings = ArrayListMultimap.create(); for(T unit: units) { final List<EndpointAffinity> affinities = unit.getAffinity(); verify(endpoints, units, affinities.size() == 1, "Expected the hard affinity work unit to have affinity to only one endpoint"); final EndpointAffinity endpointAffinity = affinities.get(0); final String host = endpointAffinity.getEndpoint().getAddress(); final Iterator<Integer> endpointsOnHost = endpointIteratorOnHostMap.get(host); if (endpointsOnHost == null) { verify(endpoints, units, false, "There are no endpoints in assigned list running on host %s", host); } final int endpointId = endpointIteratorOnHostMap.get(host).next(); mappings.put(endpointId, unit); } for(int i = 0; i < endpoints.size(); i++) { if (!mappings.containsKey(i)) { verify(endpoints, units, false, "Endpoint %s has no assigned work.", endpoints.get(i)); } } return mappings; } } | HardAssignmentCreator { public <T extends CompleteWork> ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units) throws PhysicalOperatorSetupException { verify(endpoints, units, units.size() >= endpoints.size(), "There should be at least one work unit for each hard affinity node."); final ListMultimap<String, Integer> endpointsOnHostMap = ArrayListMultimap.create(); int index = 0; for(NodeEndpoint incoming : endpoints) { endpointsOnHostMap.put(incoming.getAddress(), index); index++; } final Map<String, Iterator<Integer>> endpointIteratorOnHostMap = Maps.newHashMap(); for(Map.Entry<String, Collection<Integer>> entry: endpointsOnHostMap.asMap().entrySet()) { endpointIteratorOnHostMap.put(entry.getKey(), Iterables.cycle(entry.getValue()).iterator()); } final ListMultimap<Integer, T> mappings = ArrayListMultimap.create(); for(T unit: units) { final List<EndpointAffinity> affinities = unit.getAffinity(); verify(endpoints, units, affinities.size() == 1, "Expected the hard affinity work unit to have affinity to only one endpoint"); final EndpointAffinity endpointAffinity = affinities.get(0); final String host = endpointAffinity.getEndpoint().getAddress(); final Iterator<Integer> endpointsOnHost = endpointIteratorOnHostMap.get(host); if (endpointsOnHost == null) { verify(endpoints, units, false, "There are no endpoints in assigned list running on host %s", host); } final int endpointId = endpointIteratorOnHostMap.get(host).next(); mappings.put(endpointId, unit); } for(int i = 0; i < endpoints.size(); i++) { if (!mappings.containsKey(i)) { verify(endpoints, units, false, "Endpoint %s has no assigned work.", endpoints.get(i)); } } return mappings; } ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units); } | HardAssignmentCreator { public <T extends CompleteWork> ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units) throws PhysicalOperatorSetupException { verify(endpoints, units, units.size() >= endpoints.size(), "There should be at least one work unit for each hard affinity node."); final ListMultimap<String, Integer> endpointsOnHostMap = ArrayListMultimap.create(); int index = 0; for(NodeEndpoint incoming : endpoints) { endpointsOnHostMap.put(incoming.getAddress(), index); index++; } final Map<String, Iterator<Integer>> endpointIteratorOnHostMap = Maps.newHashMap(); for(Map.Entry<String, Collection<Integer>> entry: endpointsOnHostMap.asMap().entrySet()) { endpointIteratorOnHostMap.put(entry.getKey(), Iterables.cycle(entry.getValue()).iterator()); } final ListMultimap<Integer, T> mappings = ArrayListMultimap.create(); for(T unit: units) { final List<EndpointAffinity> affinities = unit.getAffinity(); verify(endpoints, units, affinities.size() == 1, "Expected the hard affinity work unit to have affinity to only one endpoint"); final EndpointAffinity endpointAffinity = affinities.get(0); final String host = endpointAffinity.getEndpoint().getAddress(); final Iterator<Integer> endpointsOnHost = endpointIteratorOnHostMap.get(host); if (endpointsOnHost == null) { verify(endpoints, units, false, "There are no endpoints in assigned list running on host %s", host); } final int endpointId = endpointIteratorOnHostMap.get(host).next(); mappings.put(endpointId, unit); } for(int i = 0; i < endpoints.size(); i++) { if (!mappings.containsKey(i)) { verify(endpoints, units, false, "Endpoint %s has no assigned work.", endpoints.get(i)); } } return mappings; } ListMultimap<Integer, T> getMappings( final List<NodeEndpoint> endpoints, final List<T> units); static HardAssignmentCreator INSTANCE; } |
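
The verifyAssignmentFails calls across the four rows above cover the two ways getMappings rejects a plan: a work unit whose host has no endpoint in the assigned list, and an endpoint left with no work. A tiny sketch of the second check, matching the final loop of getMappings (the exception type is a stand-in for the PhysicalOperatorSetupException raised via verify):

import java.util.List;
import java.util.Map;

public class AssignmentValidationSketch {
  // After distribution, every endpoint index must appear as a key in the mapping.
  static void requireAllEndpointsBusy(int endpointCount, Map<Integer, List<String>> mappings) {
    for (int i = 0; i < endpointCount; i++) {
      if (!mappings.containsKey(i)) {
        throw new IllegalStateException("Endpoint " + i + " has no assigned work.");
      }
    }
  }
}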
@Test public void testGenerateSourceErrorMessage() { final String sourceName = "test-source"; final String errorMessage = "Failed to establish connection"; Assert.assertEquals("Source 'test-source' returned error 'Failed to establish connection'", StoragePluginUtils.generateSourceErrorMessage(sourceName, errorMessage)); } | public static String generateSourceErrorMessage(final String storagePluginName, String errorMessage) { return String.format("Source '%s' returned error '%s'", storagePluginName, errorMessage); } | StoragePluginUtils { public static String generateSourceErrorMessage(final String storagePluginName, String errorMessage) { return String.format("Source '%s' returned error '%s'", storagePluginName, errorMessage); } } | StoragePluginUtils { public static String generateSourceErrorMessage(final String storagePluginName, String errorMessage) { return String.format("Source '%s' returned error '%s'", storagePluginName, errorMessage); } private StoragePluginUtils(); } | StoragePluginUtils { public static String generateSourceErrorMessage(final String storagePluginName, String errorMessage) { return String.format("Source '%s' returned error '%s'", storagePluginName, errorMessage); } private StoragePluginUtils(); static String generateSourceErrorMessage(final String storagePluginName, String errorMessage); static String generateSourceErrorMessage(final String storagePluginName, String errorMessage, Object... args); static UserException.Builder message(UserException.Builder builder, String sourceName, String errorMessage, Object... args); } | StoragePluginUtils { public static String generateSourceErrorMessage(final String storagePluginName, String errorMessage) { return String.format("Source '%s' returned error '%s'", storagePluginName, errorMessage); } private StoragePluginUtils(); static String generateSourceErrorMessage(final String storagePluginName, String errorMessage); static String generateSourceErrorMessage(final String storagePluginName, String errorMessage, Object... args); static UserException.Builder message(UserException.Builder builder, String sourceName, String errorMessage, Object... args); } |
@Test public void testGenerateSourceErrorMessageFromFormatString() { final String sourceName = "test-source"; final String errorFmtString = "Returned status code %s from cluster"; Assert.assertEquals("Source 'test-source' returned error 'Returned status code 500 from cluster'", StoragePluginUtils.generateSourceErrorMessage(sourceName, errorFmtString, "500")); } | public static String generateSourceErrorMessage(final String storagePluginName, String errorMessage) { return String.format("Source '%s' returned error '%s'", storagePluginName, errorMessage); } | StoragePluginUtils { public static String generateSourceErrorMessage(final String storagePluginName, String errorMessage) { return String.format("Source '%s' returned error '%s'", storagePluginName, errorMessage); } } | StoragePluginUtils { public static String generateSourceErrorMessage(final String storagePluginName, String errorMessage) { return String.format("Source '%s' returned error '%s'", storagePluginName, errorMessage); } private StoragePluginUtils(); } | StoragePluginUtils { public static String generateSourceErrorMessage(final String storagePluginName, String errorMessage) { return String.format("Source '%s' returned error '%s'", storagePluginName, errorMessage); } private StoragePluginUtils(); static String generateSourceErrorMessage(final String storagePluginName, String errorMessage); static String generateSourceErrorMessage(final String storagePluginName, String errorMessage, Object... args); static UserException.Builder message(UserException.Builder builder, String sourceName, String errorMessage, Object... args); } | StoragePluginUtils { public static String generateSourceErrorMessage(final String storagePluginName, String errorMessage) { return String.format("Source '%s' returned error '%s'", storagePluginName, errorMessage); } private StoragePluginUtils(); static String generateSourceErrorMessage(final String storagePluginName, String errorMessage); static String generateSourceErrorMessage(final String storagePluginName, String errorMessage, Object... args); static UserException.Builder message(UserException.Builder builder, String sourceName, String errorMessage, Object... args); } |
@Test public void testAddContextAndErrorMessageToUserException() { final UserException.Builder builder = UserException.validationError(); final String errorMessageFormatString = "Invalid username: %s"; final String sourceName = "fictitious-source"; final UserException userException = StoragePluginUtils.message( builder, sourceName, errorMessageFormatString, "invalid-user").buildSilently(); Assert.assertEquals("Source 'fictitious-source' returned error 'Invalid username: invalid-user'", userException.getMessage()); Assert.assertEquals("plugin fictitious-source", userException.getContextStrings().get(0)); } | public static UserException.Builder message(UserException.Builder builder, String sourceName, String errorMessage, Object... args) { return builder.message(generateSourceErrorMessage(sourceName, errorMessage), args) .addContext("plugin", sourceName); } | StoragePluginUtils { public static UserException.Builder message(UserException.Builder builder, String sourceName, String errorMessage, Object... args) { return builder.message(generateSourceErrorMessage(sourceName, errorMessage), args) .addContext("plugin", sourceName); } } | StoragePluginUtils { public static UserException.Builder message(UserException.Builder builder, String sourceName, String errorMessage, Object... args) { return builder.message(generateSourceErrorMessage(sourceName, errorMessage), args) .addContext("plugin", sourceName); } private StoragePluginUtils(); } | StoragePluginUtils { public static UserException.Builder message(UserException.Builder builder, String sourceName, String errorMessage, Object... args) { return builder.message(generateSourceErrorMessage(sourceName, errorMessage), args) .addContext("plugin", sourceName); } private StoragePluginUtils(); static String generateSourceErrorMessage(final String storagePluginName, String errorMessage); static String generateSourceErrorMessage(final String storagePluginName, String errorMessage, Object... args); static UserException.Builder message(UserException.Builder builder, String sourceName, String errorMessage, Object... args); } | StoragePluginUtils { public static UserException.Builder message(UserException.Builder builder, String sourceName, String errorMessage, Object... args) { return builder.message(generateSourceErrorMessage(sourceName, errorMessage), args) .addContext("plugin", sourceName); } private StoragePluginUtils(); static String generateSourceErrorMessage(final String storagePluginName, String errorMessage); static String generateSourceErrorMessage(final String storagePluginName, String errorMessage, Object... args); static UserException.Builder message(UserException.Builder builder, String sourceName, String errorMessage, Object... args); } |
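These three tests pin down a two-pass formatting scheme: the source name is spliced into a fixed template first, and any %s placeholders in the raw error text survive that pass to be filled later by the builder's varargs. A minimal sketch of that behaviour, where the generate helper is a stand-in for the real method:

    public class SourceErrorFormatSketch {
      // Stand-in for generateSourceErrorMessage: splice the source name and raw
      // error text into the fixed template. A %s inside the error text is not
      // expanded here, because format specifiers in arguments are left alone.
      static String generate(String source, String error) {
        return String.format("Source '%s' returned error '%s'", source, error);
      }

      public static void main(String[] args) {
        String template = generate("test-source", "Returned status code %s from cluster");
        // The surviving %s is filled by the second pass, which is what
        // UserException.Builder.message(template, args) does with its varargs.
        System.out.println(String.format(template, "500"));
        // -> Source 'test-source' returned error 'Returned status code 500 from cluster'
      }
    }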
@Test public void testRenameShouldNotBreakHistory() throws Exception { Dataset parentVDS = createVDS(Arrays.asList("dsvTest", "renameParentVDS"),"select * from sys.version"); Dataset vds = expectSuccess(getBuilder(getPublicAPI(3).path("catalog")).buildPost(Entity.json(parentVDS)), new GenericType<Dataset>() {}); String parentDataset = String.join(".", parentVDS.getPath()); DatasetVersion datasetVersion = DatasetVersion.newVersion(); WebTarget target = getAPIv2() .path("datasets") .path("new_untitled") .queryParam("parentDataset", parentDataset) .queryParam("newVersion", datasetVersion) .queryParam("limit", 120); InitialPreviewResponse initialPreviewResponse = expectSuccess(getBuilder(target).buildPost(Entity.json(null)), new GenericType<InitialPreviewResponse>() {}); target = getAPIv2() .path("dataset") .path("tmp.UNTITLED") .path("version") .path(datasetVersion.getVersion()) .path("save") .queryParam("as", "dsvTest.renameVDS"); DatasetUIWithHistory dswh = expectSuccess(getBuilder(target).buildPost(Entity.json(null)), new GenericType<DatasetUIWithHistory>() {}); DatasetVersion datasetVersion2 = DatasetVersion.newVersion(); String dsPath = String.join(".", dswh.getDataset().getFullPath()); target = getAPIv2() .path("dataset") .path(dsPath) .path("version") .path(dswh.getDataset().getDatasetVersion().getVersion()) .path("transformAndPreview") .queryParam("newVersion", datasetVersion2); TransformUpdateSQL transformSql = new TransformUpdateSQL(); transformSql.setSql("SELECT \"version\" FROM dsvTest.renameParentVDS"); initialPreviewResponse = expectSuccess(getBuilder(target).buildPost(Entity.json(transformSql)), new GenericType<InitialPreviewResponse>() {}); target = getAPIv2() .path("dataset") .path(dsPath) .path("version") .path(initialPreviewResponse.getDataset().getDatasetVersion().getVersion()) .path("save") .queryParam("as", "dsvTest.renameVDS2"); DatasetUIWithHistory dswh2 = expectSuccess(getBuilder(target).buildPost(Entity.json(null)), new GenericType<DatasetUIWithHistory>() {}); DatasetVersionMutator mutator = l(DatasetVersionMutator.class); VirtualDatasetUI renameDataset = mutator.renameDataset(new DatasetPath(dswh2.getDataset().getFullPath()), new DatasetPath(Arrays.asList("dsvTest", "renameVDS2-new"))); parentDataset = String.join(".", renameDataset.getFullPathList()); datasetVersion = DatasetVersion.newVersion(); target = getAPIv2() .path("datasets") .path("new_untitled") .queryParam("parentDataset", parentDataset) .queryParam("newVersion", datasetVersion) .queryParam("limit", 0); initialPreviewResponse = expectSuccess(getBuilder(target).buildPost(Entity.json(null)), new GenericType<InitialPreviewResponse>() {}); InitialPreviewResponse reapplyResult = reapply(getDatasetVersionPath(initialPreviewResponse.getDataset())); } | @GET @Produces(APPLICATION_JSON) public Dataset getDataset() throws DatasetVersionNotFoundException, DatasetNotFoundException, NamespaceException { return getCurrentDataset(); } | DatasetVersionResource extends BaseResourceWithAllocator { @GET @Produces(APPLICATION_JSON) public Dataset getDataset() throws DatasetVersionNotFoundException, DatasetNotFoundException, NamespaceException { return getCurrentDataset(); } } | DatasetVersionResource extends BaseResourceWithAllocator { @GET @Produces(APPLICATION_JSON) public Dataset getDataset() throws DatasetVersionNotFoundException, DatasetNotFoundException, NamespaceException { return getCurrentDataset(); } @Inject DatasetVersionResource(
SabotContext context,
QueryExecutor executor,
DatasetVersionMutator datasetService,
JobsService jobsService,
NamespaceService namespaceService,
JoinRecommender joinRecommender,
@Context SecurityContext securityContext,
@PathParam("cpath") DatasetPath datasetPath,
@PathParam("version") DatasetVersion version,
BufferAllocatorFactory allocatorFactory
); DatasetVersionResource(
QueryExecutor executor,
DatasetVersionMutator datasetService,
JobsService jobsService,
Recommenders recommenders,
Transformer transformer,
JoinRecommender joinRecommender,
DatasetTool datasetTool,
HistogramGenerator histograms,
SecurityContext securityContext,
DatasetPath datasetPath,
DatasetVersion version,
BufferAllocator allocator
); protected DatasetVersionResource(
QueryExecutor executor,
DatasetVersionMutator datasetService,
JobsService jobsService,
Recommenders recommenders,
Transformer transformer,
JoinRecommender joinRecommender,
DatasetTool datasetTool,
HistogramGenerator histograms,
SecurityContext securityContext,
DatasetPath datasetPath,
DatasetVersion version,
BufferAllocatorFactory allocatorFactory
); } | DatasetVersionResource extends BaseResourceWithAllocator { @GET @Produces(APPLICATION_JSON) public Dataset getDataset() throws DatasetVersionNotFoundException, DatasetNotFoundException, NamespaceException { return getCurrentDataset(); } @Inject DatasetVersionResource(
SabotContext context,
QueryExecutor executor,
DatasetVersionMutator datasetService,
JobsService jobsService,
NamespaceService namespaceService,
JoinRecommender joinRecommender,
@Context SecurityContext securityContext,
@PathParam("cpath") DatasetPath datasetPath,
@PathParam("version") DatasetVersion version,
BufferAllocatorFactory allocatorFactory
); DatasetVersionResource(
QueryExecutor executor,
DatasetVersionMutator datasetService,
JobsService jobsService,
Recommenders recommenders,
Transformer transformer,
JoinRecommender joinRecommender,
DatasetTool datasetTool,
HistogramGenerator histograms,
SecurityContext securityContext,
DatasetPath datasetPath,
DatasetVersion version,
BufferAllocator allocator
); protected DatasetVersionResource(
QueryExecutor executor,
DatasetVersionMutator datasetService,
JobsService jobsService,
Recommenders recommenders,
Transformer transformer,
JoinRecommender joinRecommender,
DatasetTool datasetTool,
HistogramGenerator histograms,
SecurityContext securityContext,
DatasetPath datasetPath,
DatasetVersion version,
BufferAllocatorFactory allocatorFactory
); @GET @Produces(APPLICATION_JSON) Dataset getDataset(); @GET @Path("preview") @Produces(APPLICATION_JSON) InitialPreviewResponse getDatasetForVersion(
@QueryParam("tipVersion") DatasetVersion tipVersion,
@QueryParam("limit") Integer limit); @GET @Path("review") @Produces(APPLICATION_JSON) InitialPreviewResponse reviewDatasetVersion(
@QueryParam("jobId") String jobId,
@QueryParam("tipVersion") DatasetVersion tipVersion,
@QueryParam("limit") Integer limit); @POST @Path("transformAndPreview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) InitialPreviewResponse transformAndPreview(
/* Body */ TransformBase transform,
@QueryParam("newVersion") DatasetVersion newVersion,
@QueryParam("limit") @DefaultValue("50") int limit); @POST @Path("transformAndRun") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) InitialTransformAndRunResponse transformAndRun(
/* Body */ TransformBase transform,
@QueryParam("newVersion") DatasetVersion newVersion
); @GET @Path("run") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) InitialRunResponse run(@QueryParam("tipVersion") DatasetVersion tipVersion); @POST @Path("transformPeek") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) InitialPendingTransformResponse transformDataSetPreview(
/* Body */ TransformBase transform,
@QueryParam("newVersion") DatasetVersion newVersion,
@QueryParam("limit") @DefaultValue("50") int limit); @POST @Path("save") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) DatasetUIWithHistory saveAsDataSet(
@QueryParam("as") DatasetPath asDatasetPath,
@QueryParam("savedTag") String savedTag // null for the first save
); DatasetUI save(VirtualDatasetUI vds, DatasetPath asDatasetPath, String savedTag, NamespaceAttribute... attributes); @POST @Path("extract") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Cards<ExtractRule> getExtractCards(
/* Body */ Selection selection); @POST @Path("extract_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ExtractRule> getExtractCard(
/* Body */ PreviewReq<ExtractRule, Selection> req); @POST @Path("extract_map") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Cards<ExtractMapRule> getExtractMapCards(
/* Body */ MapSelection mapSelection); @POST @Path("extract_map_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ExtractMapRule> getExtractMapCard(
/* Body */ PreviewReq<ExtractMapRule, MapSelection> req); @POST @Path("extract_list") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Cards<ExtractListRule> getExtractListCards(
/* Body */ Selection selection); @POST @Path("extract_list_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ExtractListRule> getExtractListCard(
/* Body */ PreviewReq<ExtractListRule, Selection> req); @POST @Path("split") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Cards<SplitRule> getSplitCards(
/* Body */ Selection selection); @POST @Path("split_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<SplitRule> getSplitCard(
/* Body */ PreviewReq<SplitRule, Selection> req); @POST @Path("/editOriginalSql") @Produces(APPLICATION_JSON) InitialPreviewResponse reapplyDatasetAndPreview(); @POST @Path("/reapplyAndSave") @Produces(APPLICATION_JSON) DatasetUIWithHistory reapplySave(
@QueryParam("as") DatasetPath asDatasetPath
); @POST @Path("replace") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceCards getReplaceCards(
/* Body */ Selection selection); @POST @Path("replace_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ReplacePatternRule> getReplaceCard(
/* Body */ PreviewReq<ReplacePatternRule, Selection> req); @POST @Path("replace_values_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceValuesCard getReplaceValuesCard(
/* Body */ ReplaceValuesPreviewReq req); @POST @Path("keeponly") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceCards getKeeponlyCards(
/* Body */ Selection selection); @POST @Path("keeponly_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ReplacePatternRule> getKeeponlyCard(
/* Body */ PreviewReq<ReplacePatternRule, Selection> req); @POST @Path("keeponly_values_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceValuesCard getKeeponlyValuesCard(
/* Body */ ReplaceValuesPreviewReq req); @POST @Path("exclude") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceCards getExcludeCards(
/* Body */ Selection selection); @POST @Path("exclude_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ReplacePatternRule> getExcludeCard(
/* Body */ PreviewReq<ReplacePatternRule, Selection> req); @POST @Path("exclude_values_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceValuesCard getExcludeValuesCard(
/* Body */ ReplaceValuesPreviewReq req); @GET @Path("history") @Produces(APPLICATION_JSON) History getHistory(@QueryParam("tipVersion") DatasetVersion tipVersion); @POST @Path("clean") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) CleanDataCard getCleanDataCard(
ColumnForCleaning col); @GET @Path("join_recs") @Produces(APPLICATION_JSON) JoinRecommendations getJoinRecommendations(); @GET @Path("parents") @Produces(APPLICATION_JSON) List<ParentDatasetUI> getParents(); } | DatasetVersionResource extends BaseResourceWithAllocator { @GET @Produces(APPLICATION_JSON) public Dataset getDataset() throws DatasetVersionNotFoundException, DatasetNotFoundException, NamespaceException { return getCurrentDataset(); } @Inject DatasetVersionResource(
SabotContext context,
QueryExecutor executor,
DatasetVersionMutator datasetService,
JobsService jobsService,
NamespaceService namespaceService,
JoinRecommender joinRecommender,
@Context SecurityContext securityContext,
@PathParam("cpath") DatasetPath datasetPath,
@PathParam("version") DatasetVersion version,
BufferAllocatorFactory allocatorFactory
); DatasetVersionResource(
QueryExecutor executor,
DatasetVersionMutator datasetService,
JobsService jobsService,
Recommenders recommenders,
Transformer transformer,
JoinRecommender joinRecommender,
DatasetTool datasetTool,
HistogramGenerator histograms,
SecurityContext securityContext,
DatasetPath datasetPath,
DatasetVersion version,
BufferAllocator allocator
); protected DatasetVersionResource(
QueryExecutor executor,
DatasetVersionMutator datasetService,
JobsService jobsService,
Recommenders recommenders,
Transformer transformer,
JoinRecommender joinRecommender,
DatasetTool datasetTool,
HistogramGenerator histograms,
SecurityContext securityContext,
DatasetPath datasetPath,
DatasetVersion version,
BufferAllocatorFactory allocatorFactory
); @GET @Produces(APPLICATION_JSON) Dataset getDataset(); @GET @Path("preview") @Produces(APPLICATION_JSON) InitialPreviewResponse getDatasetForVersion(
@QueryParam("tipVersion") DatasetVersion tipVersion,
@QueryParam("limit") Integer limit); @GET @Path("review") @Produces(APPLICATION_JSON) InitialPreviewResponse reviewDatasetVersion(
@QueryParam("jobId") String jobId,
@QueryParam("tipVersion") DatasetVersion tipVersion,
@QueryParam("limit") Integer limit); @POST @Path("transformAndPreview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) InitialPreviewResponse transformAndPreview(
/* Body */ TransformBase transform,
@QueryParam("newVersion") DatasetVersion newVersion,
@QueryParam("limit") @DefaultValue("50") int limit); @POST @Path("transformAndRun") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) InitialTransformAndRunResponse transformAndRun(
/* Body */ TransformBase transform,
@QueryParam("newVersion") DatasetVersion newVersion
); @GET @Path("run") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) InitialRunResponse run(@QueryParam("tipVersion") DatasetVersion tipVersion); @POST @Path("transformPeek") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) InitialPendingTransformResponse transformDataSetPreview(
/* Body */ TransformBase transform,
@QueryParam("newVersion") DatasetVersion newVersion,
@QueryParam("limit") @DefaultValue("50") int limit); @POST @Path("save") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) DatasetUIWithHistory saveAsDataSet(
@QueryParam("as") DatasetPath asDatasetPath,
@QueryParam("savedTag") String savedTag // null for the first save
); DatasetUI save(VirtualDatasetUI vds, DatasetPath asDatasetPath, String savedTag, NamespaceAttribute... attributes); @POST @Path("extract") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Cards<ExtractRule> getExtractCards(
/* Body */ Selection selection); @POST @Path("extract_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ExtractRule> getExtractCard(
/* Body */ PreviewReq<ExtractRule, Selection> req); @POST @Path("extract_map") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Cards<ExtractMapRule> getExtractMapCards(
/* Body */ MapSelection mapSelection); @POST @Path("extract_map_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ExtractMapRule> getExtractMapCard(
/* Body */ PreviewReq<ExtractMapRule, MapSelection> req); @POST @Path("extract_list") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Cards<ExtractListRule> getExtractListCards(
/* Body */ Selection selection); @POST @Path("extract_list_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ExtractListRule> getExtractListCard(
/* Body */ PreviewReq<ExtractListRule, Selection> req); @POST @Path("split") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Cards<SplitRule> getSplitCards(
/* Body */ Selection selection); @POST @Path("split_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<SplitRule> getSplitCard(
/* Body */ PreviewReq<SplitRule, Selection> req); @POST @Path("/editOriginalSql") @Produces(APPLICATION_JSON) InitialPreviewResponse reapplyDatasetAndPreview(); @POST @Path("/reapplyAndSave") @Produces(APPLICATION_JSON) DatasetUIWithHistory reapplySave(
@QueryParam("as") DatasetPath asDatasetPath
); @POST @Path("replace") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceCards getReplaceCards(
/* Body */ Selection selection); @POST @Path("replace_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ReplacePatternRule> getReplaceCard(
/* Body */ PreviewReq<ReplacePatternRule, Selection> req); @POST @Path("replace_values_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceValuesCard getReplaceValuesCard(
/* Body */ ReplaceValuesPreviewReq req); @POST @Path("keeponly") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceCards getKeeponlyCards(
/* Body */ Selection selection); @POST @Path("keeponly_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ReplacePatternRule> getKeeponlyCard(
/* Body */ PreviewReq<ReplacePatternRule, Selection> req); @POST @Path("keeponly_values_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceValuesCard getKeeponlyValuesCard(
/* Body */ ReplaceValuesPreviewReq req); @POST @Path("exclude") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceCards getExcludeCards(
/* Body */ Selection selection); @POST @Path("exclude_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) Card<ReplacePatternRule> getExcludeCard(
/* Body */ PreviewReq<ReplacePatternRule, Selection> req); @POST @Path("exclude_values_preview") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) ReplaceValuesCard getExcludeValuesCard(
/* Body */ ReplaceValuesPreviewReq req); @GET @Path("history") @Produces(APPLICATION_JSON) History getHistory(@QueryParam("tipVersion") DatasetVersion tipVersion); @POST @Path("clean") @Produces(APPLICATION_JSON) @Consumes(APPLICATION_JSON) CleanDataCard getCleanDataCard(
ColumnForCleaning col); @GET @Path("join_recs") @Produces(APPLICATION_JSON) JoinRecommendations getJoinRecommendations(); @GET @Path("parents") @Produces(APPLICATION_JSON) List<ParentDatasetUI> getParents(); static final List<DataType> AVAILABLE_TYPES_FOR_CLEANING; } |
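The resource above is conventional JAX-RS: an HTTP-verb annotation, a @Path, and media-type annotations per endpoint, with templated path segments injected via @PathParam. A stripped-down sketch of the same pattern (the resource class and payload here are hypothetical, not part of the code above):

    import javax.ws.rs.GET;
    import javax.ws.rs.Path;
    import javax.ws.rs.PathParam;
    import javax.ws.rs.Produces;
    import javax.ws.rs.core.MediaType;

    // Hypothetical resource, shown only to illustrate the annotation pattern.
    @Path("dataset/{cpath}/version/{version}")
    public class VersionSketchResource {
      @GET
      @Produces(MediaType.APPLICATION_JSON)
      public String getDataset(@PathParam("cpath") String path,
                               @PathParam("version") String version) {
        // JAX-RS injects the templated path segments into the parameters.
        return "{\"path\":\"" + path + "\",\"version\":\"" + version + "\"}";
      }
    }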
@Test public void testIsTextField() { ManagedSchemaField varcharField = ManagedSchemaField.newFixedLenField("varchar_col", "varchar(20)", 20, 0); assertTrue(varcharField.isTextField()); ManagedSchemaField charField = ManagedSchemaField.newFixedLenField("char_col", "char(20)", 20, 0); assertTrue(charField.isTextField()); ManagedSchemaField stringField = ManagedSchemaField.newFixedLenField("string_col", "String", CompleteType.DEFAULT_VARCHAR_PRECISION, 0); assertTrue(stringField.isTextField()); ManagedSchemaField decimalField = ManagedSchemaField.newUnboundedLenField("decimal_col", "decimal"); assertFalse(decimalField.isTextField()); } | public boolean isTextField() { return isTextFieldType(type); } | ManagedSchemaField { public boolean isTextField() { return isTextFieldType(type); } } | ManagedSchemaField { public boolean isTextField() { return isTextFieldType(type); } private ManagedSchemaField(final String name, final String type, final int length, final int scale, final boolean isUnbounded); } | ManagedSchemaField { public boolean isTextField() { return isTextFieldType(type); } private ManagedSchemaField(final String name, final String type, final int length, final int scale, final boolean isUnbounded); static ManagedSchemaField newUnboundedLenField(final String name, final String type); static ManagedSchemaField newFixedLenField(final String name, final String type, final int length, final int scale); String getName(); String getType(); int getLength(); int getScale(); boolean isTextField(); boolean isUnbounded(); @Override String toString(); } | ManagedSchemaField { public boolean isTextField() { return isTextFieldType(type); } private ManagedSchemaField(final String name, final String type, final int length, final int scale, final boolean isUnbounded); static ManagedSchemaField newUnboundedLenField(final String name, final String type); static ManagedSchemaField newFixedLenField(final String name, final String type, final int length, final int scale); String getName(); String getType(); int getLength(); int getScale(); boolean isTextField(); boolean isUnbounded(); @Override String toString(); } |
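isTextField delegates to an isTextFieldType helper that is not shown here. A plausible equivalent, consistent with what the test asserts (varchar, char, and String count as text; decimal does not) — assumed logic, not the actual implementation:

    import java.util.Locale;

    public class TextFieldTypeSketch {
      // Assumed logic, not the actual isTextFieldType implementation: treat
      // varchar/char/string (with or without a length) as text types.
      static boolean isTextFieldType(String type) {
        String t = type.toLowerCase(Locale.ROOT);
        return t.startsWith("varchar") || t.startsWith("char") || t.startsWith("string");
      }

      public static void main(String[] args) {
        System.out.println(isTextFieldType("varchar(20)")); // true
        System.out.println(isTextFieldType("String"));      // true
        System.out.println(isTextFieldType("decimal"));     // false
      }
    }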
@Test public void testNullUser() throws Exception { thrown.expect(IllegalArgumentException.class); ImpersonationUtil.createProxyUgi(null); } | public static UserGroupInformation createProxyUgi(String proxyUserName) { try { if (Strings.isNullOrEmpty(proxyUserName)) { throw new IllegalArgumentException("Invalid value for proxy user name"); } if (proxyUserName.equals(getProcessUserName()) || SYSTEM_USERNAME.equals(proxyUserName)) { return getProcessUserUGI(); } return CACHE.get(new Key(proxyUserName, UserGroupInformation.getLoginUser())); } catch (IOException | ExecutionException e) { final String errMsg = "Failed to create proxy user UserGroupInformation object: " + e.getMessage(); logger.error(errMsg, e); throw new RuntimeException(errMsg, e); } } | ImpersonationUtil { public static UserGroupInformation createProxyUgi(String proxyUserName) { try { if (Strings.isNullOrEmpty(proxyUserName)) { throw new IllegalArgumentException("Invalid value for proxy user name"); } if (proxyUserName.equals(getProcessUserName()) || SYSTEM_USERNAME.equals(proxyUserName)) { return getProcessUserUGI(); } return CACHE.get(new Key(proxyUserName, UserGroupInformation.getLoginUser())); } catch (IOException | ExecutionException e) { final String errMsg = "Failed to create proxy user UserGroupInformation object: " + e.getMessage(); logger.error(errMsg, e); throw new RuntimeException(errMsg, e); } } } | ImpersonationUtil { public static UserGroupInformation createProxyUgi(String proxyUserName) { try { if (Strings.isNullOrEmpty(proxyUserName)) { throw new IllegalArgumentException("Invalid value for proxy user name"); } if (proxyUserName.equals(getProcessUserName()) || SYSTEM_USERNAME.equals(proxyUserName)) { return getProcessUserUGI(); } return CACHE.get(new Key(proxyUserName, UserGroupInformation.getLoginUser())); } catch (IOException | ExecutionException e) { final String errMsg = "Failed to create proxy user UserGroupInformation object: " + e.getMessage(); logger.error(errMsg, e); throw new RuntimeException(errMsg, e); } } private ImpersonationUtil(); } | ImpersonationUtil { public static UserGroupInformation createProxyUgi(String proxyUserName) { try { if (Strings.isNullOrEmpty(proxyUserName)) { throw new IllegalArgumentException("Invalid value for proxy user name"); } if (proxyUserName.equals(getProcessUserName()) || SYSTEM_USERNAME.equals(proxyUserName)) { return getProcessUserUGI(); } return CACHE.get(new Key(proxyUserName, UserGroupInformation.getLoginUser())); } catch (IOException | ExecutionException e) { final String errMsg = "Failed to create proxy user UserGroupInformation object: " + e.getMessage(); logger.error(errMsg, e); throw new RuntimeException(errMsg, e); } } private ImpersonationUtil(); static String resolveUserName(String username); static UserGroupInformation createProxyUgi(String proxyUserName); static String getProcessUserName(); static UserGroupInformation getProcessUserUGI(); static FileSystem createFileSystem(String proxyUserName, Configuration fsConf, Path path); } | ImpersonationUtil { public static UserGroupInformation createProxyUgi(String proxyUserName) { try { if (Strings.isNullOrEmpty(proxyUserName)) { throw new IllegalArgumentException("Invalid value for proxy user name"); } if (proxyUserName.equals(getProcessUserName()) || SYSTEM_USERNAME.equals(proxyUserName)) { return getProcessUserUGI(); } return CACHE.get(new Key(proxyUserName, UserGroupInformation.getLoginUser())); } catch (IOException | ExecutionException e) { final String errMsg = "Failed to create proxy user UserGroupInformation object: " + e.getMessage(); logger.error(errMsg, e); throw new RuntimeException(errMsg, e); } } private ImpersonationUtil(); static String resolveUserName(String username); static UserGroupInformation createProxyUgi(String proxyUserName); static String getProcessUserName(); static UserGroupInformation getProcessUserUGI(); static FileSystem createFileSystem(String proxyUserName, Configuration fsConf, Path path); } |
@Test public void testEmptyUser() throws Exception { thrown.expect(IllegalArgumentException.class); ImpersonationUtil.createProxyUgi(""); } | public static UserGroupInformation createProxyUgi(String proxyUserName) { try { if (Strings.isNullOrEmpty(proxyUserName)) { throw new IllegalArgumentException("Invalid value for proxy user name"); } if (proxyUserName.equals(getProcessUserName()) || SYSTEM_USERNAME.equals(proxyUserName)) { return getProcessUserUGI(); } return CACHE.get(new Key(proxyUserName, UserGroupInformation.getLoginUser())); } catch (IOException | ExecutionException e) { final String errMsg = "Failed to create proxy user UserGroupInformation object: " + e.getMessage(); logger.error(errMsg, e); throw new RuntimeException(errMsg, e); } } | ImpersonationUtil { public static UserGroupInformation createProxyUgi(String proxyUserName) { try { if (Strings.isNullOrEmpty(proxyUserName)) { throw new IllegalArgumentException("Invalid value for proxy user name"); } if (proxyUserName.equals(getProcessUserName()) || SYSTEM_USERNAME.equals(proxyUserName)) { return getProcessUserUGI(); } return CACHE.get(new Key(proxyUserName, UserGroupInformation.getLoginUser())); } catch (IOException | ExecutionException e) { final String errMsg = "Failed to create proxy user UserGroupInformation object: " + e.getMessage(); logger.error(errMsg, e); throw new RuntimeException(errMsg, e); } } } | ImpersonationUtil { public static UserGroupInformation createProxyUgi(String proxyUserName) { try { if (Strings.isNullOrEmpty(proxyUserName)) { throw new IllegalArgumentException("Invalid value for proxy user name"); } if (proxyUserName.equals(getProcessUserName()) || SYSTEM_USERNAME.equals(proxyUserName)) { return getProcessUserUGI(); } return CACHE.get(new Key(proxyUserName, UserGroupInformation.getLoginUser())); } catch (IOException | ExecutionException e) { final String errMsg = "Failed to create proxy user UserGroupInformation object: " + e.getMessage(); logger.error(errMsg, e); throw new RuntimeException(errMsg, e); } } private ImpersonationUtil(); } | ImpersonationUtil { public static UserGroupInformation createProxyUgi(String proxyUserName) { try { if (Strings.isNullOrEmpty(proxyUserName)) { throw new IllegalArgumentException("Invalid value for proxy user name"); } if (proxyUserName.equals(getProcessUserName()) || SYSTEM_USERNAME.equals(proxyUserName)) { return getProcessUserUGI(); } return CACHE.get(new Key(proxyUserName, UserGroupInformation.getLoginUser())); } catch (IOException | ExecutionException e) { final String errMsg = "Failed to create proxy user UserGroupInformation object: " + e.getMessage(); logger.error(errMsg, e); throw new RuntimeException(errMsg, e); } } private ImpersonationUtil(); static String resolveUserName(String username); static UserGroupInformation createProxyUgi(String proxyUserName); static String getProcessUserName(); static UserGroupInformation getProcessUserUGI(); static FileSystem createFileSystem(String proxyUserName, Configuration fsConf, Path path); } | ImpersonationUtil { public static UserGroupInformation createProxyUgi(String proxyUserName) { try { if (Strings.isNullOrEmpty(proxyUserName)) { throw new IllegalArgumentException("Invalid value for proxy user name"); } if (proxyUserName.equals(getProcessUserName()) || SYSTEM_USERNAME.equals(proxyUserName)) { return getProcessUserUGI(); } return CACHE.get(new Key(proxyUserName, UserGroupInformation.getLoginUser())); } catch (IOException | ExecutionException e) { final String errMsg = "Failed to create proxy user UserGroupInformation object: " + e.getMessage(); logger.error(errMsg, e); throw new RuntimeException(errMsg, e); } } private ImpersonationUtil(); static String resolveUserName(String username); static UserGroupInformation createProxyUgi(String proxyUserName); static String getProcessUserName(); static UserGroupInformation getProcessUserUGI(); static FileSystem createFileSystem(String proxyUserName, Configuration fsConf, Path path); } |
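Both tests pin the same guard: createProxyUgi rejects a null or empty proxy name before any lookup in the (proxyUserName, login user) cache happens. A minimal sketch of that guard using the same Guava check:

    import com.google.common.base.Strings;

    public class ProxyUserGuardSketch {
      // The guard both tests pin down: null and empty names fail the same check.
      static void createProxyUgi(String proxyUserName) {
        if (Strings.isNullOrEmpty(proxyUserName)) {
          throw new IllegalArgumentException("Invalid value for proxy user name");
        }
        // ... cache lookup keyed by (proxyUserName, login user) would follow
      }

      public static void main(String[] args) {
        try {
          createProxyUgi("");
        } catch (IllegalArgumentException e) {
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }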
@Test public void test() { FormatPluginOptionExtractor e = new FormatPluginOptionExtractor(CLASSPATH_SCAN_RESULT); Collection<FormatPluginOptionsDescriptor> options = e.getOptions(); for (FormatPluginOptionsDescriptor d : options) { assertEquals(d.pluginConfigClass.getAnnotation(JsonTypeName.class).value(), d.typeName); switch (d.typeName) { case "text": assertEquals(TextFormatConfig.class, d.pluginConfigClass); assertEquals( "(type: String, lineDelimiter: String, fieldDelimiter: String, quote: String, escape: String, " + "comment: String, skipFirstLine: boolean, extractHeader: boolean, " + "autoGenerateColumnNames: boolean, trimHeader: boolean, outputExtension: String)", d.presentParams() ); break; case "named": assertEquals(NamedFormatPluginConfig.class, d.pluginConfigClass); assertEquals("(type: String, name: String)", d.presentParams()); break; case "json": assertEquals(d.typeName, "(type: String, outputExtension: String, prettyPrint: boolean)", d.presentParams()); break; case "parquet": assertEquals(d.typeName, "(type: String, autoCorrectCorruptDates: boolean, outputExtension: String)", d.presentParams()); break; case "arrow": assertEquals(d.typeName, "(type: String, outputExtension: String)", d.presentParams()); break; case "sequencefile": case "avro": assertEquals(d.typeName, "(type: String)", d.presentParams()); break; case "excel": assertEquals(d.typeName, "(type: String, sheet: String, extractHeader: boolean, hasMergedCells: boolean, xls: boolean)", d.presentParams()); break; case "iceberg": assertEquals(d.typeName, "(type: String, metaStoreType: IcebergMetaStoreType, dataFormatType: FileType, dataFormatConfig: FormatPluginConfig)", d.presentParams()); break; default: fail("add validation for format plugin type " + d.typeName); } } } | @VisibleForTesting Collection<FormatPluginOptionsDescriptor> getOptions() { return optionsByTypeName.values(); } | FormatPluginOptionExtractor { @VisibleForTesting Collection<FormatPluginOptionsDescriptor> getOptions() { return optionsByTypeName.values(); } } | FormatPluginOptionExtractor { @VisibleForTesting Collection<FormatPluginOptionsDescriptor> getOptions() { return optionsByTypeName.values(); } FormatPluginOptionExtractor(ScanResult scanResult); } | FormatPluginOptionExtractor { @VisibleForTesting Collection<FormatPluginOptionsDescriptor> getOptions() { return optionsByTypeName.values(); } FormatPluginOptionExtractor(ScanResult scanResult); FormatPluginConfig createConfigForTable(TableInstance t); List<Function> getFunctions(final List<String> tableSchemaPath, final FileSystemPlugin plugin, final SchemaConfig schemaConfig); } | FormatPluginOptionExtractor { @VisibleForTesting Collection<FormatPluginOptionsDescriptor> getOptions() { return optionsByTypeName.values(); } FormatPluginOptionExtractor(ScanResult scanResult); FormatPluginConfig createConfigForTable(TableInstance t); List<Function> getFunctions(final List<String> tableSchemaPath, final FileSystemPlugin plugin, final SchemaConfig schemaConfig); } |
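The first assertion in this test is the invariant that each descriptor's typeName equals the @JsonTypeName value on its plugin config class. Reading that annotation reflectively looks like this (the stub config class is hypothetical):

    import com.fasterxml.jackson.annotation.JsonTypeName;

    public class TypeNameSketch {
      @JsonTypeName("text")
      static class TextConfigStub {} // hypothetical stand-in for a plugin config

      public static void main(String[] args) {
        // The test's first assertion in miniature: the descriptor's type name
        // must equal the @JsonTypeName value on the config class.
        String typeName = TextConfigStub.class.getAnnotation(JsonTypeName.class).value();
        System.out.println(typeName); // text
      }
    }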
@Test public void trueWhenAllColumnsAreSelected() { BatchSchema schema = mock(BatchSchema.class); when(schema.iterator()) .thenReturn(Lists.newArrayList(Field.nullable("a1", new ArrowType.Bool())).iterator()); assertTrue(EasyScanOperatorCreator.selectsAllColumns(schema, Lists.<SchemaPath>newArrayList(SchemaPath.getSimplePath("a1")))); } | static boolean selectsAllColumns(final BatchSchema datasetSchema, final List<SchemaPath> projectedColumns) { final Set<String> columnsInTable = FluentIterable.from(datasetSchema) .transform( new Function<Field, String>() { @Override public String apply(Field input) { return input.getName(); }}) .filter( new Predicate<String>() { @Override public boolean apply(String input) { return !input.equals(IncrementalUpdateUtils.UPDATE_COLUMN); }}) .toSet(); final Set<String> selectedColumns = FluentIterable.from(projectedColumns) .transform( new Function<SchemaPath, String>() { @Override public String apply(SchemaPath input) { return input.getAsUnescapedPath(); } }) .toSet(); return columnsInTable.equals(selectedColumns); } | EasyScanOperatorCreator implements ProducerOperator.Creator<EasySubScan> { static boolean selectsAllColumns(final BatchSchema datasetSchema, final List<SchemaPath> projectedColumns) { final Set<String> columnsInTable = FluentIterable.from(datasetSchema) .transform( new Function<Field, String>() { @Override public String apply(Field input) { return input.getName(); }}) .filter( new Predicate<String>() { @Override public boolean apply(String input) { return !input.equals(IncrementalUpdateUtils.UPDATE_COLUMN); }}) .toSet(); final Set<String> selectedColumns = FluentIterable.from(projectedColumns) .transform( new Function<SchemaPath, String>() { @Override public String apply(SchemaPath input) { return input.getAsUnescapedPath(); } }) .toSet(); return columnsInTable.equals(selectedColumns); } } | EasyScanOperatorCreator implements ProducerOperator.Creator<EasySubScan> { static boolean selectsAllColumns(final BatchSchema datasetSchema, final List<SchemaPath> projectedColumns) { final Set<String> columnsInTable = FluentIterable.from(datasetSchema) .transform( new Function<Field, String>() { @Override public String apply(Field input) { return input.getName(); }}) .filter( new Predicate<String>() { @Override public boolean apply(String input) { return !input.equals(IncrementalUpdateUtils.UPDATE_COLUMN); }}) .toSet(); final Set<String> selectedColumns = FluentIterable.from(projectedColumns) .transform( new Function<SchemaPath, String>() { @Override public String apply(SchemaPath input) { return input.getAsUnescapedPath(); } }) .toSet(); return columnsInTable.equals(selectedColumns); } } | EasyScanOperatorCreator implements ProducerOperator.Creator<EasySubScan> { static boolean selectsAllColumns(final BatchSchema datasetSchema, final List<SchemaPath> projectedColumns) { final Set<String> columnsInTable = FluentIterable.from(datasetSchema) .transform( new Function<Field, String>() { @Override public String apply(Field input) { return input.getName(); }}) .filter( new Predicate<String>() { @Override public boolean apply(String input) { return !input.equals(IncrementalUpdateUtils.UPDATE_COLUMN); }}) .toSet(); final Set<String> selectedColumns = FluentIterable.from(projectedColumns) .transform( new Function<SchemaPath, String>() { @Override public String apply(SchemaPath input) { return input.getAsUnescapedPath(); } }) .toSet(); return columnsInTable.equals(selectedColumns); } @Override ProducerOperator create(FragmentExecutionContext fragmentExecContext,
final OperatorContext context, EasySubScan config); } | EasyScanOperatorCreator implements ProducerOperator.Creator<EasySubScan> { static boolean selectsAllColumns(final BatchSchema datasetSchema, final List<SchemaPath> projectedColumns) { final Set<String> columnsInTable = FluentIterable.from(datasetSchema) .transform( new Function<Field, String>() { @Override public String apply(Field input) { return input.getName(); }}) .filter( new Predicate<String>() { @Override public boolean apply(String input) { return !input.equals(IncrementalUpdateUtils.UPDATE_COLUMN); }}) .toSet(); final Set<String> selectedColumns = FluentIterable.from(projectedColumns) .transform( new Function<SchemaPath, String>() { @Override public String apply(SchemaPath input) { return input.getAsUnescapedPath(); } }) .toSet(); return columnsInTable.equals(selectedColumns); } @Override ProducerOperator create(FragmentExecutionContext fragmentExecContext, final OperatorContext context, EasySubScan config); } |
@Test public void selectionIgnoresIncremental() { BatchSchema schema = mock(BatchSchema.class); when(schema.iterator()) .thenReturn(Lists.newArrayList(Field.nullable("a1", new ArrowType.Bool()), Field.nullable(IncrementalUpdateUtils.UPDATE_COLUMN, new ArrowType.Bool())).iterator()); assertTrue(EasyScanOperatorCreator.selectsAllColumns(schema, Lists.<SchemaPath>newArrayList(SchemaPath.getSimplePath("a1")))); } | static boolean selectsAllColumns(final BatchSchema datasetSchema, final List<SchemaPath> projectedColumns) { final Set<String> columnsInTable = FluentIterable.from(datasetSchema) .transform( new Function<Field, String>() { @Override public String apply(Field input) { return input.getName(); }}) .filter( new Predicate<String>() { @Override public boolean apply(String input) { return !input.equals(IncrementalUpdateUtils.UPDATE_COLUMN); }}) .toSet(); final Set<String> selectedColumns = FluentIterable.from(projectedColumns) .transform( new Function<SchemaPath, String>() { @Override public String apply(SchemaPath input) { return input.getAsUnescapedPath(); } }) .toSet(); return columnsInTable.equals(selectedColumns); } | EasyScanOperatorCreator implements ProducerOperator.Creator<EasySubScan> { static boolean selectsAllColumns(final BatchSchema datasetSchema, final List<SchemaPath> projectedColumns) { final Set<String> columnsInTable = FluentIterable.from(datasetSchema) .transform( new Function<Field, String>() { @Override public String apply(Field input) { return input.getName(); }}) .filter( new Predicate<String>() { @Override public boolean apply(String input) { return !input.equals(IncrementalUpdateUtils.UPDATE_COLUMN); }}) .toSet(); final Set<String> selectedColumns = FluentIterable.from(projectedColumns) .transform( new Function<SchemaPath, String>() { @Override public String apply(SchemaPath input) { return input.getAsUnescapedPath(); } }) .toSet(); return columnsInTable.equals(selectedColumns); } } | EasyScanOperatorCreator implements ProducerOperator.Creator<EasySubScan> { static boolean selectsAllColumns(final BatchSchema datasetSchema, final List<SchemaPath> projectedColumns) { final Set<String> columnsInTable = FluentIterable.from(datasetSchema) .transform( new Function<Field, String>() { @Override public String apply(Field input) { return input.getName(); }}) .filter( new Predicate<String>() { @Override public boolean apply(String input) { return !input.equals(IncrementalUpdateUtils.UPDATE_COLUMN); }}) .toSet(); final Set<String> selectedColumns = FluentIterable.from(projectedColumns) .transform( new Function<SchemaPath, String>() { @Override public String apply(SchemaPath input) { return input.getAsUnescapedPath(); } }) .toSet(); return columnsInTable.equals(selectedColumns); } } | EasyScanOperatorCreator implements ProducerOperator.Creator<EasySubScan> { static boolean selectsAllColumns(final BatchSchema datasetSchema, final List<SchemaPath> projectedColumns) { final Set<String> columnsInTable = FluentIterable.from(datasetSchema) .transform( new Function<Field, String>() { @Override public String apply(Field input) { return input.getName(); }}) .filter( new Predicate<String>() { @Override public boolean apply(String input) { return !input.equals(IncrementalUpdateUtils.UPDATE_COLUMN); }}) .toSet(); final Set<String> selectedColumns = FluentIterable.from(projectedColumns) .transform( new Function<SchemaPath, String>() { @Override public String apply(SchemaPath input) { return input.getAsUnescapedPath(); } }) .toSet(); return columnsInTable.equals(selectedColumns); }
@Override ProducerOperator create(FragmentExecutionContext fragmentExecContext, final OperatorContext context, EasySubScan config); } | EasyScanOperatorCreator implements ProducerOperator.Creator<EasySubScan> { static boolean selectsAllColumns(final BatchSchema datasetSchema, final List<SchemaPath> projectedColumns) { final Set<String> columnsInTable = FluentIterable.from(datasetSchema) .transform( new Function<Field, String>() { @Override public String apply(Field input) { return input.getName(); }}) .filter( new Predicate<String>() { @Override public boolean apply(String input) { return !input.equals(IncrementalUpdateUtils.UPDATE_COLUMN); }}) .toSet(); final Set<String> selectedColumns = FluentIterable.from(projectedColumns) .transform( new Function<SchemaPath, String>() { @Override public String apply(SchemaPath input) { return input.getAsUnescapedPath(); } }) .toSet(); return columnsInTable.equals(selectedColumns); } @Override ProducerOperator create(FragmentExecutionContext fragmentExecContext, final OperatorContext context, EasySubScan config); } |
@Test public void falseWhenAllColumnsAreNotSelected() { BatchSchema schema = mock(BatchSchema.class); when(schema.iterator()) .thenReturn(Lists.newArrayList(Field.nullable("a1", new ArrowType.Bool()), Field.nullable("a2", new ArrowType.Bool())).iterator()); assertFalse(EasyScanOperatorCreator.selectsAllColumns(schema, Lists.<SchemaPath>newArrayList(SchemaPath.getSimplePath("a1")))); } | static boolean selectsAllColumns(final BatchSchema datasetSchema, final List<SchemaPath> projectedColumns) { final Set<String> columnsInTable = FluentIterable.from(datasetSchema) .transform( new Function<Field, String>() { @Override public String apply(Field input) { return input.getName(); }}) .filter( new Predicate<String>() { @Override public boolean apply(String input) { return !input.equals(IncrementalUpdateUtils.UPDATE_COLUMN); }}) .toSet(); final Set<String> selectedColumns = FluentIterable.from(projectedColumns) .transform( new Function<SchemaPath, String>() { @Override public String apply(SchemaPath input) { return input.getAsUnescapedPath(); } }) .toSet(); return columnsInTable.equals(selectedColumns); } | EasyScanOperatorCreator implements ProducerOperator.Creator<EasySubScan> { static boolean selectsAllColumns(final BatchSchema datasetSchema, final List<SchemaPath> projectedColumns) { final Set<String> columnsInTable = FluentIterable.from(datasetSchema) .transform( new Function<Field, String>() { @Override public String apply(Field input) { return input.getName(); }}) .filter( new Predicate<String>() { @Override public boolean apply(String input) { return !input.equals(IncrementalUpdateUtils.UPDATE_COLUMN); }}) .toSet(); final Set<String> selectedColumns = FluentIterable.from(projectedColumns) .transform( new Function<SchemaPath, String>() { @Override public String apply(SchemaPath input) { return input.getAsUnescapedPath(); } }) .toSet(); return columnsInTable.equals(selectedColumns); } } | EasyScanOperatorCreator implements ProducerOperator.Creator<EasySubScan> { static boolean selectsAllColumns(final BatchSchema datasetSchema, final List<SchemaPath> projectedColumns) { final Set<String> columnsInTable = FluentIterable.from(datasetSchema) .transform( new Function<Field, String>() { @Override public String apply(Field input) { return input.getName(); }}) .filter( new Predicate<String>() { @Override public boolean apply(String input) { return !input.equals(IncrementalUpdateUtils.UPDATE_COLUMN); }}) .toSet(); final Set<String> selectedColumns = FluentIterable.from(projectedColumns) .transform( new Function<SchemaPath, String>() { @Override public String apply(SchemaPath input) { return input.getAsUnescapedPath(); } }) .toSet(); return columnsInTable.equals(selectedColumns); } } | EasyScanOperatorCreator implements ProducerOperator.Creator<EasySubScan> { static boolean selectsAllColumns(final BatchSchema datasetSchema, final List<SchemaPath> projectedColumns) { final Set<String> columnsInTable = FluentIterable.from(datasetSchema) .transform( new Function<Field, String>() { @Override public String apply(Field input) { return input.getName(); }}) .filter( new Predicate<String>() { @Override public boolean apply(String input) { return !input.equals(IncrementalUpdateUtils.UPDATE_COLUMN); }}) .toSet(); final Set<String> selectedColumns = FluentIterable.from(projectedColumns) .transform( new Function<SchemaPath, String>() { @Override public String apply(SchemaPath input) { return input.getAsUnescapedPath(); } }) .toSet(); return columnsInTable.equals(selectedColumns); } @Override ProducerOperator
create(FragmentExecutionContext fragmentExecContext, final OperatorContext context, EasySubScan config); } | EasyScanOperatorCreator implements ProducerOperator.Creator<EasySubScan> { static boolean selectsAllColumns(final BatchSchema datasetSchema, final List<SchemaPath> projectedColumns) { final Set<String> columnsInTable = FluentIterable.from(datasetSchema) .transform( new Function<Field, String>() { @Override public String apply(Field input) { return input.getName(); }}) .filter( new Predicate<String>() { @Override public boolean apply(String input) { return !input.equals(IncrementalUpdateUtils.UPDATE_COLUMN); }}) .toSet(); final Set<String> selectedColumns = FluentIterable.from(projectedColumns) .transform( new Function<SchemaPath, String>() { @Override public String apply(SchemaPath input) { return input.getAsUnescapedPath(); } }) .toSet(); return columnsInTable.equals(selectedColumns); } @Override ProducerOperator create(FragmentExecutionContext fragmentExecContext, final OperatorContext context, EasySubScan config); } |
@Test public void falseWhenChildrenAreSelected() { BatchSchema schema = mock(BatchSchema.class); when(schema.iterator()) .thenReturn(Lists.newArrayList( new Field("a1", new FieldType(true, new ArrowType.Struct(), null), Lists.newArrayList(Field.nullable("a2", new ArrowType.Bool()))), Field.nullable("a3", new ArrowType.Bool())).iterator()); assertFalse(EasyScanOperatorCreator.selectsAllColumns(schema, Lists.newArrayList(SchemaPath.getSimplePath("a1"), SchemaPath.getCompoundPath("a1", "a2"), SchemaPath.getSimplePath("a3")))); } | static boolean selectsAllColumns(final BatchSchema datasetSchema, final List<SchemaPath> projectedColumns) { final Set<String> columnsInTable = FluentIterable.from(datasetSchema) .transform( new Function<Field, String>() { @Override public String apply(Field input) { return input.getName(); }}) .filter( new Predicate<String>() { @Override public boolean apply(String input) { return !input.equals(IncrementalUpdateUtils.UPDATE_COLUMN); }}) .toSet(); final Set<String> selectedColumns = FluentIterable.from(projectedColumns) .transform( new Function<SchemaPath, String>() { @Override public String apply(SchemaPath input) { return input.getAsUnescapedPath(); } }) .toSet(); return columnsInTable.equals(selectedColumns); } | EasyScanOperatorCreator implements ProducerOperator.Creator<EasySubScan> { static boolean selectsAllColumns(final BatchSchema datasetSchema, final List<SchemaPath> projectedColumns) { final Set<String> columnsInTable = FluentIterable.from(datasetSchema) .transform( new Function<Field, String>() { @Override public String apply(Field input) { return input.getName(); }}) .filter( new Predicate<String>() { @Override public boolean apply(String input) { return !input.equals(IncrementalUpdateUtils.UPDATE_COLUMN); }}) .toSet(); final Set<String> selectedColumns = FluentIterable.from(projectedColumns) .transform( new Function<SchemaPath, String>() { @Override public String apply(SchemaPath input) { return input.getAsUnescapedPath(); } }) .toSet(); return columnsInTable.equals(selectedColumns); } } | EasyScanOperatorCreator implements ProducerOperator.Creator<EasySubScan> { static boolean selectsAllColumns(final BatchSchema datasetSchema, final List<SchemaPath> projectedColumns) { final Set<String> columnsInTable = FluentIterable.from(datasetSchema) .transform( new Function<Field, String>() { @Override public String apply(Field input) { return input.getName(); }}) .filter( new Predicate<String>() { @Override public boolean apply(String input) { return !input.equals(IncrementalUpdateUtils.UPDATE_COLUMN); }}) .toSet(); final Set<String> selectedColumns = FluentIterable.from(projectedColumns) .transform( new Function<SchemaPath, String>() { @Override public String apply(SchemaPath input) { return input.getAsUnescapedPath(); } }) .toSet(); return columnsInTable.equals(selectedColumns); } } | EasyScanOperatorCreator implements ProducerOperator.Creator<EasySubScan> { static boolean selectsAllColumns(final BatchSchema datasetSchema, final List<SchemaPath> projectedColumns) { final Set<String> columnsInTable = FluentIterable.from(datasetSchema) .transform( new Function<Field, String>() { @Override public String apply(Field input) { return input.getName(); }}) .filter( new Predicate<String>() { @Override public boolean apply(String input) { return !input.equals(IncrementalUpdateUtils.UPDATE_COLUMN); }}) .toSet(); final Set<String> selectedColumns = FluentIterable.from(projectedColumns) .transform( new Function<SchemaPath, String>() { @Override public String
apply(SchemaPath input) { return input.getAsUnescapedPath(); } }) .toSet(); return columnsInTable.equals(selectedColumns); } @Override ProducerOperator create(FragmentExecutionContext fragmentExecContext, final OperatorContext context, EasySubScan config); } | EasyScanOperatorCreator implements ProducerOperator.Creator<EasySubScan> { static boolean selectsAllColumns(final BatchSchema datasetSchema, final List<SchemaPath> projectedColumns) { final Set<String> columnsInTable = FluentIterable.from(datasetSchema) .transform( new Function<Field, String>() { @Override public String apply(Field input) { return input.getName(); }}) .filter( new Predicate<String>() { @Override public boolean apply(String input) { return !input.equals(IncrementalUpdateUtils.UPDATE_COLUMN); }}) .toSet(); final Set<String> selectedColumns = FluentIterable.from(projectedColumns) .transform( new Function<SchemaPath, String>() { @Override public String apply(SchemaPath input) { return input.getAsUnescapedPath(); } }) .toSet(); return columnsInTable.equals(selectedColumns); } @Override ProducerOperator create(FragmentExecutionContext fragmentExecContext, final OperatorContext context, EasySubScan config); } |
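The four tests above fix the semantics of selectsAllColumns: the internal update column is dropped from the table side, and the remaining top-level names must match the projected paths exactly, so extra columns or child paths break equality. A stream-based sketch of the same set comparison, with strings standing in for Field and SchemaPath (the UPDATE_COLUMN value here is an assumption):

    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;
    import java.util.stream.Collectors;

    public class SelectsAllColumnsSketch {
      static final String UPDATE_COLUMN = "$_dremio_$_update_$"; // assumed sentinel value

      // Same set comparison as selectsAllColumns: drop the internal update
      // column from the table side, then require an exact match of names.
      static boolean selectsAllColumns(List<String> tableColumns, List<String> projected) {
        Set<String> inTable = tableColumns.stream()
            .filter(c -> !c.equals(UPDATE_COLUMN))
            .collect(Collectors.toSet());
        return inTable.equals(new HashSet<>(projected));
      }

      public static void main(String[] args) {
        System.out.println(selectsAllColumns(List.of("a1", UPDATE_COLUMN), List.of("a1"))); // true
        System.out.println(selectsAllColumns(List.of("a1", "a2"), List.of("a1")));          // false
        System.out.println(selectsAllColumns(List.of("a1", "a3"), List.of("a1", "a1.a2", "a3"))); // false
      }
    }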
@Test public void withUniqueConnProps() throws Exception { final DremioFileSystemCache dfsc = new DremioFileSystemCache(); final URI uri = URI.create("file: final List<String> uniqueProps = ImmutableList.of("prop1", "prop2"); Configuration conf1 = new Configuration(); FileSystem fs1 = dfsc.get(uri, conf1, uniqueProps); Configuration conf2 = new Configuration(conf1); conf2.set("prop1", "prop1Val"); FileSystem fs2 = dfsc.get(uri, conf2, uniqueProps); assertTrue(fs1 != fs2); FileSystem fs3 = dfsc.get(uri, conf2, uniqueProps); assertTrue(fs2 == fs3); FileSystem fs4 = getAs("newUser", dfsc, uri, conf2, uniqueProps); assertTrue(fs2 != fs4); assertTrue(fs1 != fs4); FileSystem fs5 = dfsc.get(uri, conf1, null); assertTrue(fs1 != fs5); FileSystem fs6 = dfsc.get(uri, conf1, null); assertTrue(fs5 == fs6); } | public FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps) throws IOException{ final Key key = new Key(uri, conf, uniqueConnectionProps); FileSystem fs; synchronized (this) { fs = map.get(key); } if (fs != null) { return fs; } final String disableCacheName = String.format("fs.%s.impl.disable.cache", uri.getScheme()); final boolean disableCache = conf.getBoolean(disableCacheName, false); if (disableCache || key.uniqueConnectionPropValues == null || key.uniqueConnectionPropValues.isEmpty()) { return FileSystem.get(uri, conf); } final Configuration cloneConf = new Configuration(conf); cloneConf.set(disableCacheName, "true"); fs = FileSystem.get(uri, cloneConf); cloneConf.setBoolean(disableCacheName, disableCache); synchronized (this) { FileSystem oldfs = map.get(key); if (oldfs != null) { fs.close(); return oldfs; } if (map.isEmpty() && !ShutdownHookManager.get().isShutdownInProgress()) { ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY); } map.put(key, fs); if (conf.getBoolean(FS_AUTOMATIC_CLOSE_KEY, FS_AUTOMATIC_CLOSE_DEFAULT)) { toAutoClose.add(key); } return fs; } } | DremioFileSystemCache { public FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps) throws IOException{ final Key key = new Key(uri, conf, uniqueConnectionProps); FileSystem fs; synchronized (this) { fs = map.get(key); } if (fs != null) { return fs; } final String disableCacheName = String.format("fs.%s.impl.disable.cache", uri.getScheme()); final boolean disableCache = conf.getBoolean(disableCacheName, false); if (disableCache || key.uniqueConnectionPropValues == null || key.uniqueConnectionPropValues.isEmpty()) { return FileSystem.get(uri, conf); } final Configuration cloneConf = new Configuration(conf); cloneConf.set(disableCacheName, "true"); fs = FileSystem.get(uri, cloneConf); cloneConf.setBoolean(disableCacheName, disableCache); synchronized (this) { FileSystem oldfs = map.get(key); if (oldfs != null) { fs.close(); return oldfs; } if (map.isEmpty() && !ShutdownHookManager.get().isShutdownInProgress()) { ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY); } map.put(key, fs); if (conf.getBoolean(FS_AUTOMATIC_CLOSE_KEY, FS_AUTOMATIC_CLOSE_DEFAULT)) { toAutoClose.add(key); } return fs; } } } | DremioFileSystemCache { public FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps) throws IOException{ final Key key = new Key(uri, conf, uniqueConnectionProps); FileSystem fs; synchronized (this) { fs = map.get(key); } if (fs != null) { return fs; } final String disableCacheName = String.format("fs.%s.impl.disable.cache", uri.getScheme()); final boolean disableCache =
conf.getBoolean(disableCacheName, false); if (disableCache || key.uniqueConnectionPropValues == null || key.uniqueConnectionPropValues.isEmpty()) { return FileSystem.get(uri, conf); } final Configuration cloneConf = new Configuration(conf); cloneConf.set(disableCacheName, "true"); fs = FileSystem.get(uri, cloneConf); cloneConf.setBoolean(disableCacheName, disableCache); synchronized (this) { FileSystem oldfs = map.get(key); if (oldfs != null) { fs.close(); return oldfs; } if (map.isEmpty() && !ShutdownHookManager.get().isShutdownInProgress()) { ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY); } map.put(key, fs); if (conf.getBoolean(FS_AUTOMATIC_CLOSE_KEY, FS_AUTOMATIC_CLOSE_DEFAULT)) { toAutoClose.add(key); } return fs; } } } | DremioFileSystemCache { public FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps) throws IOException{ final Key key = new Key(uri, conf, uniqueConnectionProps); FileSystem fs; synchronized (this) { fs = map.get(key); } if (fs != null) { return fs; } final String disableCacheName = String.format("fs.%s.impl.disable.cache", uri.getScheme()); final boolean disableCache = conf.getBoolean(disableCacheName, false); if (disableCache || key.uniqueConnectionPropValues == null || key.uniqueConnectionPropValues.isEmpty()) { return FileSystem.get(uri, conf); } final Configuration cloneConf = new Configuration(conf); cloneConf.set(disableCacheName, "true"); fs = FileSystem.get(uri, cloneConf); cloneConf.setBoolean(disableCacheName, disableCache); synchronized (this) { FileSystem oldfs = map.get(key); if (oldfs != null) { fs.close(); return oldfs; } if (map.isEmpty() && !ShutdownHookManager.get().isShutdownInProgress()) { ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY); } map.put(key, fs); if (conf.getBoolean(FS_AUTOMATIC_CLOSE_KEY, FS_AUTOMATIC_CLOSE_DEFAULT)) { toAutoClose.add(key); } return fs; } } FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps); synchronized void closeAll(boolean onlyAutomatic); } | DremioFileSystemCache { public FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps) throws IOException{ final Key key = new Key(uri, conf, uniqueConnectionProps); FileSystem fs; synchronized (this) { fs = map.get(key); } if (fs != null) { return fs; } final String disableCacheName = String.format("fs.%s.impl.disable.cache", uri.getScheme()); final boolean disableCache = conf.getBoolean(disableCacheName, false); if (disableCache || key.uniqueConnectionPropValues == null || key.uniqueConnectionPropValues.isEmpty()) { return FileSystem.get(uri, conf); } final Configuration cloneConf = new Configuration(conf); cloneConf.set(disableCacheName, "true"); fs = FileSystem.get(uri, cloneConf); cloneConf.setBoolean(disableCacheName, disableCache); synchronized (this) { FileSystem oldfs = map.get(key); if (oldfs != null) { fs.close(); return oldfs; } if (map.isEmpty() && !ShutdownHookManager.get().isShutdownInProgress()) { ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY); } map.put(key, fs); if (conf.getBoolean(FS_AUTOMATIC_CLOSE_KEY, FS_AUTOMATIC_CLOSE_DEFAULT)) { toAutoClose.add(key); } return fs; } } FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps); synchronized void closeAll(boolean onlyAutomatic); } |
@Test public void withoutUniqueConnProps() throws Exception { final DremioFileSystemCache dfsc = new DremioFileSystemCache(); final URI uri = URI.create("file:///"); Configuration conf1 = new Configuration(); FileSystem fs1 = dfsc.get(uri, conf1, null); Configuration conf2 = new Configuration(conf1); conf2.set("blah", "boo"); FileSystem fs2 = dfsc.get(uri, conf2, null); assertTrue(fs1 == fs2); FileSystem fs3 = getAs("newUser", dfsc, uri, conf1, null); assertTrue(fs1 != fs3); } | public FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps) throws IOException{ final Key key = new Key(uri, conf, uniqueConnectionProps); FileSystem fs; synchronized (this) { fs = map.get(key); } if (fs != null) { return fs; } final String disableCacheName = String.format("fs.%s.impl.disable.cache", uri.getScheme()); final boolean disableCache = conf.getBoolean(disableCacheName, false); if (disableCache || key.uniqueConnectionPropValues == null || key.uniqueConnectionPropValues.isEmpty()) { return FileSystem.get(uri, conf); } final Configuration cloneConf = new Configuration(conf); cloneConf.set(disableCacheName, "true"); fs = FileSystem.get(uri, cloneConf); cloneConf.setBoolean(disableCacheName, disableCache); synchronized (this) { FileSystem oldfs = map.get(key); if (oldfs != null) { fs.close(); return oldfs; } if (map.isEmpty() && !ShutdownHookManager.get().isShutdownInProgress()) { ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY); } map.put(key, fs); if (conf.getBoolean(FS_AUTOMATIC_CLOSE_KEY, FS_AUTOMATIC_CLOSE_DEFAULT)) { toAutoClose.add(key); } return fs; } } | DremioFileSystemCache { public FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps) throws IOException{ final Key key = new Key(uri, conf, uniqueConnectionProps); FileSystem fs; synchronized (this) { fs = map.get(key); } if (fs != null) { return fs; } final String disableCacheName = String.format("fs.%s.impl.disable.cache", uri.getScheme()); final boolean disableCache = conf.getBoolean(disableCacheName, false); if (disableCache || key.uniqueConnectionPropValues == null || key.uniqueConnectionPropValues.isEmpty()) { return FileSystem.get(uri, conf); } final Configuration cloneConf = new Configuration(conf); cloneConf.set(disableCacheName, "true"); fs = FileSystem.get(uri, cloneConf); cloneConf.setBoolean(disableCacheName, disableCache); synchronized (this) { FileSystem oldfs = map.get(key); if (oldfs != null) { fs.close(); return oldfs; } if (map.isEmpty() && !ShutdownHookManager.get().isShutdownInProgress()) { ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY); } map.put(key, fs); if (conf.getBoolean(FS_AUTOMATIC_CLOSE_KEY, FS_AUTOMATIC_CLOSE_DEFAULT)) { toAutoClose.add(key); } return fs; } } } | DremioFileSystemCache { public FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps) throws IOException{ final Key key = new Key(uri, conf, uniqueConnectionProps); FileSystem fs; synchronized (this) { fs = map.get(key); } if (fs != null) { return fs; } final String disableCacheName = String.format("fs.%s.impl.disable.cache", uri.getScheme()); final boolean disableCache = conf.getBoolean(disableCacheName, false); if (disableCache || key.uniqueConnectionPropValues == null || key.uniqueConnectionPropValues.isEmpty()) { return FileSystem.get(uri, conf); } final Configuration cloneConf = new Configuration(conf); cloneConf.set(disableCacheName, "true"); fs = FileSystem.get(uri, cloneConf); 
cloneConf.setBoolean(disableCacheName, disableCache); synchronized (this) { FileSystem oldfs = map.get(key); if (oldfs != null) { fs.close(); return oldfs; } if (map.isEmpty() && !ShutdownHookManager.get().isShutdownInProgress()) { ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY); } map.put(key, fs); if (conf.getBoolean(FS_AUTOMATIC_CLOSE_KEY, FS_AUTOMATIC_CLOSE_DEFAULT)) { toAutoClose.add(key); } return fs; } } } | DremioFileSystemCache { public FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps) throws IOException{ final Key key = new Key(uri, conf, uniqueConnectionProps); FileSystem fs; synchronized (this) { fs = map.get(key); } if (fs != null) { return fs; } final String disableCacheName = String.format("fs.%s.impl.disable.cache", uri.getScheme()); final boolean disableCache = conf.getBoolean(disableCacheName, false); if (disableCache || key.uniqueConnectionPropValues == null || key.uniqueConnectionPropValues.isEmpty()) { return FileSystem.get(uri, conf); } final Configuration cloneConf = new Configuration(conf); cloneConf.set(disableCacheName, "true"); fs = FileSystem.get(uri, cloneConf); cloneConf.setBoolean(disableCacheName, disableCache); synchronized (this) { FileSystem oldfs = map.get(key); if (oldfs != null) { fs.close(); return oldfs; } if (map.isEmpty() && !ShutdownHookManager.get().isShutdownInProgress()) { ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY); } map.put(key, fs); if (conf.getBoolean(FS_AUTOMATIC_CLOSE_KEY, FS_AUTOMATIC_CLOSE_DEFAULT)) { toAutoClose.add(key); } return fs; } } FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps); synchronized void closeAll(boolean onlyAutomatic); } | DremioFileSystemCache { public FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps) throws IOException{ final Key key = new Key(uri, conf, uniqueConnectionProps); FileSystem fs; synchronized (this) { fs = map.get(key); } if (fs != null) { return fs; } final String disableCacheName = String.format("fs.%s.impl.disable.cache", uri.getScheme()); final boolean disableCache = conf.getBoolean(disableCacheName, false); if (disableCache || key.uniqueConnectionPropValues == null || key.uniqueConnectionPropValues.isEmpty()) { return FileSystem.get(uri, conf); } final Configuration cloneConf = new Configuration(conf); cloneConf.set(disableCacheName, "true"); fs = FileSystem.get(uri, cloneConf); cloneConf.setBoolean(disableCacheName, disableCache); synchronized (this) { FileSystem oldfs = map.get(key); if (oldfs != null) { fs.close(); return oldfs; } if (map.isEmpty() && !ShutdownHookManager.get().isShutdownInProgress()) { ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY); } map.put(key, fs); if (conf.getBoolean(FS_AUTOMATIC_CLOSE_KEY, FS_AUTOMATIC_CLOSE_DEFAULT)) { toAutoClose.add(key); } return fs; } } FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps); synchronized void closeAll(boolean onlyAutomatic); } |
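Taken together, the two tests above pin down the cache-key contract of DremioFileSystemCache.get: only the values of the designated unique connection properties (plus the URI and the calling user) participate in identity, unrelated Configuration entries such as "blah" are ignored, and a null or empty unique-property list bypasses the custom keying entirely. A hedged sketch of such a key, with hypothetical names rather than the actual private Key class:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;

// Hypothetical sketch of a cache key built from (scheme, user, values of the
// designated "unique" connection properties); not the real Key implementation.
public final class FsCacheKeySketch {
  final String scheme;
  final String user;
  final List<String> uniquePropValues = new ArrayList<>();

  FsCacheKeySketch(String scheme, String user, Map<String, String> conf, List<String> uniqueProps) {
    this.scheme = scheme;
    this.user = user;
    if (uniqueProps != null) {
      for (String prop : uniqueProps) {
        // Only the values of the listed properties participate in identity.
        uniquePropValues.add(conf.get(prop));
      }
    }
  }

  @Override public boolean equals(Object o) {
    if (!(o instanceof FsCacheKeySketch)) return false;
    FsCacheKeySketch k = (FsCacheKeySketch) o;
    return Objects.equals(scheme, k.scheme)
        && Objects.equals(user, k.user)
        && Objects.equals(uniquePropValues, k.uniquePropValues);
  }

  @Override public int hashCode() {
    return Objects.hash(scheme, user, uniquePropValues);
  }

  public static void main(String[] args) {
    List<String> uniqueProps = Arrays.asList("prop1", "prop2");
    Map<String, String> conf1 = new HashMap<>();
    Map<String, String> conf2 = new HashMap<>(conf1);
    conf2.put("prop1", "prop1Val");
    FsCacheKeySketch k1 = new FsCacheKeySketch("file", "user", conf1, uniqueProps);
    System.out.println(k1.equals(new FsCacheKeySketch("file", "user", conf2, uniqueProps))); // false: "prop1" differs
    Map<String, String> conf3 = new HashMap<>(conf1);
    conf3.put("blah", "boo"); // not a unique prop: identity unchanged
    System.out.println(k1.equals(new FsCacheKeySketch("file", "user", conf3, uniqueProps))); // true
  }
}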
@Test public void test() { Mockito.when(context.getAttribute(DataJsonOutput.DREMIO_JOB_DATA_NUMBERS_AS_STRINGS_ATTRIBUTE)).thenReturn(this.inputValue); assertEquals(this.expectedValue, DataJsonOutput.isNumberAsString(context)); } | public static final boolean isNumberAsString(DatabindContext context) { Object attr = context.getAttribute(DataJsonOutput.DREMIO_JOB_DATA_NUMBERS_AS_STRINGS_ATTRIBUTE); return attr instanceof Boolean && ((Boolean)attr).booleanValue(); } | DataJsonOutput { public static final boolean isNumberAsString(DatabindContext context) { Object attr = context.getAttribute(DataJsonOutput.DREMIO_JOB_DATA_NUMBERS_AS_STRINGS_ATTRIBUTE); return attr instanceof Boolean && ((Boolean)attr).booleanValue(); } } | DataJsonOutput { public static final boolean isNumberAsString(DatabindContext context) { Object attr = context.getAttribute(DataJsonOutput.DREMIO_JOB_DATA_NUMBERS_AS_STRINGS_ATTRIBUTE); return attr instanceof Boolean && ((Boolean)attr).booleanValue(); } DataJsonOutput(JsonGenerator gen, boolean convertNumbersToStrings); } | DataJsonOutput { public static final boolean isNumberAsString(DatabindContext context) { Object attr = context.getAttribute(DataJsonOutput.DREMIO_JOB_DATA_NUMBERS_AS_STRINGS_ATTRIBUTE); return attr instanceof Boolean && ((Boolean)attr).booleanValue(); } DataJsonOutput(JsonGenerator gen, boolean convertNumbersToStrings); static final ObjectWriter setNumbersAsStrings(ObjectWriter writer, boolean isEnabled); static final boolean isNumberAsString(DatabindContext context); void writeStartArray(); void writeEndArray(); void writeStartObject(); void writeEndObject(); void writeFieldName(String name); void writeVarChar(String value); void writeBoolean(boolean value); void writeDecimal(FieldReader reader, JsonOutputContext context); void writeTinyInt(FieldReader reader, JsonOutputContext context); void writeSmallInt(FieldReader reader, JsonOutputContext context); void writeInt(FieldReader reader, JsonOutputContext context); void writeBigInt(FieldReader reader, JsonOutputContext context); void writeFloat(FieldReader reader, JsonOutputContext context); void writeDouble(FieldReader reader, JsonOutputContext context); void writeVarChar(FieldReader reader, JsonOutputContext context); void writeVar16Char(FieldReader reader, JsonOutputContext context); void writeVarBinary(FieldReader reader, JsonOutputContext context); void writeBit(FieldReader reader, JsonOutputContext context); void writeDateMilli(FieldReader reader, JsonOutputContext context); void writeDate(FieldReader reader, JsonOutputContext context); void writeTimeMilli(FieldReader reader, JsonOutputContext context); void writeTime(FieldReader reader, JsonOutputContext context); void writeTimeStampMilli(FieldReader reader, JsonOutputContext context); void writeIntervalYear(FieldReader reader, JsonOutputContext context); void writeIntervalDay(FieldReader reader, JsonOutputContext context); void writeNull(JsonOutputContext context); void writeUnion(FieldReader reader, JsonOutputContext context); void writeMap(FieldReader reader, JsonOutputContext context); void writeList(FieldReader reader, JsonOutputContext context); } | DataJsonOutput { public static final boolean isNumberAsString(DatabindContext context) { Object attr = context.getAttribute(DataJsonOutput.DREMIO_JOB_DATA_NUMBERS_AS_STRINGS_ATTRIBUTE); return attr instanceof Boolean && ((Boolean)attr).booleanValue(); } DataJsonOutput(JsonGenerator gen, boolean convertNumbersToStrings); static final ObjectWriter setNumbersAsStrings(ObjectWriter writer, boolean 
isEnabled); static final boolean isNumberAsString(DatabindContext context); void writeStartArray(); void writeEndArray(); void writeStartObject(); void writeEndObject(); void writeFieldName(String name); void writeVarChar(String value); void writeBoolean(boolean value); void writeDecimal(FieldReader reader, JsonOutputContext context); void writeTinyInt(FieldReader reader, JsonOutputContext context); void writeSmallInt(FieldReader reader, JsonOutputContext context); void writeInt(FieldReader reader, JsonOutputContext context); void writeBigInt(FieldReader reader, JsonOutputContext context); void writeFloat(FieldReader reader, JsonOutputContext context); void writeDouble(FieldReader reader, JsonOutputContext context); void writeVarChar(FieldReader reader, JsonOutputContext context); void writeVar16Char(FieldReader reader, JsonOutputContext context); void writeVarBinary(FieldReader reader, JsonOutputContext context); void writeBit(FieldReader reader, JsonOutputContext context); void writeDateMilli(FieldReader reader, JsonOutputContext context); void writeDate(FieldReader reader, JsonOutputContext context); void writeTimeMilli(FieldReader reader, JsonOutputContext context); void writeTime(FieldReader reader, JsonOutputContext context); void writeTimeStampMilli(FieldReader reader, JsonOutputContext context); void writeIntervalYear(FieldReader reader, JsonOutputContext context); void writeIntervalDay(FieldReader reader, JsonOutputContext context); void writeNull(JsonOutputContext context); void writeUnion(FieldReader reader, JsonOutputContext context); void writeMap(FieldReader reader, JsonOutputContext context); void writeList(FieldReader reader, JsonOutputContext context); static final DateTimeFormatter FORMAT_DATE; static final DateTimeFormatter FORMAT_TIMESTAMP; static final DateTimeFormatter FORMAT_TIME; static final String DREMIO_JOB_DATA_NUMBERS_AS_STRINGS_ATTRIBUTE; } |
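isNumberAsString simply probes a Boolean serialization attribute on the Jackson DatabindContext. A small sketch of how a caller could attach that attribute to an ObjectWriter; the attribute key string below is illustrative only, since the real code routes through DataJsonOutput.setNumbersAsStrings and the DREMIO_JOB_DATA_NUMBERS_AS_STRINGS_ATTRIBUTE constant:

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;

public class NumbersAsStringsSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative attribute key; the actual constant value lives in DataJsonOutput.
    final String NUMBERS_AS_STRINGS = "dremio.job.data.numbersAsStrings";
    ObjectWriter writer = new ObjectMapper().writer()
        .withAttribute(NUMBERS_AS_STRINGS, Boolean.TRUE);
    // Custom serializers that call context.getAttribute(NUMBERS_AS_STRINGS)
    // during this write observe Boolean.TRUE, which is exactly the case the
    // instanceof-Boolean check above accepts; any other value yields false.
    System.out.println(writer.writeValueAsString(42));
  }
}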
@Test public void testNoDuplicateUUID() throws Exception { DACConfig dacConfig = DACConfig.newConfig(); Upgrade upgrade = new Upgrade(dacConfig, CLASSPATH_SCAN_RESULT, false); List<? extends UpgradeTask> tasks = upgrade.getUpgradeTasks(); Set<String> uuidToCount = new HashSet<>(); tasks.forEach(task -> assertTrue( String.format( "Task %s has duplicate UUID. Use some other UUID. For example: %s", task.getTaskName(), UUID.randomUUID().toString()), uuidToCount.add(task.getTaskUUID()))); } | @VisibleForTesting List<? extends UpgradeTask> getUpgradeTasks() { return upgradeTasks; } | Upgrade { @VisibleForTesting List<? extends UpgradeTask> getUpgradeTasks() { return upgradeTasks; } } | Upgrade { @VisibleForTesting List<? extends UpgradeTask> getUpgradeTasks() { return upgradeTasks; } Upgrade(DACConfig dacConfig, ScanResult classPathScan, boolean verbose); } | Upgrade { @VisibleForTesting List<? extends UpgradeTask> getUpgradeTasks() { return upgradeTasks; } Upgrade(DACConfig dacConfig, ScanResult classPathScan, boolean verbose); void run(); void run(boolean noDBOpenRetry); @VisibleForTesting void validateUpgrade(final LegacyKVStoreProvider storeProvider, final String curEdition); void run(final LegacyKVStoreProvider storeProvider); static void main(String[] args); } | Upgrade { @VisibleForTesting List<? extends UpgradeTask> getUpgradeTasks() { return upgradeTasks; } Upgrade(DACConfig dacConfig, ScanResult classPathScan, boolean verbose); void run(); void run(boolean noDBOpenRetry); @VisibleForTesting void validateUpgrade(final LegacyKVStoreProvider storeProvider, final String curEdition); void run(final LegacyKVStoreProvider storeProvider); static void main(String[] args); static final Comparator<Version> UPGRADE_VERSION_ORDERING; } |
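The uniqueness assertion leans on the fact that Set.add returns false when the element is already present, so that single boolean doubles as the duplicate detector. The idiom in isolation:

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class DuplicateIdCheckSketch {
  public static void main(String[] args) {
    List<String> ids = Arrays.asList("a-1", "b-2", "a-1");
    Set<String> seen = new HashSet<>();
    for (String id : ids) {
      // Set.add returns false for an element already in the set,
      // which is the duplicate signal the test asserts on.
      if (!seen.add(id)) {
        System.out.println("duplicate UUID: " + id);
      }
    }
  }
}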
@Test public void withoutUniqueConnPropsWithCacheExplicitlyDisabled() throws Exception { final DremioFileSystemCache dfsc = new DremioFileSystemCache(); final URI uri = URI.create("file:///"); Configuration conf1 = new Configuration(); final String disableCacheName = String.format("fs.%s.impl.disable.cache", uri.getScheme()); conf1.setBoolean(disableCacheName, true); FileSystem fs1 = dfsc.get(uri, conf1, null); Configuration conf2 = new Configuration(conf1); conf2.set("blah", "boo"); FileSystem fs2 = dfsc.get(uri, conf2, null); assertTrue(fs1 != fs2); FileSystem fs3 = getAs("newUser", dfsc, uri, conf1, null); assertTrue(fs1 != fs3); } | public FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps) throws IOException{ final Key key = new Key(uri, conf, uniqueConnectionProps); FileSystem fs; synchronized (this) { fs = map.get(key); } if (fs != null) { return fs; } final String disableCacheName = String.format("fs.%s.impl.disable.cache", uri.getScheme()); final boolean disableCache = conf.getBoolean(disableCacheName, false); if (disableCache || key.uniqueConnectionPropValues == null || key.uniqueConnectionPropValues.isEmpty()) { return FileSystem.get(uri, conf); } final Configuration cloneConf = new Configuration(conf); cloneConf.set(disableCacheName, "true"); fs = FileSystem.get(uri, cloneConf); cloneConf.setBoolean(disableCacheName, disableCache); synchronized (this) { FileSystem oldfs = map.get(key); if (oldfs != null) { fs.close(); return oldfs; } if (map.isEmpty() && !ShutdownHookManager.get().isShutdownInProgress()) { ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY); } map.put(key, fs); if (conf.getBoolean(FS_AUTOMATIC_CLOSE_KEY, FS_AUTOMATIC_CLOSE_DEFAULT)) { toAutoClose.add(key); } return fs; } } | DremioFileSystemCache { public FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps) throws IOException{ final Key key = new Key(uri, conf, uniqueConnectionProps); FileSystem fs; synchronized (this) { fs = map.get(key); } if (fs != null) { return fs; } final String disableCacheName = String.format("fs.%s.impl.disable.cache", uri.getScheme()); final boolean disableCache = conf.getBoolean(disableCacheName, false); if (disableCache || key.uniqueConnectionPropValues == null || key.uniqueConnectionPropValues.isEmpty()) { return FileSystem.get(uri, conf); } final Configuration cloneConf = new Configuration(conf); cloneConf.set(disableCacheName, "true"); fs = FileSystem.get(uri, cloneConf); cloneConf.setBoolean(disableCacheName, disableCache); synchronized (this) { FileSystem oldfs = map.get(key); if (oldfs != null) { fs.close(); return oldfs; } if (map.isEmpty() && !ShutdownHookManager.get().isShutdownInProgress()) { ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY); } map.put(key, fs); if (conf.getBoolean(FS_AUTOMATIC_CLOSE_KEY, FS_AUTOMATIC_CLOSE_DEFAULT)) { toAutoClose.add(key); } return fs; } } } | DremioFileSystemCache { public FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps) throws IOException{ final Key key = new Key(uri, conf, uniqueConnectionProps); FileSystem fs; synchronized (this) { fs = map.get(key); } if (fs != null) { return fs; } final String disableCacheName = String.format("fs.%s.impl.disable.cache", uri.getScheme()); final boolean disableCache = conf.getBoolean(disableCacheName, false); if (disableCache || key.uniqueConnectionPropValues == null || key.uniqueConnectionPropValues.isEmpty()) { return 
FileSystem.get(uri, conf); } final Configuration cloneConf = new Configuration(conf); cloneConf.set(disableCacheName, "true"); fs = FileSystem.get(uri, cloneConf); cloneConf.setBoolean(disableCacheName, disableCache); synchronized (this) { FileSystem oldfs = map.get(key); if (oldfs != null) { fs.close(); return oldfs; } if (map.isEmpty() && !ShutdownHookManager.get().isShutdownInProgress()) { ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY); } map.put(key, fs); if (conf.getBoolean(FS_AUTOMATIC_CLOSE_KEY, FS_AUTOMATIC_CLOSE_DEFAULT)) { toAutoClose.add(key); } return fs; } } } | DremioFileSystemCache { public FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps) throws IOException{ final Key key = new Key(uri, conf, uniqueConnectionProps); FileSystem fs; synchronized (this) { fs = map.get(key); } if (fs != null) { return fs; } final String disableCacheName = String.format("fs.%s.impl.disable.cache", uri.getScheme()); final boolean disableCache = conf.getBoolean(disableCacheName, false); if (disableCache || key.uniqueConnectionPropValues == null || key.uniqueConnectionPropValues.isEmpty()) { return FileSystem.get(uri, conf); } final Configuration cloneConf = new Configuration(conf); cloneConf.set(disableCacheName, "true"); fs = FileSystem.get(uri, cloneConf); cloneConf.setBoolean(disableCacheName, disableCache); synchronized (this) { FileSystem oldfs = map.get(key); if (oldfs != null) { fs.close(); return oldfs; } if (map.isEmpty() && !ShutdownHookManager.get().isShutdownInProgress()) { ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY); } map.put(key, fs); if (conf.getBoolean(FS_AUTOMATIC_CLOSE_KEY, FS_AUTOMATIC_CLOSE_DEFAULT)) { toAutoClose.add(key); } return fs; } } FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps); synchronized void closeAll(boolean onlyAutomatic); } | DremioFileSystemCache { public FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps) throws IOException{ final Key key = new Key(uri, conf, uniqueConnectionProps); FileSystem fs; synchronized (this) { fs = map.get(key); } if (fs != null) { return fs; } final String disableCacheName = String.format("fs.%s.impl.disable.cache", uri.getScheme()); final boolean disableCache = conf.getBoolean(disableCacheName, false); if (disableCache || key.uniqueConnectionPropValues == null || key.uniqueConnectionPropValues.isEmpty()) { return FileSystem.get(uri, conf); } final Configuration cloneConf = new Configuration(conf); cloneConf.set(disableCacheName, "true"); fs = FileSystem.get(uri, cloneConf); cloneConf.setBoolean(disableCacheName, disableCache); synchronized (this) { FileSystem oldfs = map.get(key); if (oldfs != null) { fs.close(); return oldfs; } if (map.isEmpty() && !ShutdownHookManager.get().isShutdownInProgress()) { ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY); } map.put(key, fs); if (conf.getBoolean(FS_AUTOMATIC_CLOSE_KEY, FS_AUTOMATIC_CLOSE_DEFAULT)) { toAutoClose.add(key); } return fs; } } FileSystem get(URI uri, Configuration conf, List<String> uniqueConnectionProps); synchronized void closeAll(boolean onlyAutomatic); } |
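The third variant exercises Hadoop's per-scheme escape hatch: when fs.<scheme>.impl.disable.cache is true, get falls straight through to FileSystem.get(uri, conf) and no caching occurs, so even identical configurations can yield distinct instances. A small sketch of the flag itself, assuming hadoop-common on the classpath:

import org.apache.hadoop.conf.Configuration;

public class DisableCacheFlagSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false); // skip default resource loading
    String scheme = "file";
    String disableCacheName = String.format("fs.%s.impl.disable.cache", scheme);
    conf.setBoolean(disableCacheName, true);
    // When this reads true, DremioFileSystemCache.get bypasses its own map
    // entirely, so every call may construct a fresh FileSystem.
    System.out.println(conf.getBoolean(disableCacheName, false)); // true
  }
}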
@Test public void withoutAnyTasksTriggeringTimeout() throws Exception { List<TimedRunnable<Void>> tasks = Lists.newArrayList(); for(int i=0; i<100; i++){ tasks.add(new TestTask(2000)); } TimedRunnable.run("Execution without triggering timeout", logger, tasks, 16); } | @Override public final void run() { long start = System.nanoTime(); threadStart=start; try{ value = runInner(); }catch(Exception e){ this.e = e; }finally{ timeNanos = System.nanoTime() - start; } } | TimedRunnable implements Runnable { @Override public final void run() { long start = System.nanoTime(); threadStart=start; try{ value = runInner(); }catch(Exception e){ this.e = e; }finally{ timeNanos = System.nanoTime() - start; } } } | TimedRunnable implements Runnable { @Override public final void run() { long start = System.nanoTime(); threadStart=start; try{ value = runInner(); }catch(Exception e){ this.e = e; }finally{ timeNanos = System.nanoTime() - start; } } } | TimedRunnable implements Runnable { @Override public final void run() { long start = System.nanoTime(); threadStart=start; try{ value = runInner(); }catch(Exception e){ this.e = e; }finally{ timeNanos = System.nanoTime() - start; } } @Override final void run(); long getThreadStart(); long getTimeSpentNanos(); final V getValue(); static List<V> run(final String activity, final Logger logger, final List<TimedRunnable<V>> runnables, int parallelism); static List<V> run(final String activity, final Logger logger, final List<TimedRunnable<V>> runnables,
int parallelism, long timeout); } | TimedRunnable implements Runnable { @Override public final void run() { long start = System.nanoTime(); threadStart=start; try{ value = runInner(); }catch(Exception e){ this.e = e; }finally{ timeNanos = System.nanoTime() - start; } } @Override final void run(); long getThreadStart(); long getTimeSpentNanos(); final V getValue(); static List<V> run(final String activity, final Logger logger, final List<TimedRunnable<V>> runnables, int parallelism); static List<V> run(final String activity, final Logger logger, final List<TimedRunnable<V>> runnables,
int parallelism, long timeout); } |
@Test public void withTasksExceedingTimeout() throws Exception { UserException ex = null; try { List<TimedRunnable<Void>> tasks = Lists.newArrayList(); for (int i = 0; i < 100; i++) { if ((i & (i + 1)) == 0) { tasks.add(new TestTask(2000)); } else { tasks.add(new TestTask(20000)); } } TimedRunnable.run("Execution with some tasks triggering timeout", logger, tasks, 16); } catch (UserException e) { ex = e; } assertNotNull("Expected a UserException", ex); assertThat(ex.getMessage(), containsString("Waited for 93750ms, but tasks for 'Execution with some tasks triggering timeout' are not " + "complete. Total runnable size 100, parallelism 16.")); } | @Override public final void run() { long start = System.nanoTime(); threadStart=start; try{ value = runInner(); }catch(Exception e){ this.e = e; }finally{ timeNanos = System.nanoTime() - start; } } | TimedRunnable implements Runnable { @Override public final void run() { long start = System.nanoTime(); threadStart=start; try{ value = runInner(); }catch(Exception e){ this.e = e; }finally{ timeNanos = System.nanoTime() - start; } } } | TimedRunnable implements Runnable { @Override public final void run() { long start = System.nanoTime(); threadStart=start; try{ value = runInner(); }catch(Exception e){ this.e = e; }finally{ timeNanos = System.nanoTime() - start; } } } | TimedRunnable implements Runnable { @Override public final void run() { long start = System.nanoTime(); threadStart=start; try{ value = runInner(); }catch(Exception e){ this.e = e; }finally{ timeNanos = System.nanoTime() - start; } } @Override final void run(); long getThreadStart(); long getTimeSpentNanos(); final V getValue(); static List<V> run(final String activity, final Logger logger, final List<TimedRunnable<V>> runnables, int parallelism); static List<V> run(final String activity, final Logger logger, final List<TimedRunnable<V>> runnables,
int parallelism, long timeout); } | TimedRunnable implements Runnable { @Override public final void run() { long start = System.nanoTime(); threadStart=start; try{ value = runInner(); }catch(Exception e){ this.e = e; }finally{ timeNanos = System.nanoTime() - start; } } @Override final void run(); long getThreadStart(); long getTimeSpentNanos(); final V getValue(); static List<V> run(final String activity, final Logger logger, final List<TimedRunnable<V>> runnables, int parallelism); static List<V> run(final String activity, final Logger logger, final List<TimedRunnable<V>> runnables,
int parallelism, long timeout); } |
@Test public void withManyTasks() throws Exception { List<TimedRunnable<Void>> tasks = Lists.newArrayList(); for (int i = 0; i < 150000; i++) { tasks.add(new TestTask(0)); } TimedRunnable.run("Execution with lots of tasks", logger, tasks, 16); } | @Override public final void run() { long start = System.nanoTime(); threadStart=start; try{ value = runInner(); }catch(Exception e){ this.e = e; }finally{ timeNanos = System.nanoTime() - start; } } | TimedRunnable implements Runnable { @Override public final void run() { long start = System.nanoTime(); threadStart=start; try{ value = runInner(); }catch(Exception e){ this.e = e; }finally{ timeNanos = System.nanoTime() - start; } } } | TimedRunnable implements Runnable { @Override public final void run() { long start = System.nanoTime(); threadStart=start; try{ value = runInner(); }catch(Exception e){ this.e = e; }finally{ timeNanos = System.nanoTime() - start; } } } | TimedRunnable implements Runnable { @Override public final void run() { long start = System.nanoTime(); threadStart=start; try{ value = runInner(); }catch(Exception e){ this.e = e; }finally{ timeNanos = System.nanoTime() - start; } } @Override final void run(); long getThreadStart(); long getTimeSpentNanos(); final V getValue(); static List<V> run(final String activity, final Logger logger, final List<TimedRunnable<V>> runnables, int parallelism); static List<V> run(final String activity, final Logger logger, final List<TimedRunnable<V>> runnables,
int parallelism, long timeout); } | TimedRunnable implements Runnable { @Override public final void run() { long start = System.nanoTime(); threadStart=start; try{ value = runInner(); }catch(Exception e){ this.e = e; }finally{ timeNanos = System.nanoTime() - start; } } @Override final void run(); long getThreadStart(); long getTimeSpentNanos(); final V getValue(); static List<V> run(final String activity, final Logger logger, final List<TimedRunnable<V>> runnables, int parallelism); static List<V> run(final String activity, final Logger logger, final List<TimedRunnable<V>> runnables,
int parallelism, long timeout); } |
@Test public void withOverriddenHighTimeout() throws Exception { List<TimedRunnable<Void>> tasks = Lists.newArrayList(); for(int i=0; i<10; i++){ tasks.add(new TestTask(20_000)); } TimedRunnable.run("Execution without triggering timeout", logger, tasks, 2, 150_000); } | @Override public final void run() { long start = System.nanoTime(); threadStart=start; try{ value = runInner(); }catch(Exception e){ this.e = e; }finally{ timeNanos = System.nanoTime() - start; } } | TimedRunnable implements Runnable { @Override public final void run() { long start = System.nanoTime(); threadStart=start; try{ value = runInner(); }catch(Exception e){ this.e = e; }finally{ timeNanos = System.nanoTime() - start; } } } | TimedRunnable implements Runnable { @Override public final void run() { long start = System.nanoTime(); threadStart=start; try{ value = runInner(); }catch(Exception e){ this.e = e; }finally{ timeNanos = System.nanoTime() - start; } } } | TimedRunnable implements Runnable { @Override public final void run() { long start = System.nanoTime(); threadStart=start; try{ value = runInner(); }catch(Exception e){ this.e = e; }finally{ timeNanos = System.nanoTime() - start; } } @Override final void run(); long getThreadStart(); long getTimeSpentNanos(); final V getValue(); static List<V> run(final String activity, final Logger logger, final List<TimedRunnable<V>> runnables, int parallelism); static List<V> run(final String activity, final Logger logger, final List<TimedRunnable<V>> runnables,
int parallelism, long timeout); } | TimedRunnable implements Runnable { @Override public final void run() { long start = System.nanoTime(); threadStart=start; try{ value = runInner(); }catch(Exception e){ this.e = e; }finally{ timeNanos = System.nanoTime() - start; } } @Override final void run(); long getThreadStart(); long getTimeSpentNanos(); final V getValue(); static List<V> run(final String activity, final Logger logger, final List<TimedRunnable<V>> runnables, int parallelism); static List<V> run(final String activity, final Logger logger, final List<TimedRunnable<V>> runnables,
int parallelism, long timeout); } |
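All four tests drive the same wrapper pattern: run measures runInner with System.nanoTime and stashes any exception rather than letting it escape the worker thread, so the static run(...) driver can aggregate durations and failures after the pool drains. A self-contained sketch of that wrapper idea (not the Dremio class):

import java.util.concurrent.TimeUnit;

// Minimal sketch of the timed-wrapper idea: time runInner() with
// System.nanoTime() and capture any exception instead of propagating it.
abstract class TimedTaskSketch<V> implements Runnable {
  private V value;
  private Exception error;
  private long timeNanos;

  protected abstract V runInner() throws Exception;

  @Override public final void run() {
    long start = System.nanoTime();
    try {
      value = runInner();
    } catch (Exception e) {
      error = e; // captured for later inspection by the driver
    } finally {
      timeNanos = System.nanoTime() - start;
    }
  }

  long millis() { return TimeUnit.NANOSECONDS.toMillis(timeNanos); }

  public static void main(String[] args) {
    TimedTaskSketch<String> task = new TimedTaskSketch<String>() {
      @Override protected String runInner() throws Exception {
        Thread.sleep(5);
        return "done";
      }
    };
    task.run();
    System.out.println(task.value + " in " + task.millis() + "ms, error=" + task.error);
  }
}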
@Test public void testOptionManagerSetup() throws Exception { try (final QueryContext queryContext = new QueryContext(session(), getSabotContext(), UserBitShared.QueryId.getDefaultInstance());) { final OptionManagerWrapper optionManager = (OptionManagerWrapper) queryContext.getOptions(); final List<OptionManager> optionManagerList = optionManager.getOptionManagers(); assertEquals(4, optionManagerList.size()); assertTrue(optionManagerList.get(0) instanceof QueryOptionManager); assertTrue(optionManagerList.get(1) instanceof SessionOptionManager); assertTrue(optionManagerList.get(2) instanceof EagerCachingOptionManager); assertTrue(optionManagerList.get(3) instanceof DefaultOptionManager); } } | public OptionManager getOptions() { return optionManager; } | QueryContext implements AutoCloseable, ResourceSchedulingContext, OptimizerRulesContext { public OptionManager getOptions() { return optionManager; } } | QueryContext implements AutoCloseable, ResourceSchedulingContext, OptimizerRulesContext { public OptionManager getOptions() { return optionManager; } QueryContext(
final UserSession session,
final SabotContext sabotContext,
QueryId queryId
); QueryContext(
final UserSession session,
final SabotContext sabotContext,
QueryId queryId,
Optional<Boolean> checkMetadataValidity
); QueryContext(
final UserSession session,
final SabotContext sabotContext,
QueryId queryId,
QueryPriority priority,
long maxAllocation,
Predicate<DatasetConfig> datasetValidityChecker
); private QueryContext(
final UserSession session,
final SabotContext sabotContext,
QueryId queryId,
QueryPriority priority,
long maxAllocation,
Predicate<DatasetConfig> datasetValidityChecker,
Optional<Boolean> checkMetadataValidity
); } | QueryContext implements AutoCloseable, ResourceSchedulingContext, OptimizerRulesContext { public OptionManager getOptions() { return optionManager; } QueryContext(
final UserSession session,
final SabotContext sabotContext,
QueryId queryId
); QueryContext(
final UserSession session,
final SabotContext sabotContext,
QueryId queryId,
Optional<Boolean> checkMetadataValidity
); QueryContext(
final UserSession session,
final SabotContext sabotContext,
QueryId queryId,
QueryPriority priority,
long maxAllocation,
Predicate<DatasetConfig> datasetValidityChecker
); private QueryContext(
final UserSession session,
final SabotContext sabotContext,
QueryId queryId,
QueryPriority priority,
long maxAllocation,
Predicate<DatasetConfig> datasetValidityChecker,
Optional<Boolean> checkMetadataValidity
); CatalogService getCatalogService(); Catalog getCatalog(); AccelerationManager getAccelerationManager(); SubstitutionProviderFactory getSubstitutionProviderFactory(); RuleSet getInjectedRules(PlannerPhase phase); @Override QueryId getQueryId(); @Override PlannerSettings getPlannerSettings(); UserSession getSession(); @Override BufferAllocator getAllocator(); @Override String getQueryUserName(); OptionManager getOptions(); QueryOptionManager getQueryOptionManager(); SessionOptionManager getSessionOptionManager(); SystemOptionManager getSystemOptionManager(); ExecutionControls getExecutionControls(); @Override NodeEndpoint getCurrentEndpoint(); LogicalPlanPersistence getLpPersistence(); @Override Collection<NodeEndpoint> getActiveEndpoints(); SabotConfig getConfig(); OptionList getNonDefaultOptions(); @Override FunctionImplementationRegistry getFunctionRegistry(); boolean isUserAuthenticationEnabled(); ScanResult getScanResult(); OperatorTable getOperatorTable(); @Override QueryContextInformation getQueryContextInfo(); @Override ContextInformation getContextInformation(); @Override ArrowBuf getManagedBuffer(); @Override PartitionExplorer getPartitionExplorer(); @Override int registerFunctionErrorContext(FunctionErrorContext errorContext); @Override FunctionErrorContext getFunctionErrorContext(int errorContextId); @Override FunctionErrorContext getFunctionErrorContext(); MaterializationDescriptorProvider getMaterializationProvider(); Provider<WorkStats> getWorkStatsProvider(); WorkloadType getWorkloadType(); @Override BufferManager getBufferManager(); @Override ValueHolder getConstantValueHolder(String value, MinorType type, Function<ArrowBuf, ValueHolder> holderInitializer); void setGroupResourceInformation(GroupResourceInformation groupResourceInformation); GroupResourceInformation getGroupResourceInformation(); @Override void close(); @Override CompilationOptions getCompilationOptions(); ExecutorService getExecutorService(); } | QueryContext implements AutoCloseable, ResourceSchedulingContext, OptimizerRulesContext { public OptionManager getOptions() { return optionManager; } QueryContext(
final UserSession session,
final SabotContext sabotContext,
QueryId queryId
); QueryContext(
final UserSession session,
final SabotContext sabotContext,
QueryId queryId,
Optional<Boolean> checkMetadataValidity
); QueryContext(
final UserSession session,
final SabotContext sabotContext,
QueryId queryId,
QueryPriority priority,
long maxAllocation,
Predicate<DatasetConfig> datasetValidityChecker
); private QueryContext(
final UserSession session,
final SabotContext sabotContext,
QueryId queryId,
QueryPriority priority,
long maxAllocation,
Predicate<DatasetConfig> datasetValidityChecker,
Optional<Boolean> checkMetadataValidity
); CatalogService getCatalogService(); Catalog getCatalog(); AccelerationManager getAccelerationManager(); SubstitutionProviderFactory getSubstitutionProviderFactory(); RuleSet getInjectedRules(PlannerPhase phase); @Override QueryId getQueryId(); @Override PlannerSettings getPlannerSettings(); UserSession getSession(); @Override BufferAllocator getAllocator(); @Override String getQueryUserName(); OptionManager getOptions(); QueryOptionManager getQueryOptionManager(); SessionOptionManager getSessionOptionManager(); SystemOptionManager getSystemOptionManager(); ExecutionControls getExecutionControls(); @Override NodeEndpoint getCurrentEndpoint(); LogicalPlanPersistence getLpPersistence(); @Override Collection<NodeEndpoint> getActiveEndpoints(); SabotConfig getConfig(); OptionList getNonDefaultOptions(); @Override FunctionImplementationRegistry getFunctionRegistry(); boolean isUserAuthenticationEnabled(); ScanResult getScanResult(); OperatorTable getOperatorTable(); @Override QueryContextInformation getQueryContextInfo(); @Override ContextInformation getContextInformation(); @Override ArrowBuf getManagedBuffer(); @Override PartitionExplorer getPartitionExplorer(); @Override int registerFunctionErrorContext(FunctionErrorContext errorContext); @Override FunctionErrorContext getFunctionErrorContext(int errorContextId); @Override FunctionErrorContext getFunctionErrorContext(); MaterializationDescriptorProvider getMaterializationProvider(); Provider<WorkStats> getWorkStatsProvider(); WorkloadType getWorkloadType(); @Override BufferManager getBufferManager(); @Override ValueHolder getConstantValueHolder(String value, MinorType type, Function<ArrowBuf, ValueHolder> holderInitializer); void setGroupResourceInformation(GroupResourceInformation groupResourceInformation); GroupResourceInformation getGroupResourceInformation(); @Override void close(); @Override CompilationOptions getCompilationOptions(); ExecutorService getExecutorService(); } |
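The asserted ordering (query, session, system-backed, default) encodes lookup precedence: the most specific manager that holds a value wins. A hedged sketch of that resolution over plain maps, under the assumption that earlier layers shadow later ones:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class LayeredLookupSketch {
  public static void main(String[] args) {
    Map<String, Long> queryOpts = new HashMap<>();
    Map<String, Long> sessionOpts = new HashMap<>();
    Map<String, Long> systemOpts = new HashMap<>();
    Map<String, Long> defaults = new HashMap<>();
    defaults.put("planner.slice_target", 100_000L);
    sessionOpts.put("planner.slice_target", 15L);

    // Most specific first, mirroring the order asserted in the test:
    // query -> session -> system -> default.
    List<Map<String, Long>> layers = Arrays.asList(queryOpts, sessionOpts, systemOpts, defaults);
    Long resolved = layers.stream()
        .map(layer -> layer.get("planner.slice_target"))
        .filter(v -> v != null)
        .findFirst()
        .orElse(null);
    System.out.println(resolved); // 15: the session value shadows the default
  }
}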
@Test public void testReadIntoArrowBuf() throws Exception { try (final ArrowBuf buffer = allocator.buffer(256)) { final InputStream inputStream = mock(InputStream.class); when(inputStream.read(any(byte[].class))).thenReturn(0); readIntoArrowBuf(inputStream, buffer, 0); assertEquals(0, buffer.writerIndex()); } try (final ArrowBuf buffer = allocator.buffer(256)) { final InputStream inputStream = mock(InputStream.class); when(inputStream.read(any(byte[].class), any(int.class), any(int.class))).thenAnswer(new Answer() { @Override public Integer answer(InvocationOnMock invocation) throws Throwable { byte[] byteBuf = invocation.getArgumentAt(0, byte[].class); int start = invocation.getArgumentAt(1, int.class); int length = invocation.getArgumentAt(2, int.class); for(int i = start; i < Math.min(length, byteBuf.length); i++) { byteBuf[i] = (byte)i; } return Math.min(length, byteBuf.length); } }); readIntoArrowBuf(inputStream, buffer, 256); assertEquals(256, buffer.writerIndex()); for(int i=0; i<256; i++) { assertEquals((byte)i, buffer.getByte(i)); } } try (final ArrowBuf buffer = allocator.buffer(256)) { final InputStream inputStream = mock(InputStream.class); when(inputStream.read(any(byte[].class), any(int.class), any(int.class))).thenAnswer(new Answer() { @Override public Integer answer(InvocationOnMock invocation) throws Throwable { byte[] byteBuf = invocation.getArgumentAt(0, byte[].class); int start = invocation.getArgumentAt(1, int.class); int length = invocation.getArgumentAt(2, int.class); int i=start; int toFill = Math.min(byteBuf.length, 20); toFill = Math.min(toFill, length); while(i<toFill) { byteBuf[i] = (byte)i; i++; } return i; } }); readIntoArrowBuf(inputStream, buffer, 256); assertEquals(256, buffer.writerIndex()); for(int i=0; i<256; i++) { assertEquals((byte)(i%20), buffer.getByte(i)); } } try (final ArrowBuf buffer = allocator.buffer(256)) { final InputStream inputStream = mock(InputStream.class); when(inputStream.read(any(byte[].class), any(int.class), any(int.class))).thenReturn(-1); try { readIntoArrowBuf(inputStream, buffer, 256); fail("Expected above call to fail"); } catch (EOFException ex) { } } } | public static void readIntoArrowBuf(InputStream inputStream, ArrowBuf outputBuffer, long numBytesToRead) throws IOException { final byte[] buffer = REUSABLE_LARGE_BUFFER.get(); while(numBytesToRead > 0) { int len = (int) Math.min(buffer.length, numBytesToRead); final int numBytesRead = inputStream.read(buffer, 0, len); if (numBytesRead == -1 && numBytesToRead > 0) { throw new EOFException("Unexpected end of stream while reading."); } outputBuffer.writeBytes(buffer, 0, numBytesRead); numBytesToRead -= numBytesRead; } } | VectorAccessibleSerializable extends AbstractStreamSerializable { public static void readIntoArrowBuf(InputStream inputStream, ArrowBuf outputBuffer, long numBytesToRead) throws IOException { final byte[] buffer = REUSABLE_LARGE_BUFFER.get(); while(numBytesToRead > 0) { int len = (int) Math.min(buffer.length, numBytesToRead); final int numBytesRead = inputStream.read(buffer, 0, len); if (numBytesRead == -1 && numBytesToRead > 0) { throw new EOFException("Unexpected end of stream while reading."); } outputBuffer.writeBytes(buffer, 0, numBytesRead); numBytesToRead -= numBytesRead; } } } | VectorAccessibleSerializable extends AbstractStreamSerializable { public static void readIntoArrowBuf(InputStream inputStream, ArrowBuf outputBuffer, long numBytesToRead) throws IOException { final byte[] buffer = REUSABLE_LARGE_BUFFER.get(); while(numBytesToRead > 0) { int 
len = (int) Math.min(buffer.length, numBytesToRead); final int numBytesRead = inputStream.read(buffer, 0, len); if (numBytesRead == -1 && numBytesToRead > 0) { throw new EOFException("Unexpected end of stream while reading."); } outputBuffer.writeBytes(buffer, 0, numBytesRead); numBytesToRead -= numBytesRead; } } VectorAccessibleSerializable(BufferAllocator allocator); VectorAccessibleSerializable(BufferAllocator allocator, boolean useCodec, BufferAllocator decompressAllocator); VectorAccessibleSerializable(WritableBatch batch, BufferAllocator allocator); VectorAccessibleSerializable(WritableBatch batch, SelectionVector2 sv2, BufferAllocator allocator, boolean useCodec); } | VectorAccessibleSerializable extends AbstractStreamSerializable { public static void readIntoArrowBuf(InputStream inputStream, ArrowBuf outputBuffer, long numBytesToRead) throws IOException { final byte[] buffer = REUSABLE_LARGE_BUFFER.get(); while(numBytesToRead > 0) { int len = (int) Math.min(buffer.length, numBytesToRead); final int numBytesRead = inputStream.read(buffer, 0, len); if (numBytesRead == -1 && numBytesToRead > 0) { throw new EOFException("Unexpected end of stream while reading."); } outputBuffer.writeBytes(buffer, 0, numBytesRead); numBytesToRead -= numBytesRead; } } VectorAccessibleSerializable(BufferAllocator allocator); VectorAccessibleSerializable(BufferAllocator allocator, boolean useCodec, BufferAllocator decompressAllocator); VectorAccessibleSerializable(WritableBatch batch, BufferAllocator allocator); VectorAccessibleSerializable(WritableBatch batch, SelectionVector2 sv2, BufferAllocator allocator, boolean useCodec); @Override void readFromStream(InputStream input); @Override void writeToStream(OutputStream output); void clear(); VectorContainer get(); SelectionVector2 getSv2(); static void readIntoArrowBuf(InputStream inputStream, ArrowBuf outputBuffer, long numBytesToRead); long compressionTime(); long uncompressionTime(); static void readFromStream(SeekableInputStream input, final ArrowBuf outputBuffer, final int bytesToRead); } | VectorAccessibleSerializable extends AbstractStreamSerializable { public static void readIntoArrowBuf(InputStream inputStream, ArrowBuf outputBuffer, long numBytesToRead) throws IOException { final byte[] buffer = REUSABLE_LARGE_BUFFER.get(); while(numBytesToRead > 0) { int len = (int) Math.min(buffer.length, numBytesToRead); final int numBytesRead = inputStream.read(buffer, 0, len); if (numBytesRead == -1 && numBytesToRead > 0) { throw new EOFException("Unexpected end of stream while reading."); } outputBuffer.writeBytes(buffer, 0, numBytesRead); numBytesToRead -= numBytesRead; } } VectorAccessibleSerializable(BufferAllocator allocator); VectorAccessibleSerializable(BufferAllocator allocator, boolean useCodec, BufferAllocator decompressAllocator); VectorAccessibleSerializable(WritableBatch batch, BufferAllocator allocator); VectorAccessibleSerializable(WritableBatch batch, SelectionVector2 sv2, BufferAllocator allocator, boolean useCodec); @Override void readFromStream(InputStream input); @Override void writeToStream(OutputStream output); void clear(); VectorContainer get(); SelectionVector2 getSv2(); static void readIntoArrowBuf(InputStream inputStream, ArrowBuf outputBuffer, long numBytesToRead); long compressionTime(); long uncompressionTime(); static void readFromStream(SeekableInputStream input, final ArrowBuf outputBuffer, final int bytesToRead); static final int RAW_CHUNK_SIZE_TO_COMPRESS; } |
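The mocks above simulate three stream behaviors (an empty read, full reads, and repeated short reads) against the same copy loop. Below is that loop run against a real stream, with the Dremio method's body inlined for a runnable demo; the ArrowBuf import path is an assumption that varies by Arrow version (org.apache.arrow.memory.ArrowBuf in recent releases, io.netty.buffer.ArrowBuf in older ones):

import java.io.ByteArrayInputStream;
import java.io.EOFException;
import org.apache.arrow.memory.ArrowBuf;
import org.apache.arrow.memory.BufferAllocator;
import org.apache.arrow.memory.RootAllocator;

public class ReadIntoArrowBufSketch {
  public static void main(String[] args) throws Exception {
    byte[] data = new byte[256];
    for (int i = 0; i < data.length; i++) {
      data[i] = (byte) i;
    }
    try (BufferAllocator allocator = new RootAllocator(1 << 20)) {
      ArrowBuf buffer = allocator.buffer(256);
      try {
        ByteArrayInputStream in = new ByteArrayInputStream(data);
        byte[] chunk = new byte[64]; // stand-in for the reusable large buffer
        long remaining = 256;
        while (remaining > 0) {
          int len = (int) Math.min(chunk.length, remaining);
          int n = in.read(chunk, 0, len);
          if (n == -1) {
            // A premature end of stream surfaces exactly as in the last mock case.
            throw new EOFException("Unexpected end of stream while reading.");
          }
          buffer.writeBytes(chunk, 0, n);
          remaining -= n;
        }
        System.out.println(buffer.writerIndex()); // 256
      } finally {
        buffer.close();
      }
    }
  }
}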
@Test public void testGetNonDefaultOptions() throws Exception { OptionManager optionManager = OptionManagerWrapper.Builder.newBuilder() .withOptionValidatorProvider(optionValidatorListing) .withOptionManager(defaultOptionManager) .withOptionManager(systemOptionManager) .withOptionManager(sessionOptionManager) .withOptionManager(queryOptionManager) .build(); int initialOptionsCount = defaultOptionManager.getNonDefaultOptions().size() + systemOptionManager.getNonDefaultOptions().size() + sessionOptionManager.getNonDefaultOptions().size() + queryOptionManager.getNonDefaultOptions().size(); List<OptionValue> optionValues = Arrays.asList( OptionValue.createLong(OptionValue.OptionType.SYSTEM, SLICE_TARGET, 10), OptionValue.createLong(OptionValue.OptionType.SESSION, SLICE_TARGET, 15), OptionValue.createLong(OptionValue.OptionType.QUERY, SLICE_TARGET, 20), OptionValue.createBoolean(OptionValue.OptionType.SESSION, ENABLE_VERBOSE_ERRORS_KEY, true), OptionValue.createBoolean(OptionValue.OptionType.QUERY, ENABLE_VERBOSE_ERRORS_KEY, true) ); optionValues.forEach(optionManager::setOption); OptionList nonDefaultOptions = optionManager.getNonDefaultOptions(); assertEquals(initialOptionsCount + optionValues.size(), nonDefaultOptions.size()); for (OptionValue optionValue : optionValues) { assertTrue(nonDefaultOptions.contains(optionValue)); } for (OptionValue nonDefaultOption : nonDefaultOptions) { assertNotEquals(nonDefaultOption, defaultOptionManager.getOption(nonDefaultOption.getName())); } } | @Override public OptionList getNonDefaultOptions() { final OptionList optionList = new OptionList(); for (OptionManager optionManager : optionManagers) { OptionList nonDefaultOptions = optionManager.getNonDefaultOptions(); optionList.merge(nonDefaultOptions); } return optionList; } | OptionManagerWrapper extends BaseOptionManager { @Override public OptionList getNonDefaultOptions() { final OptionList optionList = new OptionList(); for (OptionManager optionManager : optionManagers) { OptionList nonDefaultOptions = optionManager.getNonDefaultOptions(); optionList.merge(nonDefaultOptions); } return optionList; } } | OptionManagerWrapper extends BaseOptionManager { @Override public OptionList getNonDefaultOptions() { final OptionList optionList = new OptionList(); for (OptionManager optionManager : optionManagers) { OptionList nonDefaultOptions = optionManager.getNonDefaultOptions(); optionList.merge(nonDefaultOptions); } return optionList; } OptionManagerWrapper(OptionValidatorListing optionValidatorListing, List<OptionManager> optionManagers); } | OptionManagerWrapper extends BaseOptionManager { @Override public OptionList getNonDefaultOptions() { final OptionList optionList = new OptionList(); for (OptionManager optionManager : optionManagers) { OptionList nonDefaultOptions = optionManager.getNonDefaultOptions(); optionList.merge(nonDefaultOptions); } return optionList; } OptionManagerWrapper(OptionValidatorListing optionValidatorListing, List<OptionManager> optionManagers); OptionValidatorListing getOptionValidatorListing(); @VisibleForTesting List<OptionManager> getOptionManagers(); @Override boolean setOption(OptionValue value); @Override boolean deleteOption(String name, OptionValue.OptionType type); @Override boolean deleteAllOptions(OptionValue.OptionType type); @Override OptionValue getOption(String name); @Override OptionList getDefaultOptions(); @Override OptionList getNonDefaultOptions(); OptionValidator getValidator(String name); @Override Iterator<OptionValue> iterator(); @Override void 
addOptionChangeListener(OptionChangeListener optionChangeListener); } | OptionManagerWrapper extends BaseOptionManager { @Override public OptionList getNonDefaultOptions() { final OptionList optionList = new OptionList(); for (OptionManager optionManager : optionManagers) { OptionList nonDefaultOptions = optionManager.getNonDefaultOptions(); optionList.merge(nonDefaultOptions); } return optionList; } OptionManagerWrapper(OptionValidatorListing optionValidatorListing, List<OptionManager> optionManagers); OptionValidatorListing getOptionValidatorListing(); @VisibleForTesting List<OptionManager> getOptionManagers(); @Override boolean setOption(OptionValue value); @Override boolean deleteOption(String name, OptionValue.OptionType type); @Override boolean deleteAllOptions(OptionValue.OptionType type); @Override OptionValue getOption(String name); @Override OptionList getDefaultOptions(); @Override OptionList getNonDefaultOptions(); OptionValidator getValidator(String name); @Override Iterator<OptionValue> iterator(); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); } |
@Test public void testGetDefaultOptions() throws Exception { OptionManager optionManager = OptionManagerWrapper.Builder.newBuilder() .withOptionValidatorProvider(optionValidatorListing) .withOptionManager(defaultOptionManager) .withOptionManager(systemOptionManager) .withOptionManager(sessionOptionManager) .withOptionManager(queryOptionManager) .build(); OptionList defaultOptions = optionManager.getDefaultOptions(); assertEquals(defaultOptionManager.getDefaultOptions().size(), defaultOptions.size()); for (OptionValue defaultOption : defaultOptions) { assertEquals(defaultOption, optionValidatorListing.getValidator(defaultOption.getName()).getDefault()); } } | @Override public OptionList getDefaultOptions() { final OptionList optionList = new OptionList(); for (OptionManager optionManager : optionManagers) { OptionList defaultOptions = optionManager.getDefaultOptions(); optionList.merge(defaultOptions); } return optionList; } | OptionManagerWrapper extends BaseOptionManager { @Override public OptionList getDefaultOptions() { final OptionList optionList = new OptionList(); for (OptionManager optionManager : optionManagers) { OptionList defaultOptions = optionManager.getDefaultOptions(); optionList.merge(defaultOptions); } return optionList; } } | OptionManagerWrapper extends BaseOptionManager { @Override public OptionList getDefaultOptions() { final OptionList optionList = new OptionList(); for (OptionManager optionManager : optionManagers) { OptionList defaultOptions = optionManager.getDefaultOptions(); optionList.merge(defaultOptions); } return optionList; } OptionManagerWrapper(OptionValidatorListing optionValidatorListing, List<OptionManager> optionManagers); } | OptionManagerWrapper extends BaseOptionManager { @Override public OptionList getDefaultOptions() { final OptionList optionList = new OptionList(); for (OptionManager optionManager : optionManagers) { OptionList defaultOptions = optionManager.getDefaultOptions(); optionList.merge(defaultOptions); } return optionList; } OptionManagerWrapper(OptionValidatorListing optionValidatorListing, List<OptionManager> optionManagers); OptionValidatorListing getOptionValidatorListing(); @VisibleForTesting List<OptionManager> getOptionManagers(); @Override boolean setOption(OptionValue value); @Override boolean deleteOption(String name, OptionValue.OptionType type); @Override boolean deleteAllOptions(OptionValue.OptionType type); @Override OptionValue getOption(String name); @Override OptionList getDefaultOptions(); @Override OptionList getNonDefaultOptions(); OptionValidator getValidator(String name); @Override Iterator<OptionValue> iterator(); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); } | OptionManagerWrapper extends BaseOptionManager { @Override public OptionList getDefaultOptions() { final OptionList optionList = new OptionList(); for (OptionManager optionManager : optionManagers) { OptionList defaultOptions = optionManager.getDefaultOptions(); optionList.merge(defaultOptions); } return optionList; } OptionManagerWrapper(OptionValidatorListing optionValidatorListing, List<OptionManager> optionManagers); OptionValidatorListing getOptionValidatorListing(); @VisibleForTesting List<OptionManager> getOptionManagers(); @Override boolean setOption(OptionValue value); @Override boolean deleteOption(String name, OptionValue.OptionType type); @Override boolean deleteAllOptions(OptionValue.OptionType type); @Override OptionValue getOption(String name); @Override OptionList getDefaultOptions(); @Override 
OptionList getNonDefaultOptions(); OptionValidator getValidator(String name); @Override Iterator<OptionValue> iterator(); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); } |
@Test public void testIterator() throws Exception { OptionManager optionManager = OptionManagerWrapper.Builder.newBuilder() .withOptionValidatorProvider(optionValidatorListing) .withOptionManager(defaultOptionManager) .withOptionManager(systemOptionManager) .withOptionManager(sessionOptionManager) .withOptionManager(queryOptionManager) .build(); int initialSystemOptionCount = systemOptionManager.getNonDefaultOptions().size(); int initialOptionsCount = defaultOptionManager.getNonDefaultOptions().size() + initialSystemOptionCount + sessionOptionManager.getNonDefaultOptions().size() + queryOptionManager.getNonDefaultOptions().size(); int defaultOptionsCount = optionValidatorListing.getValidatorList().size(); List<OptionValue> optionValues = Arrays.asList( OptionValue.createLong(OptionValue.OptionType.SYSTEM, SLICE_TARGET, 10), OptionValue.createLong(OptionValue.OptionType.SESSION, SLICE_TARGET, 15), OptionValue.createLong(OptionValue.OptionType.QUERY, SLICE_TARGET, 20), OptionValue.createBoolean(OptionValue.OptionType.SESSION, ENABLE_VERBOSE_ERRORS_KEY, true), OptionValue.createBoolean(OptionValue.OptionType.QUERY, ENABLE_VERBOSE_ERRORS_KEY, true) ); AtomicInteger systemOptionsCount = new AtomicInteger(initialSystemOptionCount); optionValues.forEach(optionValue -> { optionManager.setOption(optionValue); if (optionValue.getType().equals(OptionValue.OptionType.SYSTEM)) { systemOptionsCount.addAndGet(1); } }); OptionList iteratorResult = new OptionList(); optionManager.iterator().forEachRemaining(iteratorResult::add); assertEquals(initialOptionsCount + defaultOptionsCount + optionValues.size() - systemOptionsCount.get(), iteratorResult.size()); } | @Override public Iterator<OptionValue> iterator() { final OptionList resultList = new OptionList(); final Map<String, OptionValue> optionsMap = CaseInsensitiveMap.newHashMap(); final OptionList defaultOptions = getDefaultOptions(); defaultOptions.forEach(optionValue -> optionsMap.put(optionValue.getName(), optionValue)); final List<OptionManager> reversedOptionManagers = Lists.reverse(optionManagers); for (OptionManager optionManager : reversedOptionManagers) { OptionList optionList = optionManager.getNonDefaultOptions(); for (OptionValue optionValue : optionList) { if (optionValue.getType() == optionsMap.get(optionValue.getName()).getType()) { optionsMap.put(optionValue.getName(), optionValue); } else { resultList.add(optionValue); } } } resultList.addAll(optionsMap.values()); return resultList.iterator(); } | OptionManagerWrapper extends BaseOptionManager { @Override public Iterator<OptionValue> iterator() { final OptionList resultList = new OptionList(); final Map<String, OptionValue> optionsMap = CaseInsensitiveMap.newHashMap(); final OptionList defaultOptions = getDefaultOptions(); defaultOptions.forEach(optionValue -> optionsMap.put(optionValue.getName(), optionValue)); final List<OptionManager> reversedOptionManagers = Lists.reverse(optionManagers); for (OptionManager optionManager : reversedOptionManagers) { OptionList optionList = optionManager.getNonDefaultOptions(); for (OptionValue optionValue : optionList) { if (optionValue.getType() == optionsMap.get(optionValue.getName()).getType()) { optionsMap.put(optionValue.getName(), optionValue); } else { resultList.add(optionValue); } } } resultList.addAll(optionsMap.values()); return resultList.iterator(); } } | OptionManagerWrapper extends BaseOptionManager { @Override public Iterator<OptionValue> iterator() { final OptionList resultList = new OptionList(); final Map<String, OptionValue> optionsMap = CaseInsensitiveMap.newHashMap(); final OptionList defaultOptions = getDefaultOptions(); defaultOptions.forEach(optionValue -> optionsMap.put(optionValue.getName(), optionValue)); final List<OptionManager> reversedOptionManagers = Lists.reverse(optionManagers); for (OptionManager optionManager : reversedOptionManagers) { OptionList optionList = optionManager.getNonDefaultOptions(); for (OptionValue optionValue : optionList) { if (optionValue.getType() == optionsMap.get(optionValue.getName()).getType()) { optionsMap.put(optionValue.getName(), optionValue); } else { resultList.add(optionValue); } } } resultList.addAll(optionsMap.values()); return resultList.iterator(); } OptionManagerWrapper(OptionValidatorListing optionValidatorListing, List<OptionManager> optionManagers); } | OptionManagerWrapper extends BaseOptionManager { @Override public Iterator<OptionValue> iterator() { final OptionList resultList = new OptionList(); final Map<String, OptionValue> optionsMap = CaseInsensitiveMap.newHashMap(); final OptionList defaultOptions = getDefaultOptions(); defaultOptions.forEach(optionValue -> optionsMap.put(optionValue.getName(), optionValue)); final List<OptionManager> reversedOptionManagers = Lists.reverse(optionManagers); for (OptionManager optionManager : reversedOptionManagers) { OptionList optionList = optionManager.getNonDefaultOptions(); for (OptionValue optionValue : optionList) { if (optionValue.getType() == optionsMap.get(optionValue.getName()).getType()) { optionsMap.put(optionValue.getName(), optionValue); } else { resultList.add(optionValue); } } } resultList.addAll(optionsMap.values()); return resultList.iterator(); } OptionManagerWrapper(OptionValidatorListing optionValidatorListing, List<OptionManager> optionManagers); OptionValidatorListing getOptionValidatorListing(); @VisibleForTesting List<OptionManager> getOptionManagers(); @Override boolean setOption(OptionValue value); @Override boolean deleteOption(String name, OptionValue.OptionType type); @Override boolean deleteAllOptions(OptionValue.OptionType type); @Override OptionValue getOption(String name); @Override OptionList getDefaultOptions(); @Override OptionList getNonDefaultOptions(); OptionValidator getValidator(String name); @Override Iterator<OptionValue> iterator(); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); } | OptionManagerWrapper extends BaseOptionManager { @Override public Iterator<OptionValue> iterator() { final OptionList resultList = new OptionList(); final Map<String, OptionValue> optionsMap = CaseInsensitiveMap.newHashMap(); final OptionList defaultOptions = getDefaultOptions(); defaultOptions.forEach(optionValue -> optionsMap.put(optionValue.getName(), optionValue)); final List<OptionManager> reversedOptionManagers = Lists.reverse(optionManagers); for (OptionManager optionManager : reversedOptionManagers) { OptionList optionList = optionManager.getNonDefaultOptions(); for (OptionValue optionValue : optionList) { if (optionValue.getType() == optionsMap.get(optionValue.getName()).getType()) { optionsMap.put(optionValue.getName(), optionValue); } else { resultList.add(optionValue); } } } resultList.addAll(optionsMap.values()); return resultList.iterator(); } OptionManagerWrapper(OptionValidatorListing optionValidatorListing, List<OptionManager> optionManagers); OptionValidatorListing getOptionValidatorListing(); @VisibleForTesting List<OptionManager> getOptionManagers(); @Override boolean setOption(OptionValue value); @Override boolean deleteOption(String name, OptionValue.OptionType type); @Override boolean deleteAllOptions(OptionValue.OptionType type); @Override OptionValue getOption(String name); @Override OptionList getDefaultOptions(); @Override OptionList getNonDefaultOptions(); OptionValidator getValidator(String name); @Override Iterator<OptionValue> iterator(); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); } |
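
The iterator row above is the subtlest of the option-manager pairs: defaults seed a name-keyed map, the managers are walked lowest-precedence-first (hence Lists.reverse), same-scope values overwrite the map entry in place, and values from other scopes are appended as extra entries — which is why testIterator subtracts the SYSTEM sets from the expected size. A minimal, self-contained model of that merge (simplified stand-in types, not the Dremio API):

// A minimal model of the merge done by OptionManagerWrapper.iterator().
// Scope and Opt are illustrative stand-ins, not Dremio classes.
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class LayeredOptionsSketch {
  enum Scope { SYSTEM, SESSION, QUERY }
  record Opt(String name, Scope scope, long value) {}

  public static void main(String[] args) {
    // Defaults are seeded first, keyed by name (defaults carry SYSTEM scope).
    Map<String, Opt> byName = new HashMap<>();
    byName.put("slice.target", new Opt("slice.target", Scope.SYSTEM, 100_000));

    // Non-default values, lowest precedence first (system, then session, then query).
    List<Opt> nonDefaults = List.of(
        new Opt("slice.target", Scope.SYSTEM, 10),   // same scope: replaces the default entry
        new Opt("slice.target", Scope.SESSION, 15),  // different scope: appended
        new Opt("slice.target", Scope.QUERY, 20));   // different scope: appended

    List<Opt> result = new ArrayList<>();
    for (Opt o : nonDefaults) {
      if (o.scope() == byName.get(o.name()).scope()) {
        byName.put(o.name(), o); // overwrite in place, no size growth
      } else {
        result.add(o);           // other scopes accumulate as separate entries
      }
    }
    result.addAll(byName.values());
    // Prints 3 entries: SESSION and QUERY values plus the overridden SYSTEM value.
    System.out.println(result);
  }
}
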
@Test public void testConvert() throws Exception { TransformBase transform = new TransformField("source", "new", false, new FieldConvertCase(LOWER_CASE).wrap()); validate(transform); } | public Transform wrap() { return acceptor.wrap(this); } | TransformBase { public Transform wrap() { return acceptor.wrap(this); } } | TransformBase { public Transform wrap() { return acceptor.wrap(this); } } | TransformBase { public Transform wrap() { return acceptor.wrap(this); } final T accept(TransformVisitor<T> visitor); Transform wrap(); @Override String toString(); static TransformBase unwrap(Transform t); static Converter<TransformBase, Transform> converter(); } | TransformBase { public Transform wrap() { return acceptor.wrap(this); } final T accept(TransformVisitor<T> visitor); Transform wrap(); @Override String toString(); static TransformBase unwrap(Transform t); static Converter<TransformBase, Transform> converter(); static final Acceptor<TransformBase, TransformVisitor<?>, Transform> acceptor; } |
@Test public void testBoolOptionToProto() { final OptionValue option = OptionValue.createBoolean(OptionValue.OptionType.SYSTEM, "test.option", true); final OptionValueProto optionProto = OptionValueProtoUtils.toOptionValueProto(option); assertTrue(verifyEquivalent(option, optionProto)); } | public static OptionValueProto toOptionValueProto(OptionValue optionValue) { checkArgument(optionValue.getType() == OptionValue.OptionType.SYSTEM, String.format("Invalid OptionType. OptionType must be 'SYSTEM', was given '%s'", optionValue.getType())); final OptionValue.Kind kind = optionValue.getKind(); final OptionValueProto.Builder builder = OptionValueProto.newBuilder() .setName(optionValue.getName()); switch (kind) { case BOOLEAN: builder.setBoolVal(optionValue.getBoolVal()); break; case LONG: builder.setNumVal(optionValue.getNumVal()); break; case STRING: builder.setStringVal(optionValue.getStringVal()); break; case DOUBLE: builder.setFloatVal(optionValue.getFloatVal()); break; default: throw new IllegalArgumentException("Invalid OptionValue kind"); } return builder.build(); } | OptionValueProtoUtils { public static OptionValueProto toOptionValueProto(OptionValue optionValue) { checkArgument(optionValue.getType() == OptionValue.OptionType.SYSTEM, String.format("Invalid OptionType. OptionType must be 'SYSTEM', was given '%s'", optionValue.getType())); final OptionValue.Kind kind = optionValue.getKind(); final OptionValueProto.Builder builder = OptionValueProto.newBuilder() .setName(optionValue.getName()); switch (kind) { case BOOLEAN: builder.setBoolVal(optionValue.getBoolVal()); break; case LONG: builder.setNumVal(optionValue.getNumVal()); break; case STRING: builder.setStringVal(optionValue.getStringVal()); break; case DOUBLE: builder.setFloatVal(optionValue.getFloatVal()); break; default: throw new IllegalArgumentException("Invalid OptionValue kind"); } return builder.build(); } } | OptionValueProtoUtils { public static OptionValueProto toOptionValueProto(OptionValue optionValue) { checkArgument(optionValue.getType() == OptionValue.OptionType.SYSTEM, String.format("Invalid OptionType. OptionType must be 'SYSTEM', was given '%s'", optionValue.getType())); final OptionValue.Kind kind = optionValue.getKind(); final OptionValueProto.Builder builder = OptionValueProto.newBuilder() .setName(optionValue.getName()); switch (kind) { case BOOLEAN: builder.setBoolVal(optionValue.getBoolVal()); break; case LONG: builder.setNumVal(optionValue.getNumVal()); break; case STRING: builder.setStringVal(optionValue.getStringVal()); break; case DOUBLE: builder.setFloatVal(optionValue.getFloatVal()); break; default: throw new IllegalArgumentException("Invalid OptionValue kind"); } return builder.build(); } private OptionValueProtoUtils(); } | OptionValueProtoUtils { public static OptionValueProto toOptionValueProto(OptionValue optionValue) { checkArgument(optionValue.getType() == OptionValue.OptionType.SYSTEM, String.format("Invalid OptionType. OptionType must be 'SYSTEM', was given '%s'", optionValue.getType())); final OptionValue.Kind kind = optionValue.getKind(); final OptionValueProto.Builder builder = OptionValueProto.newBuilder() .setName(optionValue.getName()); switch (kind) { case BOOLEAN: builder.setBoolVal(optionValue.getBoolVal()); break; case LONG: builder.setNumVal(optionValue.getNumVal()); break; case STRING: builder.setStringVal(optionValue.getStringVal()); break; case DOUBLE: builder.setFloatVal(optionValue.getFloatVal()); break; default: throw new IllegalArgumentException("Invalid OptionValue kind"); } return builder.build(); } private OptionValueProtoUtils(); static OptionValueProto toOptionValueProto(OptionValue optionValue); static OptionValue toOptionValue(OptionValueProto value); static OptionValueProtoList toOptionValueProtoList(Collection<OptionValueProto> optionValueProtos); } | OptionValueProtoUtils { public static OptionValueProto toOptionValueProto(OptionValue optionValue) { checkArgument(optionValue.getType() == OptionValue.OptionType.SYSTEM, String.format("Invalid OptionType. OptionType must be 'SYSTEM', was given '%s'", optionValue.getType())); final OptionValue.Kind kind = optionValue.getKind(); final OptionValueProto.Builder builder = OptionValueProto.newBuilder() .setName(optionValue.getName()); switch (kind) { case BOOLEAN: builder.setBoolVal(optionValue.getBoolVal()); break; case LONG: builder.setNumVal(optionValue.getNumVal()); break; case STRING: builder.setStringVal(optionValue.getStringVal()); break; case DOUBLE: builder.setFloatVal(optionValue.getFloatVal()); break; default: throw new IllegalArgumentException("Invalid OptionValue kind"); } return builder.build(); } private OptionValueProtoUtils(); static OptionValueProto toOptionValueProto(OptionValue optionValue); static OptionValue toOptionValue(OptionValueProto value); static OptionValueProtoList toOptionValueProtoList(Collection<OptionValueProto> optionValueProtos); } |
@Test public void testLongOptionToProto() { final OptionValue option = OptionValue.createLong(OptionValue.OptionType.SYSTEM, "test.option", 1234); final OptionValueProto optionProto = OptionValueProtoUtils.toOptionValueProto(option); assertTrue(verifyEquivalent(option, optionProto)); } | public static OptionValueProto toOptionValueProto(OptionValue optionValue) { checkArgument(optionValue.getType() == OptionValue.OptionType.SYSTEM, String.format("Invalid OptionType. OptionType must be 'SYSTEM', was given '%s'", optionValue.getType())); final OptionValue.Kind kind = optionValue.getKind(); final OptionValueProto.Builder builder = OptionValueProto.newBuilder() .setName(optionValue.getName()); switch (kind) { case BOOLEAN: builder.setBoolVal(optionValue.getBoolVal()); break; case LONG: builder.setNumVal(optionValue.getNumVal()); break; case STRING: builder.setStringVal(optionValue.getStringVal()); break; case DOUBLE: builder.setFloatVal(optionValue.getFloatVal()); break; default: throw new IllegalArgumentException("Invalid OptionValue kind"); } return builder.build(); } | OptionValueProtoUtils { public static OptionValueProto toOptionValueProto(OptionValue optionValue) { checkArgument(optionValue.getType() == OptionValue.OptionType.SYSTEM, String.format("Invalid OptionType. OptionType must be 'SYSTEM', was given '%s'", optionValue.getType())); final OptionValue.Kind kind = optionValue.getKind(); final OptionValueProto.Builder builder = OptionValueProto.newBuilder() .setName(optionValue.getName()); switch (kind) { case BOOLEAN: builder.setBoolVal(optionValue.getBoolVal()); break; case LONG: builder.setNumVal(optionValue.getNumVal()); break; case STRING: builder.setStringVal(optionValue.getStringVal()); break; case DOUBLE: builder.setFloatVal(optionValue.getFloatVal()); break; default: throw new IllegalArgumentException("Invalid OptionValue kind"); } return builder.build(); } } | OptionValueProtoUtils { public static OptionValueProto toOptionValueProto(OptionValue optionValue) { checkArgument(optionValue.getType() == OptionValue.OptionType.SYSTEM, String.format("Invalid OptionType. OptionType must be 'SYSTEM', was given '%s'", optionValue.getType())); final OptionValue.Kind kind = optionValue.getKind(); final OptionValueProto.Builder builder = OptionValueProto.newBuilder() .setName(optionValue.getName()); switch (kind) { case BOOLEAN: builder.setBoolVal(optionValue.getBoolVal()); break; case LONG: builder.setNumVal(optionValue.getNumVal()); break; case STRING: builder.setStringVal(optionValue.getStringVal()); break; case DOUBLE: builder.setFloatVal(optionValue.getFloatVal()); break; default: throw new IllegalArgumentException("Invalid OptionValue kind"); } return builder.build(); } private OptionValueProtoUtils(); } | OptionValueProtoUtils { public static OptionValueProto toOptionValueProto(OptionValue optionValue) { checkArgument(optionValue.getType() == OptionValue.OptionType.SYSTEM, String.format("Invalid OptionType. OptionType must be 'SYSTEM', was given '%s'", optionValue.getType())); final OptionValue.Kind kind = optionValue.getKind(); final OptionValueProto.Builder builder = OptionValueProto.newBuilder() .setName(optionValue.getName()); switch (kind) { case BOOLEAN: builder.setBoolVal(optionValue.getBoolVal()); break; case LONG: builder.setNumVal(optionValue.getNumVal()); break; case STRING: builder.setStringVal(optionValue.getStringVal()); break; case DOUBLE: builder.setFloatVal(optionValue.getFloatVal()); break; default: throw new IllegalArgumentException("Invalid OptionValue kind"); } return builder.build(); } private OptionValueProtoUtils(); static OptionValueProto toOptionValueProto(OptionValue optionValue); static OptionValue toOptionValue(OptionValueProto value); static OptionValueProtoList toOptionValueProtoList(Collection<OptionValueProto> optionValueProtos); } | OptionValueProtoUtils { public static OptionValueProto toOptionValueProto(OptionValue optionValue) { checkArgument(optionValue.getType() == OptionValue.OptionType.SYSTEM, String.format("Invalid OptionType. OptionType must be 'SYSTEM', was given '%s'", optionValue.getType())); final OptionValue.Kind kind = optionValue.getKind(); final OptionValueProto.Builder builder = OptionValueProto.newBuilder() .setName(optionValue.getName()); switch (kind) { case BOOLEAN: builder.setBoolVal(optionValue.getBoolVal()); break; case LONG: builder.setNumVal(optionValue.getNumVal()); break; case STRING: builder.setStringVal(optionValue.getStringVal()); break; case DOUBLE: builder.setFloatVal(optionValue.getFloatVal()); break; default: throw new IllegalArgumentException("Invalid OptionValue kind"); } return builder.build(); } private OptionValueProtoUtils(); static OptionValueProto toOptionValueProto(OptionValue optionValue); static OptionValue toOptionValue(OptionValueProto value); static OptionValueProtoList toOptionValueProtoList(Collection<OptionValueProto> optionValueProtos); } |
@Test public void testStringOptionToProto() { final OptionValue option = OptionValue.createString(OptionValue.OptionType.SYSTEM, "test.option", "test-option"); final OptionValueProto optionProto = OptionValueProtoUtils.toOptionValueProto(option); assertTrue(verifyEquivalent(option, optionProto)); } | public static OptionValueProto toOptionValueProto(OptionValue optionValue) { checkArgument(optionValue.getType() == OptionValue.OptionType.SYSTEM, String.format("Invalid OptionType. OptionType must be 'SYSTEM', was given '%s'", optionValue.getType())); final OptionValue.Kind kind = optionValue.getKind(); final OptionValueProto.Builder builder = OptionValueProto.newBuilder() .setName(optionValue.getName()); switch (kind) { case BOOLEAN: builder.setBoolVal(optionValue.getBoolVal()); break; case LONG: builder.setNumVal(optionValue.getNumVal()); break; case STRING: builder.setStringVal(optionValue.getStringVal()); break; case DOUBLE: builder.setFloatVal(optionValue.getFloatVal()); break; default: throw new IllegalArgumentException("Invalid OptionValue kind"); } return builder.build(); } | OptionValueProtoUtils { public static OptionValueProto toOptionValueProto(OptionValue optionValue) { checkArgument(optionValue.getType() == OptionValue.OptionType.SYSTEM, String.format("Invalid OptionType. OptionType must be 'SYSTEM', was given '%s'", optionValue.getType())); final OptionValue.Kind kind = optionValue.getKind(); final OptionValueProto.Builder builder = OptionValueProto.newBuilder() .setName(optionValue.getName()); switch (kind) { case BOOLEAN: builder.setBoolVal(optionValue.getBoolVal()); break; case LONG: builder.setNumVal(optionValue.getNumVal()); break; case STRING: builder.setStringVal(optionValue.getStringVal()); break; case DOUBLE: builder.setFloatVal(optionValue.getFloatVal()); break; default: throw new IllegalArgumentException("Invalid OptionValue kind"); } return builder.build(); } } | OptionValueProtoUtils { public static OptionValueProto toOptionValueProto(OptionValue optionValue) { checkArgument(optionValue.getType() == OptionValue.OptionType.SYSTEM, String.format("Invalid OptionType. OptionType must be 'SYSTEM', was given '%s'", optionValue.getType())); final OptionValue.Kind kind = optionValue.getKind(); final OptionValueProto.Builder builder = OptionValueProto.newBuilder() .setName(optionValue.getName()); switch (kind) { case BOOLEAN: builder.setBoolVal(optionValue.getBoolVal()); break; case LONG: builder.setNumVal(optionValue.getNumVal()); break; case STRING: builder.setStringVal(optionValue.getStringVal()); break; case DOUBLE: builder.setFloatVal(optionValue.getFloatVal()); break; default: throw new IllegalArgumentException("Invalid OptionValue kind"); } return builder.build(); } private OptionValueProtoUtils(); } | OptionValueProtoUtils { public static OptionValueProto toOptionValueProto(OptionValue optionValue) { checkArgument(optionValue.getType() == OptionValue.OptionType.SYSTEM, String.format("Invalid OptionType. OptionType must be 'SYSTEM', was given '%s'", optionValue.getType())); final OptionValue.Kind kind = optionValue.getKind(); final OptionValueProto.Builder builder = OptionValueProto.newBuilder() .setName(optionValue.getName()); switch (kind) { case BOOLEAN: builder.setBoolVal(optionValue.getBoolVal()); break; case LONG: builder.setNumVal(optionValue.getNumVal()); break; case STRING: builder.setStringVal(optionValue.getStringVal()); break; case DOUBLE: builder.setFloatVal(optionValue.getFloatVal()); break; default: throw new IllegalArgumentException("Invalid OptionValue kind"); } return builder.build(); } private OptionValueProtoUtils(); static OptionValueProto toOptionValueProto(OptionValue optionValue); static OptionValue toOptionValue(OptionValueProto value); static OptionValueProtoList toOptionValueProtoList(Collection<OptionValueProto> optionValueProtos); } | OptionValueProtoUtils { public static OptionValueProto toOptionValueProto(OptionValue optionValue) { checkArgument(optionValue.getType() == OptionValue.OptionType.SYSTEM, String.format("Invalid OptionType. OptionType must be 'SYSTEM', was given '%s'", optionValue.getType())); final OptionValue.Kind kind = optionValue.getKind(); final OptionValueProto.Builder builder = OptionValueProto.newBuilder() .setName(optionValue.getName()); switch (kind) { case BOOLEAN: builder.setBoolVal(optionValue.getBoolVal()); break; case LONG: builder.setNumVal(optionValue.getNumVal()); break; case STRING: builder.setStringVal(optionValue.getStringVal()); break; case DOUBLE: builder.setFloatVal(optionValue.getFloatVal()); break; default: throw new IllegalArgumentException("Invalid OptionValue kind"); } return builder.build(); } private OptionValueProtoUtils(); static OptionValueProto toOptionValueProto(OptionValue optionValue); static OptionValue toOptionValue(OptionValueProto value); static OptionValueProtoList toOptionValueProtoList(Collection<OptionValueProto> optionValueProtos); } |
@Test public void testDoubleOptionToProto() { final OptionValue option = OptionValue.createDouble(OptionValue.OptionType.SYSTEM, "test.option", 1234.1234); final OptionValueProto optionProto = OptionValueProtoUtils.toOptionValueProto(option); assertTrue(verifyEquivalent(option, optionProto)); } | public static OptionValueProto toOptionValueProto(OptionValue optionValue) { checkArgument(optionValue.getType() == OptionValue.OptionType.SYSTEM, String.format("Invalid OptionType. OptionType must be 'SYSTEM', was given '%s'", optionValue.getType())); final OptionValue.Kind kind = optionValue.getKind(); final OptionValueProto.Builder builder = OptionValueProto.newBuilder() .setName(optionValue.getName()); switch (kind) { case BOOLEAN: builder.setBoolVal(optionValue.getBoolVal()); break; case LONG: builder.setNumVal(optionValue.getNumVal()); break; case STRING: builder.setStringVal(optionValue.getStringVal()); break; case DOUBLE: builder.setFloatVal(optionValue.getFloatVal()); break; default: throw new IllegalArgumentException("Invalid OptionValue kind"); } return builder.build(); } | OptionValueProtoUtils { public static OptionValueProto toOptionValueProto(OptionValue optionValue) { checkArgument(optionValue.getType() == OptionValue.OptionType.SYSTEM, String.format("Invalid OptionType. OptionType must be 'SYSTEM', was given '%s'", optionValue.getType())); final OptionValue.Kind kind = optionValue.getKind(); final OptionValueProto.Builder builder = OptionValueProto.newBuilder() .setName(optionValue.getName()); switch (kind) { case BOOLEAN: builder.setBoolVal(optionValue.getBoolVal()); break; case LONG: builder.setNumVal(optionValue.getNumVal()); break; case STRING: builder.setStringVal(optionValue.getStringVal()); break; case DOUBLE: builder.setFloatVal(optionValue.getFloatVal()); break; default: throw new IllegalArgumentException("Invalid OptionValue kind"); } return builder.build(); } } | OptionValueProtoUtils { public static OptionValueProto toOptionValueProto(OptionValue optionValue) { checkArgument(optionValue.getType() == OptionValue.OptionType.SYSTEM, String.format("Invalid OptionType. OptionType must be 'SYSTEM', was given '%s'", optionValue.getType())); final OptionValue.Kind kind = optionValue.getKind(); final OptionValueProto.Builder builder = OptionValueProto.newBuilder() .setName(optionValue.getName()); switch (kind) { case BOOLEAN: builder.setBoolVal(optionValue.getBoolVal()); break; case LONG: builder.setNumVal(optionValue.getNumVal()); break; case STRING: builder.setStringVal(optionValue.getStringVal()); break; case DOUBLE: builder.setFloatVal(optionValue.getFloatVal()); break; default: throw new IllegalArgumentException("Invalid OptionValue kind"); } return builder.build(); } private OptionValueProtoUtils(); } | OptionValueProtoUtils { public static OptionValueProto toOptionValueProto(OptionValue optionValue) { checkArgument(optionValue.getType() == OptionValue.OptionType.SYSTEM, String.format("Invalid OptionType. OptionType must be 'SYSTEM', was given '%s'", optionValue.getType())); final OptionValue.Kind kind = optionValue.getKind(); final OptionValueProto.Builder builder = OptionValueProto.newBuilder() .setName(optionValue.getName()); switch (kind) { case BOOLEAN: builder.setBoolVal(optionValue.getBoolVal()); break; case LONG: builder.setNumVal(optionValue.getNumVal()); break; case STRING: builder.setStringVal(optionValue.getStringVal()); break; case DOUBLE: builder.setFloatVal(optionValue.getFloatVal()); break; default: throw new IllegalArgumentException("Invalid OptionValue kind"); } return builder.build(); } private OptionValueProtoUtils(); static OptionValueProto toOptionValueProto(OptionValue optionValue); static OptionValue toOptionValue(OptionValueProto value); static OptionValueProtoList toOptionValueProtoList(Collection<OptionValueProto> optionValueProtos); } | OptionValueProtoUtils { public static OptionValueProto toOptionValueProto(OptionValue optionValue) { checkArgument(optionValue.getType() == OptionValue.OptionType.SYSTEM, String.format("Invalid OptionType. OptionType must be 'SYSTEM', was given '%s'", optionValue.getType())); final OptionValue.Kind kind = optionValue.getKind(); final OptionValueProto.Builder builder = OptionValueProto.newBuilder() .setName(optionValue.getName()); switch (kind) { case BOOLEAN: builder.setBoolVal(optionValue.getBoolVal()); break; case LONG: builder.setNumVal(optionValue.getNumVal()); break; case STRING: builder.setStringVal(optionValue.getStringVal()); break; case DOUBLE: builder.setFloatVal(optionValue.getFloatVal()); break; default: throw new IllegalArgumentException("Invalid OptionValue kind"); } return builder.build(); } private OptionValueProtoUtils(); static OptionValueProto toOptionValueProto(OptionValue optionValue); static OptionValue toOptionValue(OptionValueProto value); static OptionValueProtoList toOptionValueProtoList(Collection<OptionValueProto> optionValueProtos); } |
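
The four *ToProto tests above differ only in the OptionValue.Kind they exercise; the method itself is one switch that copies the value into the matching proto oneof field (note that LONG lands in numVal and DOUBLE in floatVal). A sketch that drives all four branches in one loop, written against the same names the tests use — imports are omitted and verifyEquivalent is assumed to be this suite's helper, so this is a fragment for the surrounding test class, not a standalone program:

// Sketch only: relies on this test suite's imports and helpers.
List<OptionValue> samples = Arrays.asList(
    OptionValue.createBoolean(OptionValue.OptionType.SYSTEM, "test.option", true),
    OptionValue.createLong(OptionValue.OptionType.SYSTEM, "test.option", 1234),
    OptionValue.createString(OptionValue.OptionType.SYSTEM, "test.option", "test-option"),
    OptionValue.createDouble(OptionValue.OptionType.SYSTEM, "test.option", 1234.1234));
for (OptionValue option : samples) {
  OptionValueProto proto = OptionValueProtoUtils.toOptionValueProto(option);
  assertTrue(verifyEquivalent(option, proto)); // each Kind fills exactly one oneof field
}
// Guava's checkArgument rejects non-SYSTEM options up front with IllegalArgumentException.
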
@Test public void testBoolOptionFromProto() { final OptionValueProto optionProto = OptionValueProto.newBuilder() .setName("test.option") .setBoolVal(true) .build(); final OptionValue option = OptionValueProtoUtils.toOptionValue(optionProto); assertTrue(verifyEquivalent(option, optionProto)); } | public static OptionValue toOptionValue(OptionValueProto value) { switch (value.getOptionValCase()) { case NUM_VAL: return OptionValue.createLong( OptionValue.OptionType.SYSTEM, value.getName(), value.getNumVal() ); case STRING_VAL: return OptionValue.createString( OptionValue.OptionType.SYSTEM, value.getName(), value.getStringVal() ); case BOOL_VAL: return OptionValue.createBoolean( OptionValue.OptionType.SYSTEM, value.getName(), value.getBoolVal() ); case FLOAT_VAL: return OptionValue.createDouble( OptionValue.OptionType.SYSTEM, value.getName(), value.getFloatVal() ); case OPTIONVAL_NOT_SET: default: throw new IllegalArgumentException("Invalid OptionValue kind"); } } | OptionValueProtoUtils { public static OptionValue toOptionValue(OptionValueProto value) { switch (value.getOptionValCase()) { case NUM_VAL: return OptionValue.createLong( OptionValue.OptionType.SYSTEM, value.getName(), value.getNumVal() ); case STRING_VAL: return OptionValue.createString( OptionValue.OptionType.SYSTEM, value.getName(), value.getStringVal() ); case BOOL_VAL: return OptionValue.createBoolean( OptionValue.OptionType.SYSTEM, value.getName(), value.getBoolVal() ); case FLOAT_VAL: return OptionValue.createDouble( OptionValue.OptionType.SYSTEM, value.getName(), value.getFloatVal() ); case OPTIONVAL_NOT_SET: default: throw new IllegalArgumentException("Invalid OptionValue kind"); } } } | OptionValueProtoUtils { public static OptionValue toOptionValue(OptionValueProto value) { switch (value.getOptionValCase()) { case NUM_VAL: return OptionValue.createLong( OptionValue.OptionType.SYSTEM, value.getName(), value.getNumVal() ); case STRING_VAL: return OptionValue.createString( OptionValue.OptionType.SYSTEM, value.getName(), value.getStringVal() ); case BOOL_VAL: return OptionValue.createBoolean( OptionValue.OptionType.SYSTEM, value.getName(), value.getBoolVal() ); case FLOAT_VAL: return OptionValue.createDouble( OptionValue.OptionType.SYSTEM, value.getName(), value.getFloatVal() ); case OPTIONVAL_NOT_SET: default: throw new IllegalArgumentException("Invalid OptionValue kind"); } } private OptionValueProtoUtils(); } | OptionValueProtoUtils { public static OptionValue toOptionValue(OptionValueProto value) { switch (value.getOptionValCase()) { case NUM_VAL: return OptionValue.createLong( OptionValue.OptionType.SYSTEM, value.getName(), value.getNumVal() ); case STRING_VAL: return OptionValue.createString( OptionValue.OptionType.SYSTEM, value.getName(), value.getStringVal() ); case BOOL_VAL: return OptionValue.createBoolean( OptionValue.OptionType.SYSTEM, value.getName(), value.getBoolVal() ); case FLOAT_VAL: return OptionValue.createDouble( OptionValue.OptionType.SYSTEM, value.getName(), value.getFloatVal() ); case OPTIONVAL_NOT_SET: default: throw new IllegalArgumentException("Invalid OptionValue kind"); } } private OptionValueProtoUtils(); static OptionValueProto toOptionValueProto(OptionValue optionValue); static OptionValue toOptionValue(OptionValueProto value); static OptionValueProtoList toOptionValueProtoList(Collection<OptionValueProto> optionValueProtos); } | OptionValueProtoUtils { public static OptionValue toOptionValue(OptionValueProto value) { switch (value.getOptionValCase()) { case NUM_VAL: return OptionValue.createLong( OptionValue.OptionType.SYSTEM, value.getName(), value.getNumVal() ); case STRING_VAL: return OptionValue.createString( OptionValue.OptionType.SYSTEM, value.getName(), value.getStringVal() ); case BOOL_VAL: return OptionValue.createBoolean( OptionValue.OptionType.SYSTEM, value.getName(), value.getBoolVal() ); case FLOAT_VAL: return OptionValue.createDouble( OptionValue.OptionType.SYSTEM, value.getName(), value.getFloatVal() ); case OPTIONVAL_NOT_SET: default: throw new IllegalArgumentException("Invalid OptionValue kind"); } } private OptionValueProtoUtils(); static OptionValueProto toOptionValueProto(OptionValue optionValue); static OptionValue toOptionValue(OptionValueProto value); static OptionValueProtoList toOptionValueProtoList(Collection<OptionValueProto> optionValueProtos); } |
@Test public void testLongOptionFromProto() { final OptionValueProto optionProto = OptionValueProto.newBuilder() .setName("test.option") .setNumVal(1234) .build(); final OptionValue option = OptionValueProtoUtils.toOptionValue(optionProto); assertTrue(verifyEquivalent(option, optionProto)); } | public static OptionValue toOptionValue(OptionValueProto value) { switch (value.getOptionValCase()) { case NUM_VAL: return OptionValue.createLong( OptionValue.OptionType.SYSTEM, value.getName(), value.getNumVal() ); case STRING_VAL: return OptionValue.createString( OptionValue.OptionType.SYSTEM, value.getName(), value.getStringVal() ); case BOOL_VAL: return OptionValue.createBoolean( OptionValue.OptionType.SYSTEM, value.getName(), value.getBoolVal() ); case FLOAT_VAL: return OptionValue.createDouble( OptionValue.OptionType.SYSTEM, value.getName(), value.getFloatVal() ); case OPTIONVAL_NOT_SET: default: throw new IllegalArgumentException("Invalid OptionValue kind"); } } | OptionValueProtoUtils { public static OptionValue toOptionValue(OptionValueProto value) { switch (value.getOptionValCase()) { case NUM_VAL: return OptionValue.createLong( OptionValue.OptionType.SYSTEM, value.getName(), value.getNumVal() ); case STRING_VAL: return OptionValue.createString( OptionValue.OptionType.SYSTEM, value.getName(), value.getStringVal() ); case BOOL_VAL: return OptionValue.createBoolean( OptionValue.OptionType.SYSTEM, value.getName(), value.getBoolVal() ); case FLOAT_VAL: return OptionValue.createDouble( OptionValue.OptionType.SYSTEM, value.getName(), value.getFloatVal() ); case OPTIONVAL_NOT_SET: default: throw new IllegalArgumentException("Invalid OptionValue kind"); } } } | OptionValueProtoUtils { public static OptionValue toOptionValue(OptionValueProto value) { switch (value.getOptionValCase()) { case NUM_VAL: return OptionValue.createLong( OptionValue.OptionType.SYSTEM, value.getName(), value.getNumVal() ); case STRING_VAL: return OptionValue.createString( OptionValue.OptionType.SYSTEM, value.getName(), value.getStringVal() ); case BOOL_VAL: return OptionValue.createBoolean( OptionValue.OptionType.SYSTEM, value.getName(), value.getBoolVal() ); case FLOAT_VAL: return OptionValue.createDouble( OptionValue.OptionType.SYSTEM, value.getName(), value.getFloatVal() ); case OPTIONVAL_NOT_SET: default: throw new IllegalArgumentException("Invalid OptionValue kind"); } } private OptionValueProtoUtils(); } | OptionValueProtoUtils { public static OptionValue toOptionValue(OptionValueProto value) { switch (value.getOptionValCase()) { case NUM_VAL: return OptionValue.createLong( OptionValue.OptionType.SYSTEM, value.getName(), value.getNumVal() ); case STRING_VAL: return OptionValue.createString( OptionValue.OptionType.SYSTEM, value.getName(), value.getStringVal() ); case BOOL_VAL: return OptionValue.createBoolean( OptionValue.OptionType.SYSTEM, value.getName(), value.getBoolVal() ); case FLOAT_VAL: return OptionValue.createDouble( OptionValue.OptionType.SYSTEM, value.getName(), value.getFloatVal() ); case OPTIONVAL_NOT_SET: default: throw new IllegalArgumentException("Invalid OptionValue kind"); } } private OptionValueProtoUtils(); static OptionValueProto toOptionValueProto(OptionValue optionValue); static OptionValue toOptionValue(OptionValueProto value); static OptionValueProtoList toOptionValueProtoList(Collection<OptionValueProto> optionValueProtos); } | OptionValueProtoUtils { public static OptionValue toOptionValue(OptionValueProto value) { switch (value.getOptionValCase()) { case NUM_VAL: return OptionValue.createLong( OptionValue.OptionType.SYSTEM, value.getName(), value.getNumVal() ); case STRING_VAL: return OptionValue.createString( OptionValue.OptionType.SYSTEM, value.getName(), value.getStringVal() ); case BOOL_VAL: return OptionValue.createBoolean( OptionValue.OptionType.SYSTEM, value.getName(), value.getBoolVal() ); case FLOAT_VAL: return OptionValue.createDouble( OptionValue.OptionType.SYSTEM, value.getName(), value.getFloatVal() ); case OPTIONVAL_NOT_SET: default: throw new IllegalArgumentException("Invalid OptionValue kind"); } } private OptionValueProtoUtils(); static OptionValueProto toOptionValueProto(OptionValue optionValue); static OptionValue toOptionValue(OptionValueProto value); static OptionValueProtoList toOptionValueProtoList(Collection<OptionValueProto> optionValueProtos); } |
@Test public void testStringOptionFromProto() { final OptionValueProto optionProto = OptionValueProto.newBuilder() .setName("test.option") .setStringVal("test-option") .build(); final OptionValue option = OptionValueProtoUtils.toOptionValue(optionProto); assertTrue(verifyEquivalent(option, optionProto)); } | public static OptionValue toOptionValue(OptionValueProto value) { switch (value.getOptionValCase()) { case NUM_VAL: return OptionValue.createLong( OptionValue.OptionType.SYSTEM, value.getName(), value.getNumVal() ); case STRING_VAL: return OptionValue.createString( OptionValue.OptionType.SYSTEM, value.getName(), value.getStringVal() ); case BOOL_VAL: return OptionValue.createBoolean( OptionValue.OptionType.SYSTEM, value.getName(), value.getBoolVal() ); case FLOAT_VAL: return OptionValue.createDouble( OptionValue.OptionType.SYSTEM, value.getName(), value.getFloatVal() ); case OPTIONVAL_NOT_SET: default: throw new IllegalArgumentException("Invalid OptionValue kind"); } } | OptionValueProtoUtils { public static OptionValue toOptionValue(OptionValueProto value) { switch (value.getOptionValCase()) { case NUM_VAL: return OptionValue.createLong( OptionValue.OptionType.SYSTEM, value.getName(), value.getNumVal() ); case STRING_VAL: return OptionValue.createString( OptionValue.OptionType.SYSTEM, value.getName(), value.getStringVal() ); case BOOL_VAL: return OptionValue.createBoolean( OptionValue.OptionType.SYSTEM, value.getName(), value.getBoolVal() ); case FLOAT_VAL: return OptionValue.createDouble( OptionValue.OptionType.SYSTEM, value.getName(), value.getFloatVal() ); case OPTIONVAL_NOT_SET: default: throw new IllegalArgumentException("Invalid OptionValue kind"); } } } | OptionValueProtoUtils { public static OptionValue toOptionValue(OptionValueProto value) { switch (value.getOptionValCase()) { case NUM_VAL: return OptionValue.createLong( OptionValue.OptionType.SYSTEM, value.getName(), value.getNumVal() ); case STRING_VAL: return OptionValue.createString( OptionValue.OptionType.SYSTEM, value.getName(), value.getStringVal() ); case BOOL_VAL: return OptionValue.createBoolean( OptionValue.OptionType.SYSTEM, value.getName(), value.getBoolVal() ); case FLOAT_VAL: return OptionValue.createDouble( OptionValue.OptionType.SYSTEM, value.getName(), value.getFloatVal() ); case OPTIONVAL_NOT_SET: default: throw new IllegalArgumentException("Invalid OptionValue kind"); } } private OptionValueProtoUtils(); } | OptionValueProtoUtils { public static OptionValue toOptionValue(OptionValueProto value) { switch (value.getOptionValCase()) { case NUM_VAL: return OptionValue.createLong( OptionValue.OptionType.SYSTEM, value.getName(), value.getNumVal() ); case STRING_VAL: return OptionValue.createString( OptionValue.OptionType.SYSTEM, value.getName(), value.getStringVal() ); case BOOL_VAL: return OptionValue.createBoolean( OptionValue.OptionType.SYSTEM, value.getName(), value.getBoolVal() ); case FLOAT_VAL: return OptionValue.createDouble( OptionValue.OptionType.SYSTEM, value.getName(), value.getFloatVal() ); case OPTIONVAL_NOT_SET: default: throw new IllegalArgumentException("Invalid OptionValue kind"); } } private OptionValueProtoUtils(); static OptionValueProto toOptionValueProto(OptionValue optionValue); static OptionValue toOptionValue(OptionValueProto value); static OptionValueProtoList toOptionValueProtoList(Collection<OptionValueProto> optionValueProtos); } | OptionValueProtoUtils { public static OptionValue toOptionValue(OptionValueProto value) { switch (value.getOptionValCase()) { case NUM_VAL: return OptionValue.createLong( OptionValue.OptionType.SYSTEM, value.getName(), value.getNumVal() ); case STRING_VAL: return OptionValue.createString( OptionValue.OptionType.SYSTEM, value.getName(), value.getStringVal() ); case BOOL_VAL: return OptionValue.createBoolean( OptionValue.OptionType.SYSTEM, value.getName(), value.getBoolVal() ); case FLOAT_VAL: return OptionValue.createDouble( OptionValue.OptionType.SYSTEM, value.getName(), value.getFloatVal() ); case OPTIONVAL_NOT_SET: default: throw new IllegalArgumentException("Invalid OptionValue kind"); } } private OptionValueProtoUtils(); static OptionValueProto toOptionValueProto(OptionValue optionValue); static OptionValue toOptionValue(OptionValueProto value); static OptionValueProtoList toOptionValueProtoList(Collection<OptionValueProto> optionValueProtos); } |
@Test public void testFloatOptionFromProto() { final OptionValueProto optionProto = OptionValueProto.newBuilder() .setName("test.option") .setFloatVal(1234.1234) .build(); final OptionValue option = OptionValueProtoUtils.toOptionValue(optionProto); assertTrue(verifyEquivalent(option, optionProto)); } | public static OptionValue toOptionValue(OptionValueProto value) { switch (value.getOptionValCase()) { case NUM_VAL: return OptionValue.createLong( OptionValue.OptionType.SYSTEM, value.getName(), value.getNumVal() ); case STRING_VAL: return OptionValue.createString( OptionValue.OptionType.SYSTEM, value.getName(), value.getStringVal() ); case BOOL_VAL: return OptionValue.createBoolean( OptionValue.OptionType.SYSTEM, value.getName(), value.getBoolVal() ); case FLOAT_VAL: return OptionValue.createDouble( OptionValue.OptionType.SYSTEM, value.getName(), value.getFloatVal() ); case OPTIONVAL_NOT_SET: default: throw new IllegalArgumentException("Invalid OptionValue kind"); } } | OptionValueProtoUtils { public static OptionValue toOptionValue(OptionValueProto value) { switch (value.getOptionValCase()) { case NUM_VAL: return OptionValue.createLong( OptionValue.OptionType.SYSTEM, value.getName(), value.getNumVal() ); case STRING_VAL: return OptionValue.createString( OptionValue.OptionType.SYSTEM, value.getName(), value.getStringVal() ); case BOOL_VAL: return OptionValue.createBoolean( OptionValue.OptionType.SYSTEM, value.getName(), value.getBoolVal() ); case FLOAT_VAL: return OptionValue.createDouble( OptionValue.OptionType.SYSTEM, value.getName(), value.getFloatVal() ); case OPTIONVAL_NOT_SET: default: throw new IllegalArgumentException("Invalid OptionValue kind"); } } } | OptionValueProtoUtils { public static OptionValue toOptionValue(OptionValueProto value) { switch (value.getOptionValCase()) { case NUM_VAL: return OptionValue.createLong( OptionValue.OptionType.SYSTEM, value.getName(), value.getNumVal() ); case STRING_VAL: return OptionValue.createString( OptionValue.OptionType.SYSTEM, value.getName(), value.getStringVal() ); case BOOL_VAL: return OptionValue.createBoolean( OptionValue.OptionType.SYSTEM, value.getName(), value.getBoolVal() ); case FLOAT_VAL: return OptionValue.createDouble( OptionValue.OptionType.SYSTEM, value.getName(), value.getFloatVal() ); case OPTIONVAL_NOT_SET: default: throw new IllegalArgumentException("Invalid OptionValue kind"); } } private OptionValueProtoUtils(); } | OptionValueProtoUtils { public static OptionValue toOptionValue(OptionValueProto value) { switch (value.getOptionValCase()) { case NUM_VAL: return OptionValue.createLong( OptionValue.OptionType.SYSTEM, value.getName(), value.getNumVal() ); case STRING_VAL: return OptionValue.createString( OptionValue.OptionType.SYSTEM, value.getName(), value.getStringVal() ); case BOOL_VAL: return OptionValue.createBoolean( OptionValue.OptionType.SYSTEM, value.getName(), value.getBoolVal() ); case FLOAT_VAL: return OptionValue.createDouble( OptionValue.OptionType.SYSTEM, value.getName(), value.getFloatVal() ); case OPTIONVAL_NOT_SET: default: throw new IllegalArgumentException("Invalid OptionValue kind"); } } private OptionValueProtoUtils(); static OptionValueProto toOptionValueProto(OptionValue optionValue); static OptionValue toOptionValue(OptionValueProto value); static OptionValueProtoList toOptionValueProtoList(Collection<OptionValueProto> optionValueProtos); } | OptionValueProtoUtils { public static OptionValue toOptionValue(OptionValueProto value) { switch (value.getOptionValCase()) { case NUM_VAL: return OptionValue.createLong( OptionValue.OptionType.SYSTEM, value.getName(), value.getNumVal() ); case STRING_VAL: return OptionValue.createString( OptionValue.OptionType.SYSTEM, value.getName(), value.getStringVal() ); case BOOL_VAL: return OptionValue.createBoolean( OptionValue.OptionType.SYSTEM, value.getName(), value.getBoolVal() ); case FLOAT_VAL: return OptionValue.createDouble( OptionValue.OptionType.SYSTEM, value.getName(), value.getFloatVal() ); case OPTIONVAL_NOT_SET: default: throw new IllegalArgumentException("Invalid OptionValue kind"); } } private OptionValueProtoUtils(); static OptionValueProto toOptionValueProto(OptionValue optionValue); static OptionValue toOptionValue(OptionValueProto value); static OptionValueProtoList toOptionValueProtoList(Collection<OptionValueProto> optionValueProtos); } |
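
The four *FromProto tests mirror the conversion back through the oneof's OptionValCase, always rebuilding a SYSTEM-typed OptionValue. Taken together with the rows above, they amount to a round-trip property; a sketch of that combined check, under the same assumptions as the previous snippet:

// Round-trip sketch, same assumptions as above (fragment, not standalone).
OptionValue original = OptionValue.createDouble(OptionValue.OptionType.SYSTEM, "test.option", 1234.1234);
OptionValue restored = OptionValueProtoUtils.toOptionValue(OptionValueProtoUtils.toOptionValueProto(original));
assertEquals(original, restored); // lossless for all four kinds of SYSTEM options
// A proto with no oneof field set hits OPTIONVAL_NOT_SET and throws:
// OptionValueProtoUtils.toOptionValue(OptionValueProto.newBuilder().setName("x").build());
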
@Test public void testGet() { registerTestOption(OptionValue.Kind.LONG, "test-option", "0"); OptionValue optionValue = OptionValue.createLong(OptionValue.OptionType.SYSTEM, "test-option", 123); OptionValueProtoList optionList = OptionValueProtoList.newBuilder() .addAllOptions(Collections.singletonList(OptionValueProtoUtils.toOptionValueProto(optionValue))) .build(); when(kvStore.get(OPTIONS_KEY)).thenReturn(optionList); assertEquals(optionValue, som.getOption(optionValue.getName())); verify(kvStore, times(1)).get(eq(OPTIONS_KEY)); assertNull(som.getOption("not-a-real-option")); } | @Override public OptionValue getOption(final String name) { final OptionValueProto value = getOptionProto(name); return value == null ? null : OptionValueProtoUtils.toOptionValue(value); } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public OptionValue getOption(final String name) { final OptionValueProto value = getOptionProto(name); return value == null ? null : OptionValueProtoUtils.toOptionValue(value); } } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public OptionValue getOption(final String name) { final OptionValueProto value = getOptionProto(name); return value == null ? null : OptionValueProtoUtils.toOptionValue(value); } SystemOptionManager(OptionValidatorListing optionValidatorListing, LogicalPlanPersistence lpPersistence, final Provider<LegacyKVStoreProvider> storeProvider, boolean inMemory); } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public OptionValue getOption(final String name) { final OptionValueProto value = getOptionProto(name); return value == null ? null : OptionValueProtoUtils.toOptionValue(value); } SystemOptionManager(OptionValidatorListing optionValidatorListing, LogicalPlanPersistence lpPersistence, final Provider<LegacyKVStoreProvider> storeProvider, boolean inMemory); @Override void start(); @Override boolean isValid(String name); @Override boolean isSet(String name); @Override Iterator<OptionValue> iterator(); @Override OptionValue getOption(final String name); @Override boolean setOption(final OptionValue value); @Override boolean deleteOption(final String rawName, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); @Override OptionList getNonDefaultOptions(); @Override void close(); } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public OptionValue getOption(final String name) { final OptionValueProto value = getOptionProto(name); return value == null ? null : OptionValueProtoUtils.toOptionValue(value); } SystemOptionManager(OptionValidatorListing optionValidatorListing, LogicalPlanPersistence lpPersistence, final Provider<LegacyKVStoreProvider> storeProvider, boolean inMemory); @Override void start(); @Override boolean isValid(String name); @Override boolean isSet(String name); @Override Iterator<OptionValue> iterator(); @Override OptionValue getOption(final String name); @Override boolean setOption(final OptionValue value); @Override boolean deleteOption(final String rawName, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); @Override OptionList getNonDefaultOptions(); @Override void close(); } |
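
testGet needs no real KVStore: the single backing read, kvStore.get(OPTIONS_KEY), is stubbed with Mockito and then verified to have happened exactly once. The same pattern, reduced to a self-contained example over a hypothetical Store interface (a stand-in, not the Dremio KVStore API):

// The stub-then-verify pattern from testGet, over a hypothetical Store.
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

interface Store { String get(String key); } // hypothetical stand-in

public class StubbingSketch {
  public static void main(String[] args) {
    Store store = mock(Store.class);
    when(store.get("options")).thenReturn("slice.target=123"); // stub the one backing read

    assertEquals("slice.target=123", store.get("options"));
    verify(store, times(1)).get("options"); // exactly one read hit the store
    // Unstubbed keys return null, mirroring assertNull(som.getOption("not-a-real-option")).
    assertNull(store.get("missing"));
  }
}
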
@Test public void testSet() { registerTestOption(OptionValue.Kind.LONG, "already-added-option", "0"); OptionValue toAddOptionDefault = registerTestOption(OptionValue.Kind.STRING, "to-add-option", "default-value"); OptionValue alreadyAddedOption = OptionValue.createLong(OptionValue.OptionType.SYSTEM, "already-added-option", 123); OptionValue toAddOption = OptionValue.createString(OptionValue.OptionType.SYSTEM, "to-add-option", "some-value"); OptionValueProtoList optionList = OptionValueProtoList.newBuilder() .addAllOptions(Collections.singletonList(OptionValueProtoUtils.toOptionValueProto(alreadyAddedOption))) .build(); when(kvStore.get(OPTIONS_KEY)).thenReturn(optionList); som.setOption(alreadyAddedOption); verify(kvStore, times(0)).put(any(), any()); som.setOption(toAddOptionDefault); verify(kvStore, times(0)).put(any(), any()); som.setOption(toAddOption); ArgumentCaptor<OptionValueProtoList> argument = ArgumentCaptor.forClass(OptionValueProtoList.class); verify(kvStore, times(1)).put(eq(OPTIONS_KEY), argument.capture()); assertThat(argument.getValue().getOptionsList(), containsInAnyOrder(OptionValueProtoUtils.toOptionValueProto(toAddOption), OptionValueProtoUtils.toOptionValueProto(alreadyAddedOption)) ); OptionValue overridingOption = OptionValue.createLong(OptionValue.OptionType.SYSTEM, "already-added-option", 999); som.setOption(overridingOption); verify(kvStore, times(1)).put(OPTIONS_KEY, OptionValueProtoList.newBuilder() .addAllOptions(Collections.singletonList(OptionValueProtoUtils.toOptionValueProto(overridingOption))) .build() ); } | @Override public boolean setOption(final OptionValue value) { checkArgument(value.getType() == OptionType.SYSTEM, "OptionType must be SYSTEM."); final String name = value.getName().toLowerCase(Locale.ROOT); final OptionValidator validator = optionValidatorListing.getValidator(name); validator.validate(value); final Map<String, OptionValueProto> optionMap = new HashMap<>(); getOptionProtoList().forEach(optionProto -> optionMap.put(optionProto.getName(), optionProto)); if (optionMap.containsKey(name) && optionMap.get(name).equals(OptionValueProtoUtils.toOptionValueProto(value))) { return true; } if (value.equals(validator.getDefault())) { if (optionMap.containsKey(value.getName())) { optionMap.remove(value.getName()); } else { return true; } } optionMap.put(name, OptionValueProtoUtils.toOptionValueProto(value)); options.put(OPTIONS_KEY, OptionValueProtoUtils.toOptionValueProtoList(optionMap.values())); notifyListeners(); return true; } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public boolean setOption(final OptionValue value) { checkArgument(value.getType() == OptionType.SYSTEM, "OptionType must be SYSTEM."); final String name = value.getName().toLowerCase(Locale.ROOT); final OptionValidator validator = optionValidatorListing.getValidator(name); validator.validate(value); final Map<String, OptionValueProto> optionMap = new HashMap<>(); getOptionProtoList().forEach(optionProto -> optionMap.put(optionProto.getName(), optionProto)); if (optionMap.containsKey(name) && optionMap.get(name).equals(OptionValueProtoUtils.toOptionValueProto(value))) { return true; } if (value.equals(validator.getDefault())) { if (optionMap.containsKey(value.getName())) { optionMap.remove(value.getName()); } else { return true; } } optionMap.put(name, OptionValueProtoUtils.toOptionValueProto(value)); options.put(OPTIONS_KEY, OptionValueProtoUtils.toOptionValueProtoList(optionMap.values())); notifyListeners(); return true; } } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public boolean setOption(final OptionValue value) { checkArgument(value.getType() == OptionType.SYSTEM, "OptionType must be SYSTEM."); final String name = value.getName().toLowerCase(Locale.ROOT); final OptionValidator validator = optionValidatorListing.getValidator(name); validator.validate(value); final Map<String, OptionValueProto> optionMap = new HashMap<>(); getOptionProtoList().forEach(optionProto -> optionMap.put(optionProto.getName(), optionProto)); if (optionMap.containsKey(name) && optionMap.get(name).equals(OptionValueProtoUtils.toOptionValueProto(value))) { return true; } if (value.equals(validator.getDefault())) { if (optionMap.containsKey(value.getName())) { optionMap.remove(value.getName()); } else { return true; } } optionMap.put(name, OptionValueProtoUtils.toOptionValueProto(value)); options.put(OPTIONS_KEY, OptionValueProtoUtils.toOptionValueProtoList(optionMap.values())); notifyListeners(); return true; } SystemOptionManager(OptionValidatorListing optionValidatorListing, LogicalPlanPersistence lpPersistence, final Provider<LegacyKVStoreProvider> storeProvider, boolean inMemory); } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public boolean setOption(final OptionValue value) { checkArgument(value.getType() == OptionType.SYSTEM, "OptionType must be SYSTEM."); final String name = value.getName().toLowerCase(Locale.ROOT); final OptionValidator validator = optionValidatorListing.getValidator(name); validator.validate(value); final Map<String, OptionValueProto> optionMap = new HashMap<>(); getOptionProtoList().forEach(optionProto -> optionMap.put(optionProto.getName(), optionProto)); if (optionMap.containsKey(name) && optionMap.get(name).equals(OptionValueProtoUtils.toOptionValueProto(value))) { return true; } if (value.equals(validator.getDefault())) { if (optionMap.containsKey(value.getName())) { optionMap.remove(value.getName()); } else { return true; } } optionMap.put(name, OptionValueProtoUtils.toOptionValueProto(value)); options.put(OPTIONS_KEY, OptionValueProtoUtils.toOptionValueProtoList(optionMap.values())); notifyListeners(); return true; } SystemOptionManager(OptionValidatorListing optionValidatorListing, LogicalPlanPersistence lpPersistence, final Provider<LegacyKVStoreProvider> storeProvider, boolean inMemory); @Override void start(); @Override boolean isValid(String name); @Override boolean isSet(String name); @Override Iterator<OptionValue> iterator(); @Override OptionValue getOption(final String name); @Override boolean setOption(final OptionValue value); @Override boolean deleteOption(final String rawName, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); @Override OptionList getNonDefaultOptions(); @Override void close(); } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public boolean setOption(final OptionValue value) { checkArgument(value.getType() == OptionType.SYSTEM, "OptionType must be SYSTEM."); final String name = value.getName().toLowerCase(Locale.ROOT); final OptionValidator validator = optionValidatorListing.getValidator(name); validator.validate(value); final Map<String, OptionValueProto> optionMap = new HashMap<>(); getOptionProtoList().forEach(optionProto -> optionMap.put(optionProto.getName(), optionProto)); if (optionMap.containsKey(name) && optionMap.get(name).equals(OptionValueProtoUtils.toOptionValueProto(value))) { return true; } if (value.equals(validator.getDefault())) { if (optionMap.containsKey(value.getName())) { optionMap.remove(value.getName()); } else { return true; } } optionMap.put(name, OptionValueProtoUtils.toOptionValueProto(value)); options.put(OPTIONS_KEY, OptionValueProtoUtils.toOptionValueProtoList(optionMap.values())); notifyListeners(); return true; } SystemOptionManager(OptionValidatorListing optionValidatorListing, LogicalPlanPersistence lpPersistence, final Provider<LegacyKVStoreProvider> storeProvider, boolean inMemory); @Override void start(); @Override boolean isValid(String name); @Override boolean isSet(String name); @Override Iterator<OptionValue> iterator(); @Override OptionValue getOption(final String name); @Override boolean setOption(final OptionValue value); @Override boolean deleteOption(final String rawName, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); @Override OptionList getNonDefaultOptions(); @Override void close(); } |
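
testSet cannot compare the written list by equality in the two-entry case because setOption builds it from a HashMap, so iteration order is unstable; instead it captures the put argument with an ArgumentCaptor and asserts containsInAnyOrder. The pattern in isolation, again over a hypothetical store interface rather than the Dremio KVStore API:

// The capture-and-inspect pattern from testSet, over a hypothetical ListStore.
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

import java.util.List;
import org.mockito.ArgumentCaptor;

interface ListStore { void put(String key, List<String> values); } // hypothetical stand-in

public class CaptorSketch {
  public static void main(String[] args) {
    ListStore store = mock(ListStore.class);
    store.put("options", List.of("b", "a")); // stand-in for the write som.setOption triggers

    // HashMap-backed writes have no stable order, so capture and compare as a set.
    @SuppressWarnings("unchecked")
    ArgumentCaptor<List<String>> captor = ArgumentCaptor.forClass(List.class);
    verify(store, times(1)).put(eq("options"), captor.capture());
    assertThat(captor.getValue(), containsInAnyOrder("a", "b"));
  }
}
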
@Test public void testVisitor() { TransformBase transform = new TransformExtract("source", "new", DatasetsUtil.pattern("\\d+", 0, IndexType.INDEX), false); String name = transform.accept(new TransformVisitor<String>() { @Override public String visit(TransformLookup lookup) throws Exception { return "lookup"; } @Override public String visit(TransformJoin join) throws Exception { return "join"; } @Override public String visit(TransformSort sort) throws Exception { return "sort"; } @Override public String visit(TransformSorts sortMultiple) throws Exception { return "sortMultiple"; } @Override public String visit(TransformDrop drop) throws Exception { return "drop"; } @Override public String visit(TransformRename rename) throws Exception { return "rename"; } @Override public String visit(TransformConvertCase convertCase) throws Exception { return "convertCase"; } @Override public String visit(TransformTrim trim) throws Exception { return "trim"; } @Override public String visit(TransformExtract extract) throws Exception { return "extract"; } @Override public String visit(TransformAddCalculatedField addCalculatedField) throws Exception { return "addCalculatedField"; } @Override public String visit(TransformUpdateSQL updateSQL) throws Exception { return "updateSQL"; } @Override public String visit(TransformField field) throws Exception { return "field"; } @Override public String visit(TransformConvertToSingleType convertToSingleType) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(TransformSplitByDataType splitByDataType) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(TransformGroupBy groupBy) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(TransformFilter filter) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(TransformCreateFromParent createFromParent) throws Exception { throw new UnsupportedOperationException("NYI"); } }); assertEquals("extract", name); } | public final <T> T accept(TransformVisitor<T> visitor) throws VisitorException { return acceptor.accept(visitor, this); } | TransformBase { public final <T> T accept(TransformVisitor<T> visitor) throws VisitorException { return acceptor.accept(visitor, this); } } | TransformBase { public final <T> T accept(TransformVisitor<T> visitor) throws VisitorException { return acceptor.accept(visitor, this); } } | TransformBase { public final <T> T accept(TransformVisitor<T> visitor) throws VisitorException { return acceptor.accept(visitor, this); } final T accept(TransformVisitor<T> visitor); Transform wrap(); @Override String toString(); static TransformBase unwrap(Transform t); static Converter<TransformBase, Transform> converter(); } | TransformBase { public final <T> T accept(TransformVisitor<T> visitor) throws VisitorException { return acceptor.accept(visitor, this); } final T accept(TransformVisitor<T> visitor); Transform wrap(); @Override String toString(); static TransformBase unwrap(Transform t); static Converter<TransformBase, Transform> converter(); static final Acceptor<TransformBase, TransformVisitor<?>, Transform> acceptor; } |
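
testVisitor exercises classic double dispatch: calling accept on the concrete TransformExtract selects the visit(TransformExtract) overload at runtime, so one visitor interface covers the whole hierarchy. A minimal sketch of that accept/visit contract with illustrative names (not the Dremio TransformBase hierarchy):

// Minimal double-dispatch sketch of the accept/visit contract.
interface Visitor<T> { T visit(Extract e); T visit(Rename r); }
abstract class Node { abstract <T> T accept(Visitor<T> v); }
class Extract extends Node { <T> T accept(Visitor<T> v) { return v.visit(this); } }
class Rename extends Node { <T> T accept(Visitor<T> v) { return v.visit(this); } }

public class VisitorSketch {
  public static void main(String[] args) {
    Node n = new Extract();
    // The runtime subclass picks the overload: prints "extract".
    String name = n.accept(new Visitor<String>() {
      public String visit(Extract e) { return "extract"; }
      public String visit(Rename r) { return "rename"; }
    });
    System.out.println(name);
  }
}
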
@Test public void testDelete() { registerTestOption(OptionValue.Kind.LONG, "added-option-0", "0"); registerTestOption(OptionValue.Kind.LONG, "added-option-1", "1"); registerTestOption(OptionValue.Kind.STRING, "not-added-option", "default-value"); OptionValue optionValue0 = OptionValue.createLong(OptionValue.OptionType.SYSTEM, "added-option-0", 100); OptionValue optionValue1 = OptionValue.createLong(OptionValue.OptionType.SYSTEM, "added-option-1", 111); OptionValueProtoList optionList = OptionValueProtoList.newBuilder() .addAllOptions(Arrays.asList( OptionValueProtoUtils.toOptionValueProto(optionValue0), OptionValueProtoUtils.toOptionValueProto(optionValue1))) .build(); when(kvStore.get(OPTIONS_KEY)).thenReturn(optionList); som.deleteOption("not-added-option", OptionValue.OptionType.SYSTEM); verify(kvStore, times(0)).put(any(), any()); som.deleteOption("added-option-0", OptionValue.OptionType.SYSTEM); verify(kvStore, times(1)).put(OPTIONS_KEY, OptionValueProtoList.newBuilder() .addAllOptions(Collections.singletonList(OptionValueProtoUtils.toOptionValueProto(optionValue1))) .build() ); } | @Override public boolean deleteOption(final String rawName, OptionType type) { checkArgument(type == OptionType.SYSTEM, "OptionType must be SYSTEM."); final String name = rawName.toLowerCase(Locale.ROOT); optionValidatorListing.getValidator(name); final Pointer<Boolean> needUpdate = new Pointer<>(false); final List<OptionValueProto> newOptionValueProtoList = getOptionProtoList().stream() .filter(optionValueProto -> { if (name.equals(optionValueProto.getName())) { needUpdate.value = true; return false; } return true; }) .collect(Collectors.toList()); if (needUpdate.value) { options.put(OPTIONS_KEY, OptionValueProtoUtils.toOptionValueProtoList(newOptionValueProtoList)); } notifyListeners(); return true; } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public boolean deleteOption(final String rawName, OptionType type) { checkArgument(type == OptionType.SYSTEM, "OptionType must be SYSTEM."); final String name = rawName.toLowerCase(Locale.ROOT); optionValidatorListing.getValidator(name); final Pointer<Boolean> needUpdate = new Pointer<>(false); final List<OptionValueProto> newOptionValueProtoList = getOptionProtoList().stream() .filter(optionValueProto -> { if (name.equals(optionValueProto.getName())) { needUpdate.value = true; return false; } return true; }) .collect(Collectors.toList()); if (needUpdate.value) { options.put(OPTIONS_KEY, OptionValueProtoUtils.toOptionValueProtoList(newOptionValueProtoList)); } notifyListeners(); return true; } } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public boolean deleteOption(final String rawName, OptionType type) { checkArgument(type == OptionType.SYSTEM, "OptionType must be SYSTEM."); final String name = rawName.toLowerCase(Locale.ROOT); optionValidatorListing.getValidator(name); final Pointer<Boolean> needUpdate = new Pointer<>(false); final List<OptionValueProto> newOptionValueProtoList = getOptionProtoList().stream() .filter(optionValueProto -> { if (name.equals(optionValueProto.getName())) { needUpdate.value = true; return false; } return true; }) .collect(Collectors.toList()); if (needUpdate.value) { options.put(OPTIONS_KEY, OptionValueProtoUtils.toOptionValueProtoList(newOptionValueProtoList)); } notifyListeners(); return true; } SystemOptionManager(OptionValidatorListing optionValidatorListing,
LogicalPlanPersistence lpPersistence,
final Provider<LegacyKVStoreProvider> storeProvider,
boolean inMemory); } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public boolean deleteOption(final String rawName, OptionType type) { checkArgument(type == OptionType.SYSTEM, "OptionType must be SYSTEM."); final String name = rawName.toLowerCase(Locale.ROOT); optionValidatorListing.getValidator(name); final Pointer<Boolean> needUpdate = new Pointer<>(false); final List<OptionValueProto> newOptionValueProtoList = getOptionProtoList().stream() .filter(optionValueProto -> { if (name.equals(optionValueProto.getName())) { needUpdate.value = true; return false; } return true; }) .collect(Collectors.toList()); if (needUpdate.value) { options.put(OPTIONS_KEY, OptionValueProtoUtils.toOptionValueProtoList(newOptionValueProtoList)); } notifyListeners(); return true; } SystemOptionManager(OptionValidatorListing optionValidatorListing,
LogicalPlanPersistence lpPersistence,
final Provider<LegacyKVStoreProvider> storeProvider,
boolean inMemory); @Override void start(); @Override boolean isValid(String name); @Override boolean isSet(String name); @Override Iterator<OptionValue> iterator(); @Override OptionValue getOption(final String name); @Override boolean setOption(final OptionValue value); @Override boolean deleteOption(final String rawName, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); @Override OptionList getNonDefaultOptions(); @Override void close(); } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public boolean deleteOption(final String rawName, OptionType type) { checkArgument(type == OptionType.SYSTEM, "OptionType must be SYSTEM."); final String name = rawName.toLowerCase(Locale.ROOT); optionValidatorListing.getValidator(name); final Pointer<Boolean> needUpdate = new Pointer<>(false); final List<OptionValueProto> newOptionValueProtoList = getOptionProtoList().stream() .filter(optionValueProto -> { if (name.equals(optionValueProto.getName())) { needUpdate.value = true; return false; } return true; }) .collect(Collectors.toList()); if (needUpdate.value) { options.put(OPTIONS_KEY, OptionValueProtoUtils.toOptionValueProtoList(newOptionValueProtoList)); } notifyListeners(); return true; } SystemOptionManager(OptionValidatorListing optionValidatorListing,
LogicalPlanPersistence lpPersistence,
final Provider<LegacyKVStoreProvider> storeProvider,
boolean inMemory); @Override void start(); @Override boolean isValid(String name); @Override boolean isSet(String name); @Override Iterator<OptionValue> iterator(); @Override OptionValue getOption(final String name); @Override boolean setOption(final OptionValue value); @Override boolean deleteOption(final String rawName, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); @Override OptionList getNonDefaultOptions(); @Override void close(); } |
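The deleteOption row shows the rebuild-and-write-back pattern behind the `times(0)`/`times(1)` verifications in testDelete: the stored list is filtered, and a `put` is issued only when an entry was actually dropped. A minimal sketch under simplified types (the `Store` interface and `String` entries are stand-ins for the real KV store and protos):

```java
import java.util.ArrayList;
import java.util.List;

// Sketch of the conditional write-back in deleteOption(): rebuild the stored
// list without the target entry and put() only if something was removed.
class OptionStoreSketch {
  interface Store { List<String> get(); void put(List<String> values); }

  private final Store store;

  OptionStoreSketch(Store store) { this.store = store; }

  boolean delete(String name) {
    List<String> remaining = new ArrayList<>();
    boolean removed = false;
    for (String option : store.get()) {
      if (option.equals(name)) {
        removed = true;        // skip the matching entry
      } else {
        remaining.add(option);
      }
    }
    if (removed) {
      store.put(remaining);    // write back only when the list changed,
    }                          // matching the times(0)/times(1) checks above
    return removed;
  }
}
```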
@Test public void testDeleteAll() { registerTestOption(OptionValue.Kind.LONG, "test-option-0", "0"); registerTestOption(OptionValue.Kind.LONG, "test-option-1", "1"); OptionValue optionValue0 = OptionValue.createLong(OptionValue.OptionType.SYSTEM, "test-option-0", 100); OptionValue optionValue1 = OptionValue.createLong(OptionValue.OptionType.SYSTEM, "test-option-1", 111); OptionValueProtoList optionList = OptionValueProtoList.newBuilder() .addAllOptions(Arrays.asList( OptionValueProtoUtils.toOptionValueProto(optionValue0), OptionValueProtoUtils.toOptionValueProto(optionValue1))) .build(); when(kvStore.get(OPTIONS_KEY)).thenReturn(optionList); som.deleteAllOptions(OptionValue.OptionType.SYSTEM); verify(kvStore, times(1)).put(OPTIONS_KEY, OptionValueProtoList.newBuilder() .addAllOptions(Collections.emptyList()) .build() ); } | @Override public boolean deleteAllOptions(OptionType type) { checkArgument(type == OptionType.SYSTEM, "OptionType must be SYSTEM."); options.put(OPTIONS_KEY, OptionValueProtoList.newBuilder().build()); notifyListeners(); return true; } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public boolean deleteAllOptions(OptionType type) { checkArgument(type == OptionType.SYSTEM, "OptionType must be SYSTEM."); options.put(OPTIONS_KEY, OptionValueProtoList.newBuilder().build()); notifyListeners(); return true; } } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public boolean deleteAllOptions(OptionType type) { checkArgument(type == OptionType.SYSTEM, "OptionType must be SYSTEM."); options.put(OPTIONS_KEY, OptionValueProtoList.newBuilder().build()); notifyListeners(); return true; } SystemOptionManager(OptionValidatorListing optionValidatorListing,
LogicalPlanPersistence lpPersistence,
final Provider<LegacyKVStoreProvider> storeProvider,
boolean inMemory); } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public boolean deleteAllOptions(OptionType type) { checkArgument(type == OptionType.SYSTEM, "OptionType must be SYSTEM."); options.put(OPTIONS_KEY, OptionValueProtoList.newBuilder().build()); notifyListeners(); return true; } SystemOptionManager(OptionValidatorListing optionValidatorListing,
LogicalPlanPersistence lpPersistence,
final Provider<LegacyKVStoreProvider> storeProvider,
boolean inMemory); @Override void start(); @Override boolean isValid(String name); @Override boolean isSet(String name); @Override Iterator<OptionValue> iterator(); @Override OptionValue getOption(final String name); @Override boolean setOption(final OptionValue value); @Override boolean deleteOption(final String rawName, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); @Override OptionList getNonDefaultOptions(); @Override void close(); } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public boolean deleteAllOptions(OptionType type) { checkArgument(type == OptionType.SYSTEM, "OptionType must be SYSTEM."); options.put(OPTIONS_KEY, OptionValueProtoList.newBuilder().build()); notifyListeners(); return true; } SystemOptionManager(OptionValidatorListing optionValidatorListing,
LogicalPlanPersistence lpPersistence,
final Provider<LegacyKVStoreProvider> storeProvider,
boolean inMemory); @Override void start(); @Override boolean isValid(String name); @Override boolean isSet(String name); @Override Iterator<OptionValue> iterator(); @Override OptionValue getOption(final String name); @Override boolean setOption(final OptionValue value); @Override boolean deleteOption(final String rawName, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); @Override OptionList getNonDefaultOptions(); @Override void close(); } |
@Test public void testGetNonDefaultOptions() { registerTestOption(OptionValue.Kind.LONG, "test-option-0", "0"); registerTestOption(OptionValue.Kind.LONG, "test-option-1", "1"); OptionValue optionValue0 = OptionValue.createLong(OptionValue.OptionType.SYSTEM, "test-option-0", 100); OptionValue optionValue1 = OptionValue.createLong(OptionValue.OptionType.SYSTEM, "test-option-1", 111); OptionValueProtoList optionList = OptionValueProtoList.newBuilder() .addAllOptions(Arrays.asList( OptionValueProtoUtils.toOptionValueProto(optionValue0), OptionValueProtoUtils.toOptionValueProto(optionValue1))) .build(); when(kvStore.get(OPTIONS_KEY)).thenReturn(optionList); assertThat(som.getNonDefaultOptions(), containsInAnyOrder(optionValue0, optionValue1)); } | @Override public OptionList getNonDefaultOptions() { final OptionList nonDefaultOptions = new OptionList(); getOptionProtoList().forEach( entry -> nonDefaultOptions.add(OptionValueProtoUtils.toOptionValue(entry)) ); return nonDefaultOptions; } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public OptionList getNonDefaultOptions() { final OptionList nonDefaultOptions = new OptionList(); getOptionProtoList().forEach( entry -> nonDefaultOptions.add(OptionValueProtoUtils.toOptionValue(entry)) ); return nonDefaultOptions; } } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public OptionList getNonDefaultOptions() { final OptionList nonDefaultOptions = new OptionList(); getOptionProtoList().forEach( entry -> nonDefaultOptions.add(OptionValueProtoUtils.toOptionValue(entry)) ); return nonDefaultOptions; } SystemOptionManager(OptionValidatorListing optionValidatorListing,
LogicalPlanPersistence lpPersistence,
final Provider<LegacyKVStoreProvider> storeProvider,
boolean inMemory); } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public OptionList getNonDefaultOptions() { final OptionList nonDefaultOptions = new OptionList(); getOptionProtoList().forEach( entry -> nonDefaultOptions.add(OptionValueProtoUtils.toOptionValue(entry)) ); return nonDefaultOptions; } SystemOptionManager(OptionValidatorListing optionValidatorListing,
LogicalPlanPersistence lpPersistence,
final Provider<LegacyKVStoreProvider> storeProvider,
boolean inMemory); @Override void start(); @Override boolean isValid(String name); @Override boolean isSet(String name); @Override Iterator<OptionValue> iterator(); @Override OptionValue getOption(final String name); @Override boolean setOption(final OptionValue value); @Override boolean deleteOption(final String rawName, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); @Override OptionList getNonDefaultOptions(); @Override void close(); } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public OptionList getNonDefaultOptions() { final OptionList nonDefaultOptions = new OptionList(); getOptionProtoList().forEach( entry -> nonDefaultOptions.add(OptionValueProtoUtils.toOptionValue(entry)) ); return nonDefaultOptions; } SystemOptionManager(OptionValidatorListing optionValidatorListing,
LogicalPlanPersistence lpPersistence,
final Provider<LegacyKVStoreProvider> storeProvider,
boolean inMemory); @Override void start(); @Override boolean isValid(String name); @Override boolean isSet(String name); @Override Iterator<OptionValue> iterator(); @Override OptionValue getOption(final String name); @Override boolean setOption(final OptionValue value); @Override boolean deleteOption(final String rawName, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); @Override OptionList getNonDefaultOptions(); @Override void close(); } |
@Test public void testIterator() { registerTestOption(OptionValue.Kind.LONG, "test-option-0", "0"); registerTestOption(OptionValue.Kind.LONG, "test-option-1", "1"); OptionValue optionValue0 = OptionValue.createLong(OptionValue.OptionType.SYSTEM, "test-option-0", 100); OptionValue optionValue1 = OptionValue.createLong(OptionValue.OptionType.SYSTEM, "test-option-1", 111); OptionValueProtoList optionList = OptionValueProtoList.newBuilder() .addAllOptions(Arrays.asList( OptionValueProtoUtils.toOptionValueProto(optionValue0), OptionValueProtoUtils.toOptionValueProto(optionValue1))) .build(); when(kvStore.get(OPTIONS_KEY)).thenReturn(optionList); assertThat(Lists.from(som.iterator()), containsInAnyOrder(optionValue0, optionValue1)); } | @Override public Iterator<OptionValue> iterator() { return getOptionProtoList().stream() .map(OptionValueProtoUtils::toOptionValue) .iterator(); } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public Iterator<OptionValue> iterator() { return getOptionProtoList().stream() .map(OptionValueProtoUtils::toOptionValue) .iterator(); } } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public Iterator<OptionValue> iterator() { return getOptionProtoList().stream() .map(OptionValueProtoUtils::toOptionValue) .iterator(); } SystemOptionManager(OptionValidatorListing optionValidatorListing,
LogicalPlanPersistence lpPersistence,
final Provider<LegacyKVStoreProvider> storeProvider,
boolean inMemory); } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public Iterator<OptionValue> iterator() { return getOptionProtoList().stream() .map(OptionValueProtoUtils::toOptionValue) .iterator(); } SystemOptionManager(OptionValidatorListing optionValidatorListing,
LogicalPlanPersistence lpPersistence,
final Provider<LegacyKVStoreProvider> storeProvider,
boolean inMemory); @Override void start(); @Override boolean isValid(String name); @Override boolean isSet(String name); @Override Iterator<OptionValue> iterator(); @Override OptionValue getOption(final String name); @Override boolean setOption(final OptionValue value); @Override boolean deleteOption(final String rawName, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); @Override OptionList getNonDefaultOptions(); @Override void close(); } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public Iterator<OptionValue> iterator() { return getOptionProtoList().stream() .map(OptionValueProtoUtils::toOptionValue) .iterator(); } SystemOptionManager(OptionValidatorListing optionValidatorListing,
LogicalPlanPersistence lpPersistence,
final Provider<LegacyKVStoreProvider> storeProvider,
boolean inMemory); @Override void start(); @Override boolean isValid(String name); @Override boolean isSet(String name); @Override Iterator<OptionValue> iterator(); @Override OptionValue getOption(final String name); @Override boolean setOption(final OptionValue value); @Override boolean deleteOption(final String rawName, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); @Override OptionList getNonDefaultOptions(); @Override void close(); } |
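The iterator row maps stored entries to the public type lazily through a stream; a compact sketch with `String` stand-ins for the proto and `OptionValue` types:

```java
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

// Sketch of iterator(): convert stored entries lazily via Stream.iterator(),
// so no intermediate list of converted values is materialized.
class IteratorSketch {
  private final List<String> stored = Arrays.asList("proto-a", "proto-b");

  Iterator<String> iterator() {
    return stored.stream()
        .map(s -> "option:" + s)   // stand-in for OptionValueProtoUtils::toOptionValue
        .iterator();
  }
}
```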
@Test public void testIsSet() { registerTestOption(OptionValue.Kind.LONG, "set-option", "0"); registerTestOption(OptionValue.Kind.LONG, "not-set-option", "1"); OptionValue optionValue = OptionValue.createLong(OptionValue.OptionType.SYSTEM, "set-option", 123); OptionValueProtoList optionList = OptionValueProtoList.newBuilder() .addAllOptions(Collections.singletonList(OptionValueProtoUtils.toOptionValueProto(optionValue))) .build(); when(kvStore.get(OPTIONS_KEY)).thenReturn(optionList); assertTrue(som.isSet("set-option")); assertFalse(som.isSet("not-set-option")); } | @Override public boolean isSet(String name){ return getOptionProto(name) != null; } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public boolean isSet(String name){ return getOptionProto(name) != null; } } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public boolean isSet(String name){ return getOptionProto(name) != null; } SystemOptionManager(OptionValidatorListing optionValidatorListing,
LogicalPlanPersistence lpPersistence,
final Provider<LegacyKVStoreProvider> storeProvider,
boolean inMemory); } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public boolean isSet(String name){ return getOptionProto(name) != null; } SystemOptionManager(OptionValidatorListing optionValidatorListing,
LogicalPlanPersistence lpPersistence,
final Provider<LegacyKVStoreProvider> storeProvider,
boolean inMemory); @Override void start(); @Override boolean isValid(String name); @Override boolean isSet(String name); @Override Iterator<OptionValue> iterator(); @Override OptionValue getOption(final String name); @Override boolean setOption(final OptionValue value); @Override boolean deleteOption(final String rawName, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); @Override OptionList getNonDefaultOptions(); @Override void close(); } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public boolean isSet(String name){ return getOptionProto(name) != null; } SystemOptionManager(OptionValidatorListing optionValidatorListing,
LogicalPlanPersistence lpPersistence,
final Provider<LegacyKVStoreProvider> storeProvider,
boolean inMemory); @Override void start(); @Override boolean isValid(String name); @Override boolean isSet(String name); @Override Iterator<OptionValue> iterator(); @Override OptionValue getOption(final String name); @Override boolean setOption(final OptionValue value); @Override boolean deleteOption(final String rawName, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); @Override OptionList getNonDefaultOptions(); @Override void close(); } |
@Test public void testIsValid() { registerTestOption(OptionValue.Kind.LONG, "valid-option", "0"); assertTrue(som.isValid("valid-option")); assertFalse(som.isValid("invalid-option")); } | @Override public boolean isValid(String name){ return optionValidatorListing.isValid(name); } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public boolean isValid(String name){ return optionValidatorListing.isValid(name); } } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public boolean isValid(String name){ return optionValidatorListing.isValid(name); } SystemOptionManager(OptionValidatorListing optionValidatorListing,
LogicalPlanPersistence lpPersistence,
final Provider<LegacyKVStoreProvider> storeProvider,
boolean inMemory); } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public boolean isValid(String name){ return optionValidatorListing.isValid(name); } SystemOptionManager(OptionValidatorListing optionValidatorListing,
LogicalPlanPersistence lpPersistence,
final Provider<LegacyKVStoreProvider> storeProvider,
boolean inMemory); @Override void start(); @Override boolean isValid(String name); @Override boolean isSet(String name); @Override Iterator<OptionValue> iterator(); @Override OptionValue getOption(final String name); @Override boolean setOption(final OptionValue value); @Override boolean deleteOption(final String rawName, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); @Override OptionList getNonDefaultOptions(); @Override void close(); } | SystemOptionManager extends BaseOptionManager implements Service, ProjectOptionManager { @Override public boolean isValid(String name){ return optionValidatorListing.isValid(name); } SystemOptionManager(OptionValidatorListing optionValidatorListing,
LogicalPlanPersistence lpPersistence,
final Provider<LegacyKVStoreProvider> storeProvider,
boolean inMemory); @Override void start(); @Override boolean isValid(String name); @Override boolean isSet(String name); @Override Iterator<OptionValue> iterator(); @Override OptionValue getOption(final String name); @Override boolean setOption(final OptionValue value); @Override boolean deleteOption(final String rawName, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override void addOptionChangeListener(OptionChangeListener optionChangeListener); @Override OptionList getNonDefaultOptions(); @Override void close(); } |
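testIsSet and testIsValid probe two different lookups: `isSet` asks whether a value is currently stored, `isValid` whether a validator is registered for the name at all. A sketch with plain maps standing in for the KV store and the validator listing:

```java
import java.util.HashMap;
import java.util.Map;

// Sketch of the isValid/isSet distinction: validator registry vs stored values.
class OptionLookupSketch {
  private final Map<String, String> validators = new HashMap<>(); // name -> kind
  private final Map<String, String> stored = new HashMap<>();     // name -> value

  boolean isValid(String name) { return validators.containsKey(name); } // known option?

  boolean isSet(String name) { return stored.get(name) != null; }       // non-default value stored?
}
```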
@Test public void testGetOption() { final OptionManager eagerCachingOptionManager = new EagerCachingOptionManager(optionManager); assertEquals(optionValueA, eagerCachingOptionManager.getOption(optionValueA.getName())); verify(optionManager, times(0)).getOption(optionValueA.getName()); } | @Override public double getOption(DoubleValidator validator) { return getOption(validator.getOptionName()).getFloatVal(); } | EagerCachingOptionManager extends InMemoryOptionManager { @Override public double getOption(DoubleValidator validator) { return getOption(validator.getOptionName()).getFloatVal(); } } | EagerCachingOptionManager extends InMemoryOptionManager { @Override public double getOption(DoubleValidator validator) { return getOption(validator.getOptionName()).getFloatVal(); } EagerCachingOptionManager(OptionManager delegate); } | EagerCachingOptionManager extends InMemoryOptionManager { @Override public double getOption(DoubleValidator validator) { return getOption(validator.getOptionName()).getFloatVal(); } EagerCachingOptionManager(OptionManager delegate); @Override boolean setOption(OptionValue value); @Override boolean deleteOption(String name, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override double getOption(DoubleValidator validator); @Override long getOption(LongValidator validator); @Override String getOption(StringValidator validator); @Override OptionValidatorListing getOptionValidatorListing(); } | EagerCachingOptionManager extends InMemoryOptionManager { @Override public double getOption(DoubleValidator validator) { return getOption(validator.getOptionName()).getFloatVal(); } EagerCachingOptionManager(OptionManager delegate); @Override boolean setOption(OptionValue value); @Override boolean deleteOption(String name, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override double getOption(DoubleValidator validator); @Override long getOption(LongValidator validator); @Override String getOption(StringValidator validator); @Override OptionValidatorListing getOptionValidatorListing(); } |
@Test public void testSetOption() { final OptionManager eagerCachingOptionManager = new EagerCachingOptionManager(optionManager); final OptionValue newOption = OptionValue.createBoolean(OptionValue.OptionType.SYSTEM, "newOption", true); eagerCachingOptionManager.setOption(newOption); verify(optionManager, times(1)).setOption(newOption); } | @Override public boolean setOption(OptionValue value) { return super.setOption(value) && delegate.setOption(value); } | EagerCachingOptionManager extends InMemoryOptionManager { @Override public boolean setOption(OptionValue value) { return super.setOption(value) && delegate.setOption(value); } } | EagerCachingOptionManager extends InMemoryOptionManager { @Override public boolean setOption(OptionValue value) { return super.setOption(value) && delegate.setOption(value); } EagerCachingOptionManager(OptionManager delegate); } | EagerCachingOptionManager extends InMemoryOptionManager { @Override public boolean setOption(OptionValue value) { return super.setOption(value) && delegate.setOption(value); } EagerCachingOptionManager(OptionManager delegate); @Override boolean setOption(OptionValue value); @Override boolean deleteOption(String name, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override double getOption(DoubleValidator validator); @Override long getOption(LongValidator validator); @Override String getOption(StringValidator validator); @Override OptionValidatorListing getOptionValidatorListing(); } | EagerCachingOptionManager extends InMemoryOptionManager { @Override public boolean setOption(OptionValue value) { return super.setOption(value) && delegate.setOption(value); } EagerCachingOptionManager(OptionManager delegate); @Override boolean setOption(OptionValue value); @Override boolean deleteOption(String name, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override double getOption(DoubleValidator validator); @Override long getOption(LongValidator validator); @Override String getOption(StringValidator validator); @Override OptionValidatorListing getOptionValidatorListing(); } |
@Test public void testDeleteOption() { final OptionManager eagerCachingOptionManager = new EagerCachingOptionManager(optionManager); eagerCachingOptionManager.deleteOption(optionValueC.getName(), OptionValue.OptionType.SYSTEM); assertNull(eagerCachingOptionManager.getOption(optionValueC.getName())); verify(optionManager, times(1)).deleteOption(optionValueC.getName(), OptionValue.OptionType.SYSTEM); } | @Override public boolean deleteOption(String name, OptionType type) { return super.deleteOption(name, type) && delegate.deleteOption(name, type); } | EagerCachingOptionManager extends InMemoryOptionManager { @Override public boolean deleteOption(String name, OptionType type) { return super.deleteOption(name, type) && delegate.deleteOption(name, type); } } | EagerCachingOptionManager extends InMemoryOptionManager { @Override public boolean deleteOption(String name, OptionType type) { return super.deleteOption(name, type) && delegate.deleteOption(name, type); } EagerCachingOptionManager(OptionManager delegate); } | EagerCachingOptionManager extends InMemoryOptionManager { @Override public boolean deleteOption(String name, OptionType type) { return super.deleteOption(name, type) && delegate.deleteOption(name, type); } EagerCachingOptionManager(OptionManager delegate); @Override boolean setOption(OptionValue value); @Override boolean deleteOption(String name, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override double getOption(DoubleValidator validator); @Override long getOption(LongValidator validator); @Override String getOption(StringValidator validator); @Override OptionValidatorListing getOptionValidatorListing(); } | EagerCachingOptionManager extends InMemoryOptionManager { @Override public boolean deleteOption(String name, OptionType type) { return super.deleteOption(name, type) && delegate.deleteOption(name, type); } EagerCachingOptionManager(OptionManager delegate); @Override boolean setOption(OptionValue value); @Override boolean deleteOption(String name, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override double getOption(DoubleValidator validator); @Override long getOption(LongValidator validator); @Override String getOption(StringValidator validator); @Override OptionValidatorListing getOptionValidatorListing(); } |
@Test public void testDeleteAllOptions() { final OptionManager eagerCachingOptionManager = new EagerCachingOptionManager(optionManager); eagerCachingOptionManager.deleteAllOptions(OptionValue.OptionType.SYSTEM); assertNull(eagerCachingOptionManager.getOption(optionValueA.getName())); assertNull(eagerCachingOptionManager.getOption(optionValueB.getName())); assertNull(eagerCachingOptionManager.getOption(optionValueC.getName())); verify(optionManager, times(1)).deleteAllOptions(OptionValue.OptionType.SYSTEM); } | @Override public boolean deleteAllOptions(OptionType type) { return super.deleteAllOptions(type) && delegate.deleteAllOptions(type); } | EagerCachingOptionManager extends InMemoryOptionManager { @Override public boolean deleteAllOptions(OptionType type) { return super.deleteAllOptions(type) && delegate.deleteAllOptions(type); } } | EagerCachingOptionManager extends InMemoryOptionManager { @Override public boolean deleteAllOptions(OptionType type) { return super.deleteAllOptions(type) && delegate.deleteAllOptions(type); } EagerCachingOptionManager(OptionManager delegate); } | EagerCachingOptionManager extends InMemoryOptionManager { @Override public boolean deleteAllOptions(OptionType type) { return super.deleteAllOptions(type) && delegate.deleteAllOptions(type); } EagerCachingOptionManager(OptionManager delegate); @Override boolean setOption(OptionValue value); @Override boolean deleteOption(String name, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override double getOption(DoubleValidator validator); @Override long getOption(LongValidator validator); @Override String getOption(StringValidator validator); @Override OptionValidatorListing getOptionValidatorListing(); } | EagerCachingOptionManager extends InMemoryOptionManager { @Override public boolean deleteAllOptions(OptionType type) { return super.deleteAllOptions(type) && delegate.deleteAllOptions(type); } EagerCachingOptionManager(OptionManager delegate); @Override boolean setOption(OptionValue value); @Override boolean deleteOption(String name, OptionType type); @Override boolean deleteAllOptions(OptionType type); @Override double getOption(DoubleValidator validator); @Override long getOption(LongValidator validator); @Override String getOption(StringValidator validator); @Override OptionValidatorListing getOptionValidatorListing(); } |
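The four EagerCachingOptionManager rows above all follow one write-through caching discipline: reads are served from memory only (hence `times(0)` on the delegate in testGetOption), while every mutation updates the cache and then the delegate. A minimal sketch with maps standing in for the option managers:

```java
import java.util.HashMap;
import java.util.Map;

// Write-through cache sketch: eager load at construction, reads from memory,
// mutations forwarded to the delegate so it stays the source of truth.
class WriteThroughCacheSketch {
  private final Map<String, String> cache = new HashMap<>();
  private final Map<String, String> delegate; // stand-in for the backing OptionManager

  WriteThroughCacheSketch(Map<String, String> delegate) {
    this.delegate = delegate;
    cache.putAll(delegate); // eager load, hence "eager caching"
  }

  String get(String key) { return cache.get(key); } // never touches the delegate

  void set(String key, String value) {
    cache.put(key, value);
    delegate.put(key, value); // write-through
  }

  void delete(String key) {
    cache.remove(key);
    delegate.remove(key);     // mirrored removal, as verified in testDeleteOption
  }
}
```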
@Test public void testVisitor() { FieldTransformationBase exp = new FieldConvertToJSON(); String name = exp.accept(new FieldTransformationBase.FieldTransformationVisitor<String>() { @Override public String visit(FieldConvertCase col) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldTrim changeCase) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldExtract extract) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldConvertFloatToInteger trim) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldConvertFloatToDecimal calculatedField) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldConvertDateToText fieldTransformation) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldConvertNumberToDate numberToDate) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldConvertDateToNumber dateToNumber) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldConvertTextToDate textToDate) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldConvertListToText fieldTransformation) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldConvertToJSON fieldTransformation) throws Exception { return "json"; } @Override public String visit(FieldUnnestList unnest) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldReplacePattern replacePattern) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldReplaceCustom replacePattern) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldReplaceValue replacePattern) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldReplaceRange replaceRange) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldExtractMap extract) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldExtractList extract) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldSplit split) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldSimpleConvertToType toType) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldConvertToTypeIfPossible toTypeIfPossible) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldConvertToTypeWithPatternIfPossible toTypeIfPossible) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(FieldConvertFromJSON fromJson) throws Exception { throw new UnsupportedOperationException("NYI"); } }); assertEquals("json", name); } | public final <T> T accept(FieldTransformationVisitor<T> visitor) throws VisitorException { return acceptor.accept(visitor, this); } | FieldTransformationBase { public final <T> T accept(FieldTransformationVisitor<T> visitor) throws VisitorException { return acceptor.accept(visitor, this); } } | 
FieldTransformationBase { public final <T> T accept(FieldTransformationVisitor<T> visitor) throws VisitorException { return acceptor.accept(visitor, this); } } | FieldTransformationBase { public final <T> T accept(FieldTransformationVisitor<T> visitor) throws VisitorException { return acceptor.accept(visitor, this); } final T accept(FieldTransformationVisitor<T> visitor); FieldTransformation wrap(); @Override String toString(); static FieldTransformationBase unwrap(FieldTransformation t); static Converter<FieldTransformationBase, FieldTransformation> converter(); } | FieldTransformationBase { public final <T> T accept(FieldTransformationVisitor<T> visitor) throws VisitorException { return acceptor.accept(visitor, this); } final T accept(FieldTransformationVisitor<T> visitor); FieldTransformation wrap(); @Override String toString(); static FieldTransformationBase unwrap(FieldTransformation t); static Converter<FieldTransformationBase, FieldTransformation> converter(); static final Acceptor<FieldTransformationBase, FieldTransformationVisitor<?>, FieldTransformation> acceptor; } |
@Test public void test() throws Exception { Class<?> byteBufferPositionedReadableClass = getClass("org.apache.hadoop.fs.ByteBufferPositionedReadable"); assumeNonMaprProfile(); final IOException ioException = new IOException("test io exception"); final FSError fsError = newFSError(ioException); FSDataInputStream fdis = new FSDataInputStream(mock(InputStream.class, withSettings().extraInterfaces(Seekable.class, byteBufferPositionedReadableClass == null ? AutoCloseable.class : byteBufferPositionedReadableClass, PositionedReadable.class, ByteBufferReadable.class).defaultAnswer(new Answer<Object>() { @Override public Object answer(InvocationOnMock invocation) throws Throwable { throw fsError; } }))); FSInputStream fdisw = FSDataInputStreamWrapper.of(fdis); Object[] params = getDummyArguments(method); try { method.invoke(fdisw, params); } catch(InvocationTargetException e) { if (byteBufferPositionedReadableClass == null) { assertThat(e.getTargetException(), anyOf(is(instanceOf(IOException.class)), is(instanceOf(UnsupportedOperationException.class)))); } else { assertThat(e.getTargetException(), is(instanceOf(IOException.class))); } if (e.getTargetException() instanceof IOException) { assertThat((IOException) e.getTargetException(), is(sameInstance(ioException))); } } } | public static FSInputStream of(FSDataInputStream in) throws IOException { if (in.getWrappedStream() instanceof ByteBufferReadable) { return new FSDataInputStreamWrapper(in); } return new ByteArrayFSInputStream(in); } | FSDataInputStreamWrapper extends FSInputStream { public static FSInputStream of(FSDataInputStream in) throws IOException { if (in.getWrappedStream() instanceof ByteBufferReadable) { return new FSDataInputStreamWrapper(in); } return new ByteArrayFSInputStream(in); } } | FSDataInputStreamWrapper extends FSInputStream { public static FSInputStream of(FSDataInputStream in) throws IOException { if (in.getWrappedStream() instanceof ByteBufferReadable) { return new FSDataInputStreamWrapper(in); } return new ByteArrayFSInputStream(in); } private FSDataInputStreamWrapper(FSDataInputStream in); } | FSDataInputStreamWrapper extends FSInputStream { public static FSInputStream of(FSDataInputStream in) throws IOException { if (in.getWrappedStream() instanceof ByteBufferReadable) { return new FSDataInputStreamWrapper(in); } return new ByteArrayFSInputStream(in); } private FSDataInputStreamWrapper(FSDataInputStream in); static FSInputStream of(FSDataInputStream in); @Override int read(); @Override int read(byte[] b); @Override int read(byte[] b, int off, int len); @Override int read(ByteBuffer dst); @Override int read(long position, ByteBuffer dst); @Override long getPosition(); @Override void setPosition(long position); @Override long skip(long n); @Override int available(); @Override void close(); @Override void mark(int readlimit); @Override void reset(); @Override boolean markSupported(); } | FSDataInputStreamWrapper extends FSInputStream { public static FSInputStream of(FSDataInputStream in) throws IOException { if (in.getWrappedStream() instanceof ByteBufferReadable) { return new FSDataInputStreamWrapper(in); } return new ByteArrayFSInputStream(in); } private FSDataInputStreamWrapper(FSDataInputStream in); static FSInputStream of(FSDataInputStream in); @Override int read(); @Override int read(byte[] b); @Override int read(byte[] b, int off, int len); @Override int read(ByteBuffer dst); @Override int read(long position, ByteBuffer dst); @Override long getPosition(); @Override void setPosition(long position); @Override long 
skip(long n); @Override int available(); @Override void close(); @Override void mark(int readlimit); @Override void reset(); @Override boolean markSupported(); } |
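The `of()` factory in the row above picks the wrapper from a runtime capability check on the wrapped stream (`instanceof ByteBufferReadable`). A sketch of the same dispatch with invented stand-in types, not the Hadoop ones:

```java
import java.io.ByteArrayInputStream;
import java.io.InputStream;

// Capability-dispatch sketch: of() inspects the wrapped stream at runtime and
// selects the wrapper, mirroring the instanceof check in the factory above.
interface BufferCapable { }

abstract class WrappedInput {
  final InputStream in;
  WrappedInput(InputStream in) { this.in = in; }

  static WrappedInput of(InputStream in) {
    if (in instanceof BufferCapable) {
      return new DirectWrapper(in);   // stream can serve buffer reads directly
    }
    return new CopyingWrapper(in);    // fallback: reads go through a byte[] copy
  }
}

class BufferingStream extends ByteArrayInputStream implements BufferCapable {
  BufferingStream(byte[] data) { super(data); }
}

class DirectWrapper extends WrappedInput {
  DirectWrapper(InputStream in) { super(in); }
}

class CopyingWrapper extends WrappedInput {
  CopyingWrapper(InputStream in) { super(in); }
}

class DispatchDemo {
  public static void main(String[] args) {
    System.out.println(WrappedInput.of(new BufferingStream(new byte[0])).getClass().getSimpleName());      // DirectWrapper
    System.out.println(WrappedInput.of(new ByteArrayInputStream(new byte[0])).getClass().getSimpleName()); // CopyingWrapper
  }
}
```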
@Test public void throwsProperly() throws Exception { final String username = "throwsProperly"; final StoragePlugin plugin = mock(StoragePlugin.class); final SourceConfig sourceConfig = new SourceConfig(); final PermissionCheckCache checks = new PermissionCheckCache(DirectProvider.wrap(plugin), DirectProvider.wrap(1000L), 1000); when(plugin.hasAccessPermission(anyString(), any(NamespaceKey.class), any(DatasetConfig.class))) .thenThrow(new RuntimeException("you shall not pass")); try { checks.hasAccess(username, new NamespaceKey(Lists.newArrayList("what")), null, new MetadataStatsCollector(), sourceConfig); fail(); } catch (UserException e) { assertEquals(UserBitShared.DremioPBError.ErrorType.PERMISSION, e.getErrorType()); assertEquals("Access denied reading dataset what.", e.getMessage()); } } | public boolean hasAccess(final String username, final NamespaceKey namespaceKey, final DatasetConfig config, final MetadataStatsCollector metadataStatsCollector, final SourceConfig sourceConfig) { final Stopwatch permissionCheck = Stopwatch.createStarted(); if (authTtlMs.get() == 0) { boolean hasAccess = checkPlugin(username, namespaceKey, config, sourceConfig); permissionCheck.stop(); metadataStatsCollector.addDatasetStat(namespaceKey.getSchemaPath(), PermissionCheckAccessType.PERMISSION_CACHE_MISS.name(), permissionCheck.elapsed(TimeUnit.MILLISECONDS)); return hasAccess; } final Key key = new Key(username, namespaceKey); final long now = System.currentTimeMillis(); final Callable<Value> loader = () -> { final boolean hasAccess = checkPlugin(username, namespaceKey, config, sourceConfig); if (!hasAccess) { throw NoAccessException.INSTANCE; } return new Value(true, now); }; Value value; try { PermissionCheckAccessType permissionCheckAccessType; value = getFromPermissionsCache(key, loader); if (now == value.createdAt) { permissionCheckAccessType = PermissionCheckAccessType.PERMISSION_CACHE_MISS; } else { permissionCheckAccessType = PermissionCheckAccessType.PERMISSION_CACHE_HIT; } if (now - value.createdAt > authTtlMs.get()) { permissionsCache.invalidate(key); value = getFromPermissionsCache(key, loader); permissionCheckAccessType = PermissionCheckAccessType.PERMISSION_CACHE_EXPIRED; } permissionCheck.stop(); metadataStatsCollector.addDatasetStat(namespaceKey.getSchemaPath(), permissionCheckAccessType.name(), permissionCheck.elapsed(TimeUnit.MILLISECONDS)); return value.hasAccess; } catch (ExecutionException e) { throw new RuntimeException("Permission check loader should not throw a checked exception", e.getCause()); } catch (UncheckedExecutionException e) { final Throwable cause = e.getCause(); if (cause instanceof UserException) { throw (UserException) cause; } throw UserException.permissionError(cause) .message("Access denied reading dataset %s.", namespaceKey.toString()) .build(logger); } } | PermissionCheckCache { public boolean hasAccess(final String username, final NamespaceKey namespaceKey, final DatasetConfig config, final MetadataStatsCollector metadataStatsCollector, final SourceConfig sourceConfig) { final Stopwatch permissionCheck = Stopwatch.createStarted(); if (authTtlMs.get() == 0) { boolean hasAccess = checkPlugin(username, namespaceKey, config, sourceConfig); permissionCheck.stop(); metadataStatsCollector.addDatasetStat(namespaceKey.getSchemaPath(), PermissionCheckAccessType.PERMISSION_CACHE_MISS.name(), permissionCheck.elapsed(TimeUnit.MILLISECONDS)); return hasAccess; } final Key key = new Key(username, namespaceKey); final long now = System.currentTimeMillis(); final 
Callable<Value> loader = () -> { final boolean hasAccess = checkPlugin(username, namespaceKey, config, sourceConfig); if (!hasAccess) { throw NoAccessException.INSTANCE; } return new Value(true, now); }; Value value; try { PermissionCheckAccessType permissionCheckAccessType; value = getFromPermissionsCache(key, loader); if (now == value.createdAt) { permissionCheckAccessType = PermissionCheckAccessType.PERMISSION_CACHE_MISS; } else { permissionCheckAccessType = PermissionCheckAccessType.PERMISSION_CACHE_HIT; } if (now - value.createdAt > authTtlMs.get()) { permissionsCache.invalidate(key); value = getFromPermissionsCache(key, loader); permissionCheckAccessType = PermissionCheckAccessType.PERMISSION_CACHE_EXPIRED; } permissionCheck.stop(); metadataStatsCollector.addDatasetStat(namespaceKey.getSchemaPath(), permissionCheckAccessType.name(), permissionCheck.elapsed(TimeUnit.MILLISECONDS)); return value.hasAccess; } catch (ExecutionException e) { throw new RuntimeException("Permission check loader should not throw a checked exception", e.getCause()); } catch (UncheckedExecutionException e) { final Throwable cause = e.getCause(); if (cause instanceof UserException) { throw (UserException) cause; } throw UserException.permissionError(cause) .message("Access denied reading dataset %s.", namespaceKey.toString()) .build(logger); } } } | PermissionCheckCache { public boolean hasAccess(final String username, final NamespaceKey namespaceKey, final DatasetConfig config, final MetadataStatsCollector metadataStatsCollector, final SourceConfig sourceConfig) { final Stopwatch permissionCheck = Stopwatch.createStarted(); if (authTtlMs.get() == 0) { boolean hasAccess = checkPlugin(username, namespaceKey, config, sourceConfig); permissionCheck.stop(); metadataStatsCollector.addDatasetStat(namespaceKey.getSchemaPath(), PermissionCheckAccessType.PERMISSION_CACHE_MISS.name(), permissionCheck.elapsed(TimeUnit.MILLISECONDS)); return hasAccess; } final Key key = new Key(username, namespaceKey); final long now = System.currentTimeMillis(); final Callable<Value> loader = () -> { final boolean hasAccess = checkPlugin(username, namespaceKey, config, sourceConfig); if (!hasAccess) { throw NoAccessException.INSTANCE; } return new Value(true, now); }; Value value; try { PermissionCheckAccessType permissionCheckAccessType; value = getFromPermissionsCache(key, loader); if (now == value.createdAt) { permissionCheckAccessType = PermissionCheckAccessType.PERMISSION_CACHE_MISS; } else { permissionCheckAccessType = PermissionCheckAccessType.PERMISSION_CACHE_HIT; } if (now - value.createdAt > authTtlMs.get()) { permissionsCache.invalidate(key); value = getFromPermissionsCache(key, loader); permissionCheckAccessType = PermissionCheckAccessType.PERMISSION_CACHE_EXPIRED; } permissionCheck.stop(); metadataStatsCollector.addDatasetStat(namespaceKey.getSchemaPath(), permissionCheckAccessType.name(), permissionCheck.elapsed(TimeUnit.MILLISECONDS)); return value.hasAccess; } catch (ExecutionException e) { throw new RuntimeException("Permission check loader should not throw a checked exception", e.getCause()); } catch (UncheckedExecutionException e) { final Throwable cause = e.getCause(); if (cause instanceof UserException) { throw (UserException) cause; } throw UserException.permissionError(cause) .message("Access denied reading dataset %s.", namespaceKey.toString()) .build(logger); } } PermissionCheckCache(
Provider<StoragePlugin> plugin,
Provider<Long> authTtlMs,
final long maximumSize); } | PermissionCheckCache { public boolean hasAccess(final String username, final NamespaceKey namespaceKey, final DatasetConfig config, final MetadataStatsCollector metadataStatsCollector, final SourceConfig sourceConfig) { final Stopwatch permissionCheck = Stopwatch.createStarted(); if (authTtlMs.get() == 0) { boolean hasAccess = checkPlugin(username, namespaceKey, config, sourceConfig); permissionCheck.stop(); metadataStatsCollector.addDatasetStat(namespaceKey.getSchemaPath(), PermissionCheckAccessType.PERMISSION_CACHE_MISS.name(), permissionCheck.elapsed(TimeUnit.MILLISECONDS)); return hasAccess; } final Key key = new Key(username, namespaceKey); final long now = System.currentTimeMillis(); final Callable<Value> loader = () -> { final boolean hasAccess = checkPlugin(username, namespaceKey, config, sourceConfig); if (!hasAccess) { throw NoAccessException.INSTANCE; } return new Value(true, now); }; Value value; try { PermissionCheckAccessType permissionCheckAccessType; value = getFromPermissionsCache(key, loader); if (now == value.createdAt) { permissionCheckAccessType = PermissionCheckAccessType.PERMISSION_CACHE_MISS; } else { permissionCheckAccessType = PermissionCheckAccessType.PERMISSION_CACHE_HIT; } if (now - value.createdAt > authTtlMs.get()) { permissionsCache.invalidate(key); value = getFromPermissionsCache(key, loader); permissionCheckAccessType = PermissionCheckAccessType.PERMISSION_CACHE_EXPIRED; } permissionCheck.stop(); metadataStatsCollector.addDatasetStat(namespaceKey.getSchemaPath(), permissionCheckAccessType.name(), permissionCheck.elapsed(TimeUnit.MILLISECONDS)); return value.hasAccess; } catch (ExecutionException e) { throw new RuntimeException("Permission check loader should not throw a checked exception", e.getCause()); } catch (UncheckedExecutionException e) { final Throwable cause = e.getCause(); if (cause instanceof UserException) { throw (UserException) cause; } throw UserException.permissionError(cause) .message("Access denied reading dataset %s.", namespaceKey.toString()) .build(logger); } } PermissionCheckCache(
Provider<StoragePlugin> plugin,
Provider<Long> authTtlMs,
final long maximumSize); boolean hasAccess(final String username, final NamespaceKey namespaceKey, final DatasetConfig config, final MetadataStatsCollector metadataStatsCollector, final SourceConfig sourceConfig); } | PermissionCheckCache { public boolean hasAccess(final String username, final NamespaceKey namespaceKey, final DatasetConfig config, final MetadataStatsCollector metadataStatsCollector, final SourceConfig sourceConfig) { final Stopwatch permissionCheck = Stopwatch.createStarted(); if (authTtlMs.get() == 0) { boolean hasAccess = checkPlugin(username, namespaceKey, config, sourceConfig); permissionCheck.stop(); metadataStatsCollector.addDatasetStat(namespaceKey.getSchemaPath(), PermissionCheckAccessType.PERMISSION_CACHE_MISS.name(), permissionCheck.elapsed(TimeUnit.MILLISECONDS)); return hasAccess; } final Key key = new Key(username, namespaceKey); final long now = System.currentTimeMillis(); final Callable<Value> loader = () -> { final boolean hasAccess = checkPlugin(username, namespaceKey, config, sourceConfig); if (!hasAccess) { throw NoAccessException.INSTANCE; } return new Value(true, now); }; Value value; try { PermissionCheckAccessType permissionCheckAccessType; value = getFromPermissionsCache(key, loader); if (now == value.createdAt) { permissionCheckAccessType = PermissionCheckAccessType.PERMISSION_CACHE_MISS; } else { permissionCheckAccessType = PermissionCheckAccessType.PERMISSION_CACHE_HIT; } if (now - value.createdAt > authTtlMs.get()) { permissionsCache.invalidate(key); value = getFromPermissionsCache(key, loader); permissionCheckAccessType = PermissionCheckAccessType.PERMISSION_CACHE_EXPIRED; } permissionCheck.stop(); metadataStatsCollector.addDatasetStat(namespaceKey.getSchemaPath(), permissionCheckAccessType.name(), permissionCheck.elapsed(TimeUnit.MILLISECONDS)); return value.hasAccess; } catch (ExecutionException e) { throw new RuntimeException("Permission check loader should not throw a checked exception", e.getCause()); } catch (UncheckedExecutionException e) { final Throwable cause = e.getCause(); if (cause instanceof UserException) { throw (UserException) cause; } throw UserException.permissionError(cause) .message("Access denied reading dataset %s.", namespaceKey.toString()) .build(logger); } } PermissionCheckCache(
Provider<StoragePlugin> plugin,
Provider<Long> authTtlMs,
final long maximumSize); boolean hasAccess(final String username, final NamespaceKey namespaceKey, final DatasetConfig config, final MetadataStatsCollector metadataStatsCollector, final SourceConfig sourceConfig); } |
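The heart of hasAccess is a TTL check on cached entries: a `Value` carries its creation time, and an entry older than the TTL is invalidated and recomputed, which is what distinguishes the HIT/MISS/EXPIRED stats recorded above. A simplified single-threaded sketch (the real class uses a Guava cache and surfaces denials as exceptions):

```java
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Callable;

// TTL-cache sketch: entries remember when they were created; stale entries
// are recomputed with a fresh timestamp, mirroring hasAccess() above.
class TtlCacheSketch {
  static final class Value {
    final boolean hasAccess;
    final long createdAt;
    Value(boolean hasAccess, long createdAt) { this.hasAccess = hasAccess; this.createdAt = createdAt; }
  }

  private final Map<String, Value> cache = new HashMap<>();
  private final long ttlMs;

  TtlCacheSketch(long ttlMs) { this.ttlMs = ttlMs; }

  boolean hasAccess(String key, Callable<Boolean> check) throws Exception {
    long now = System.currentTimeMillis();
    Value value = cache.get(key);
    if (value == null || now - value.createdAt > ttlMs) {
      // cache miss or expired entry: recompute and store with a fresh timestamp
      value = new Value(check.call(), now);
      cache.put(key, value);
    }
    return value.hasAccess;
  }
}
```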
@Test public void testAccessUsernameOverride() throws Exception { final NamespaceKey namespaceKey = new NamespaceKey("test"); final ViewExpansionContext viewExpansionContext = mock(ViewExpansionContext.class); when(viewExpansionContext.getQueryUser()).thenReturn("newaccessuser"); final SchemaConfig schemaConfig = mock(SchemaConfig.class); when(schemaConfig.getUserName()).thenReturn("username"); when(schemaConfig.getViewExpansionContext()).thenReturn(viewExpansionContext); final MetadataStatsCollector statsCollector = mock(MetadataStatsCollector.class); final MetadataRequestOptions metadataRequestOptions = mock(MetadataRequestOptions.class); when(metadataRequestOptions.getSchemaConfig()).thenReturn(schemaConfig); when(metadataRequestOptions.getStatsCollector()).thenReturn(statsCollector); final ReadDefinition readDefinition = new ReadDefinition(); readDefinition.setSplitVersion(0L); final DatasetConfig datasetConfig = new DatasetConfig(); datasetConfig.setType(DatasetType.PHYSICAL_DATASET); datasetConfig.setId(new EntityId("test")); datasetConfig.setFullPathList(Collections.singletonList("test")); datasetConfig.setReadDefinition(readDefinition); datasetConfig.setTotalNumSplits(0); class FakeSource extends ConnectionConf<FakeSource, StoragePlugin> implements ImpersonationConf { @Override public StoragePlugin newPlugin(SabotContext context, String name, Provider<StoragePluginId> pluginIdProvider) { return null; } @Override public String getAccessUserName(String delegatedUser, String queryUserName) { return queryUserName; } } final FakeSource fakeSource = new FakeSource(); final ManagedStoragePlugin managedStoragePlugin = mock(ManagedStoragePlugin.class); when(managedStoragePlugin.getId()).thenReturn(mock(StoragePluginId.class)); doReturn(fakeSource).when(managedStoragePlugin).getConnectionConf(); when(managedStoragePlugin.isCompleteAndValid(any(), any())).thenReturn(true); doThrow(new RuntimeException("Wrong username")) .when(managedStoragePlugin).checkAccess(namespaceKey, datasetConfig, "username", metadataRequestOptions); final PluginRetriever pluginRetriever = mock(PluginRetriever.class); when(pluginRetriever.getPlugin(namespaceKey.getRoot(), false)).thenReturn(managedStoragePlugin); final NamespaceService namespaceService = mock(NamespaceService.class); when(namespaceService.getDataset(namespaceKey)).thenReturn(datasetConfig); final OptionManager optionManager = mock(OptionManager.class); final DatasetManager datasetManager = new DatasetManager(pluginRetriever, namespaceService, optionManager); datasetManager.getTable(namespaceKey, metadataRequestOptions, false); } | public DremioTable getTable( NamespaceKey key, MetadataRequestOptions options, boolean ignoreColumnCount ){ final ManagedStoragePlugin plugin; final DatasetConfig config = getConfig(key); if(config != null) { key = new NamespaceKey(config.getFullPathList()); } plugin = plugins.getPlugin(key.getRoot(), false); if(plugin != null) { if(config == null || config.getType() != DatasetType.VIRTUAL_DATASET) { return getTableFromPlugin(key, config, plugin, options, ignoreColumnCount); } } if(config == null) { return null; } if(config.getType() != DatasetType.VIRTUAL_DATASET) { return null; } return createTableFromVirtualDataset(config, options); } | DatasetManager { public DremioTable getTable( NamespaceKey key, MetadataRequestOptions options, boolean ignoreColumnCount ){ final ManagedStoragePlugin plugin; final DatasetConfig config = getConfig(key); if(config != null) { key = new NamespaceKey(config.getFullPathList()); } plugin = 
plugins.getPlugin(key.getRoot(), false); if(plugin != null) { if(config == null || config.getType() != DatasetType.VIRTUAL_DATASET) { return getTableFromPlugin(key, config, plugin, options, ignoreColumnCount); } } if(config == null) { return null; } if(config.getType() != DatasetType.VIRTUAL_DATASET) { return null; } return createTableFromVirtualDataset(config, options); } } | DatasetManager { public DremioTable getTable( NamespaceKey key, MetadataRequestOptions options, boolean ignoreColumnCount ){ final ManagedStoragePlugin plugin; final DatasetConfig config = getConfig(key); if(config != null) { key = new NamespaceKey(config.getFullPathList()); } plugin = plugins.getPlugin(key.getRoot(), false); if(plugin != null) { if(config == null || config.getType() != DatasetType.VIRTUAL_DATASET) { return getTableFromPlugin(key, config, plugin, options, ignoreColumnCount); } } if(config == null) { return null; } if(config.getType() != DatasetType.VIRTUAL_DATASET) { return null; } return createTableFromVirtualDataset(config, options); } DatasetManager(
PluginRetriever plugins,
NamespaceService userNamespaceService,
OptionManager optionManager
); } | DatasetManager { public DremioTable getTable( NamespaceKey key, MetadataRequestOptions options, boolean ignoreColumnCount ){ final ManagedStoragePlugin plugin; final DatasetConfig config = getConfig(key); if(config != null) { key = new NamespaceKey(config.getFullPathList()); } plugin = plugins.getPlugin(key.getRoot(), false); if(plugin != null) { if(config == null || config.getType() != DatasetType.VIRTUAL_DATASET) { return getTableFromPlugin(key, config, plugin, options, ignoreColumnCount); } } if(config == null) { return null; } if(config.getType() != DatasetType.VIRTUAL_DATASET) { return null; } return createTableFromVirtualDataset(config, options); } DatasetManager(
PluginRetriever plugins,
NamespaceService userNamespaceService,
OptionManager optionManager
); DremioTable getTable(
NamespaceKey key,
MetadataRequestOptions options,
boolean ignoreColumnCount
); DremioTable getTable(
String datasetId,
MetadataRequestOptions options
); boolean createOrUpdateDataset(
ManagedStoragePlugin plugin,
NamespaceKey datasetPath,
DatasetConfig newConfig,
NamespaceAttribute... attributes
); void createDataset(NamespaceKey key, ManagedStoragePlugin plugin, Function<DatasetConfig, DatasetConfig> datasetMutator); } | DatasetManager { public DremioTable getTable( NamespaceKey key, MetadataRequestOptions options, boolean ignoreColumnCount ){ final ManagedStoragePlugin plugin; final DatasetConfig config = getConfig(key); if(config != null) { key = new NamespaceKey(config.getFullPathList()); } plugin = plugins.getPlugin(key.getRoot(), false); if(plugin != null) { if(config == null || config.getType() != DatasetType.VIRTUAL_DATASET) { return getTableFromPlugin(key, config, plugin, options, ignoreColumnCount); } } if(config == null) { return null; } if(config.getType() != DatasetType.VIRTUAL_DATASET) { return null; } return createTableFromVirtualDataset(config, options); } DatasetManager(
PluginRetriever plugins,
NamespaceService userNamespaceService,
OptionManager optionManager
); DremioTable getTable(
NamespaceKey key,
MetadataRequestOptions options,
boolean ignoreColumnCount
); DremioTable getTable(
String datasetId,
MetadataRequestOptions options
); boolean createOrUpdateDataset(
ManagedStoragePlugin plugin,
NamespaceKey datasetPath,
DatasetConfig newConfig,
NamespaceAttribute... attributes
); void createDataset(NamespaceKey key, ManagedStoragePlugin plugin, Function<DatasetConfig, DatasetConfig> datasetMutator); } |
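The FakeSource in the row above is the crux of testAccessUsernameOverride: because its getAccessUserName returns the query user, DatasetManager resolves access checks as "newaccessuser", so the checkAccess stub that throws for "username" is never triggered. Below is a minimal, self-contained sketch of that delegation rule; the class name is hypothetical, and only the getAccessUserName signature is taken from the row above.

// Hypothetical illustration of the ImpersonationConf rule exercised by FakeSource:
// the access identity is always the query user, never the delegated schema user.
public class QueryUserImpersonationSketch {
  // Mirrors FakeSource.getAccessUserName(String delegatedUser, String queryUserName).
  static String accessUserName(String delegatedUser, String queryUserName) {
    return queryUserName;
  }

  public static void main(String[] args) {
    // Same pair of identities as the test: schema user vs. query user.
    String resolved = accessUserName("username", "newaccessuser");
    if (!"newaccessuser".equals(resolved)) {
      throw new AssertionError("expected the query user to win");
    }
  }
}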
@Test public void ignoreColumnCountOnDrop() throws Exception { final NamespaceKey namespaceKey = new NamespaceKey("test"); final ViewExpansionContext viewExpansionContext = mock(ViewExpansionContext.class); when(viewExpansionContext.getQueryUser()).thenReturn("newaccessuser"); final SchemaConfig schemaConfig = mock(SchemaConfig.class); when(schemaConfig.getUserName()).thenReturn("username"); when(schemaConfig.getViewExpansionContext()).thenReturn(viewExpansionContext); final MetadataStatsCollector statsCollector = mock(MetadataStatsCollector.class); final MetadataRequestOptions metadataRequestOptions = mock(MetadataRequestOptions.class); when(metadataRequestOptions.getSchemaConfig()).thenReturn(schemaConfig); when(metadataRequestOptions.getStatsCollector()).thenReturn(statsCollector); final ReadDefinition readDefinition = new ReadDefinition(); readDefinition.setSplitVersion(0L); final DatasetConfig datasetConfig = new DatasetConfig(); datasetConfig.setType(DatasetType.PHYSICAL_DATASET); datasetConfig.setId(new EntityId("test")); datasetConfig.setFullPathList(ImmutableList.of("test", "file", "foobar")); datasetConfig.setReadDefinition(readDefinition); datasetConfig.setTotalNumSplits(0); final ManagedStoragePlugin managedStoragePlugin = mock(ManagedStoragePlugin.class); when(managedStoragePlugin.getId()).thenReturn(mock(StoragePluginId.class)); when(managedStoragePlugin.isCompleteAndValid(any(), any())).thenReturn(false); when(managedStoragePlugin.getDefaultRetrievalOptions()).thenReturn(DatasetRetrievalOptions.DEFAULT); when(managedStoragePlugin.getDatasetHandle(any(), any(), any())).thenAnswer(invocation -> { Assert.assertEquals(invocation.getArgumentAt(2, DatasetRetrievalOptions.class).maxMetadataLeafColumns(), Integer.MAX_VALUE); return Optional.empty(); }); final PluginRetriever pluginRetriever = mock(PluginRetriever.class); when(pluginRetriever.getPlugin(namespaceKey.getRoot(), false)).thenReturn(managedStoragePlugin); final NamespaceService namespaceService = mock(NamespaceService.class); when(namespaceService.getDataset(namespaceKey)).thenReturn(datasetConfig); final OptionManager optionManager = mock(OptionManager.class); final DatasetManager datasetManager = new DatasetManager(pluginRetriever, namespaceService, optionManager); datasetManager.getTable(namespaceKey, metadataRequestOptions, true); } | public DremioTable getTable( NamespaceKey key, MetadataRequestOptions options, boolean ignoreColumnCount ){ final ManagedStoragePlugin plugin; final DatasetConfig config = getConfig(key); if(config != null) { key = new NamespaceKey(config.getFullPathList()); } plugin = plugins.getPlugin(key.getRoot(), false); if(plugin != null) { if(config == null || config.getType() != DatasetType.VIRTUAL_DATASET) { return getTableFromPlugin(key, config, plugin, options, ignoreColumnCount); } } if(config == null) { return null; } if(config.getType() != DatasetType.VIRTUAL_DATASET) { return null; } return createTableFromVirtualDataset(config, options); } | DatasetManager { public DremioTable getTable( NamespaceKey key, MetadataRequestOptions options, boolean ignoreColumnCount ){ final ManagedStoragePlugin plugin; final DatasetConfig config = getConfig(key); if(config != null) { key = new NamespaceKey(config.getFullPathList()); } plugin = plugins.getPlugin(key.getRoot(), false); if(plugin != null) { if(config == null || config.getType() != DatasetType.VIRTUAL_DATASET) { return getTableFromPlugin(key, config, plugin, options, ignoreColumnCount); } } if(config == null) { return null; } if(config.getType()
!= DatasetType.VIRTUAL_DATASET) { return null; } return createTableFromVirtualDataset(config, options); } } | DatasetManager { public DremioTable getTable( NamespaceKey key, MetadataRequestOptions options, boolean ignoreColumnCount ){ final ManagedStoragePlugin plugin; final DatasetConfig config = getConfig(key); if(config != null) { key = new NamespaceKey(config.getFullPathList()); } plugin = plugins.getPlugin(key.getRoot(), false); if(plugin != null) { if(config == null || config.getType() != DatasetType.VIRTUAL_DATASET) { return getTableFromPlugin(key, config, plugin, options, ignoreColumnCount); } } if(config == null) { return null; } if(config.getType() != DatasetType.VIRTUAL_DATASET) { return null; } return createTableFromVirtualDataset(config, options); } DatasetManager(
PluginRetriever plugins,
NamespaceService userNamespaceService,
OptionManager optionManager
); } | DatasetManager { public DremioTable getTable( NamespaceKey key, MetadataRequestOptions options, boolean ignoreColumnCount ){ final ManagedStoragePlugin plugin; final DatasetConfig config = getConfig(key); if(config != null) { key = new NamespaceKey(config.getFullPathList()); } plugin = plugins.getPlugin(key.getRoot(), false); if(plugin != null) { if(config == null || config.getType() != DatasetType.VIRTUAL_DATASET) { return getTableFromPlugin(key, config, plugin, options, ignoreColumnCount); } } if(config == null) { return null; } if(config.getType() != DatasetType.VIRTUAL_DATASET) { return null; } return createTableFromVirtualDataset(config, options); } DatasetManager(
PluginRetriever plugins,
NamespaceService userNamespaceService,
OptionManager optionManager
); DremioTable getTable(
NamespaceKey key,
MetadataRequestOptions options,
boolean ignoreColumnCount
); DremioTable getTable(
String datasetId,
MetadataRequestOptions options
); boolean createOrUpdateDataset(
ManagedStoragePlugin plugin,
NamespaceKey datasetPath,
DatasetConfig newConfig,
NamespaceAttribute... attributes
); void createDataset(NamespaceKey key, ManagedStoragePlugin plugin, Function<DatasetConfig, DatasetConfig> datasetMutator); } | DatasetManager { public DremioTable getTable( NamespaceKey key, MetadataRequestOptions options, boolean ignoreColumnCount ){ final ManagedStoragePlugin plugin; final DatasetConfig config = getConfig(key); if(config != null) { key = new NamespaceKey(config.getFullPathList()); } plugin = plugins.getPlugin(key.getRoot(), false); if(plugin != null) { if(config == null || config.getType() != DatasetType.VIRTUAL_DATASET) { return getTableFromPlugin(key, config, plugin, options, ignoreColumnCount); } } if(config == null) { return null; } if(config.getType() != DatasetType.VIRTUAL_DATASET) { return null; } return createTableFromVirtualDataset(config, options); } DatasetManager(
PluginRetriever plugins,
NamespaceService userNamespaceService,
OptionManager optionManager
); DremioTable getTable(
NamespaceKey key,
MetadataRequestOptions options,
boolean ignoreColumnCount
); DremioTable getTable(
String datasetId,
MetadataRequestOptions options
); boolean createOrUpdateDataset(
ManagedStoragePlugin plugin,
NamespaceKey datasetPath,
DatasetConfig newConfig,
NamespaceAttribute... attributes
); void createDataset(NamespaceKey key, ManagedStoragePlugin plugin, Function<DatasetConfig, DatasetConfig> datasetMutator); } |
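ignoreColumnCountOnDrop verifies a pass-through argument rather than a return value: the getDatasetHandle stub uses Mockito's thenAnswer to assert, at call time, that the DatasetRetrievalOptions it receives carries maxMetadataLeafColumns() == Integer.MAX_VALUE. Below is a self-contained sketch of that pattern, assuming JUnit 4 and Mockito 1.x (the row uses getArgumentAt, which newer Mockito renames to getArgument); the Fetcher interface is a hypothetical stand-in.

import static org.mockito.Mockito.*;

import org.junit.Assert;

public class AnswerArgumentCheckSketch {
  // Hypothetical collaborator standing in for ManagedStoragePlugin.getDatasetHandle.
  interface Fetcher {
    Object fetch(String path, int maxColumns);
  }

  public static void main(String[] args) {
    Fetcher fetcher = mock(Fetcher.class);
    // Assert on what the caller actually passed, inside the stubbed answer,
    // just as the test inspects argument index 2 of getDatasetHandle.
    when(fetcher.fetch(anyString(), anyInt())).thenAnswer(invocation -> {
      Assert.assertEquals(Integer.MAX_VALUE,
          (int) invocation.getArgumentAt(1, Integer.class)); // getArgument(1) in Mockito 2+
      return null;
    });
    fetcher.fetch("one.two", Integer.MAX_VALUE); // trips the in-answer assertion if wrong
  }
}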
@Test public void deleteUnavailableDataset() throws Exception { NamespaceService ns = mock(NamespaceService.class); when(ns.getDataset(any())) .thenReturn( new DatasetConfig() .setTag("0") .setReadDefinition(new ReadDefinition()) .setFullPathList(ImmutableList.of("one", "two")) ); boolean[] deleted = new boolean[] {false}; doAnswer(invocation -> { deleted[0] = true; return null; }).when(ns).deleteDataset(any(), anyString()); ExtendedStoragePlugin sp = mock(ExtendedStoragePlugin.class); when(sp.getDatasetHandle(any(), any(), any())) .thenReturn(Optional.empty()); ManagedStoragePlugin.MetadataBridge msp = mock(ManagedStoragePlugin.MetadataBridge.class); when(msp.getMetadata()) .thenReturn(sp); when(msp.getMetadataPolicy()) .thenReturn(new MetadataPolicy().setDeleteUnavailableDatasets(false)); when(msp.getMaxMetadataColumns()) .thenReturn(MAX_COLUMNS); when(msp.getMaxNestedLevels()) .thenReturn(MAX_NESTED_LEVELS); when(msp.getNamespaceService()) .thenReturn(ns); SourceMetadataManager manager = new SourceMetadataManager( new NamespaceKey("joker"), mock(SchedulerService.class), true, mock(LegacyKVStore.class), msp, optionManager, CatalogServiceMonitor.DEFAULT, () -> broadcaster); assertEquals(DatasetCatalog.UpdateStatus.DELETED, manager.refreshDataset(new NamespaceKey(""), DatasetRetrievalOptions.DEFAULT)); assertTrue(deleted[0]); } | UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ?
BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ?
BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); void setMetadataSyncInfo(UpdateLastRefreshDateRequest request); @Override void close(); long getLastFullRefreshDateMs(); long getLastNamesRefreshDateMs(); } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); void setMetadataSyncInfo(UpdateLastRefreshDateRequest request); @Override void close(); long getLastFullRefreshDateMs(); long getLastNamesRefreshDateMs(); } |
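All of the delete* rows observe a void interaction (NamespaceService.deleteDataset) through a one-element boolean array mutated inside a doAnswer; the array is effectively final, so the lambda may write into it. Below is a self-contained sketch of the same pattern with hypothetical names; Mockito's verify, shown at the end, is the more conventional way to make the same check.

import static org.mockito.Mockito.*;

public class VoidCallCaptureSketch {
  // Hypothetical stand-in for NamespaceService.deleteDataset(key, tag).
  interface Store {
    void delete(String key, String tag);
  }

  public static void main(String[] args) {
    Store store = mock(Store.class);
    boolean[] deleted = new boolean[] {false}; // effectively-final flag, as in the tests
    doAnswer(invocation -> { deleted[0] = true; return null; })
        .when(store).delete(any(), anyString());

    store.delete("one.two", "0");

    if (!deleted[0]) {
      throw new AssertionError("delete was not invoked");
    }
    verify(store).delete("one.two", "0"); // equivalent, more idiomatic check
  }
}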
@Test public void doNotDeleteUnavailableDataset() throws Exception { NamespaceService ns = mock(NamespaceService.class); when(ns.getDataset(any())) .thenReturn(new DatasetConfig() .setReadDefinition(new ReadDefinition()) .setFullPathList(ImmutableList.of("one", "two"))); doThrow(new IllegalStateException("should not invoke deleteDataset()")) .when(ns) .deleteDataset(any(), anyString()); ExtendedStoragePlugin sp = mock(ExtendedStoragePlugin.class); when(sp.getDatasetHandle(any(), any(), any())) .thenReturn(Optional.empty()); ManagedStoragePlugin.MetadataBridge msp = mock(ManagedStoragePlugin.MetadataBridge.class); when(msp.getMetadata()) .thenReturn(sp); when(msp.getMetadataPolicy()) .thenReturn(new MetadataPolicy().setDeleteUnavailableDatasets(false)); when(msp.getMaxMetadataColumns()) .thenReturn(MAX_COLUMNS); when(msp.getMaxNestedLevels()) .thenReturn(MAX_NESTED_LEVELS); when(msp.getNamespaceService()) .thenReturn(ns); SourceMetadataManager manager = new SourceMetadataManager( new NamespaceKey("joker"), mock(SchedulerService.class), true, mock(LegacyKVStore.class), msp, optionManager, CatalogServiceMonitor.DEFAULT, () -> broadcaster); assertEquals(DatasetCatalog.UpdateStatus.UNCHANGED, manager.refreshDataset(new NamespaceKey(""), DatasetRetrievalOptions.DEFAULT.toBuilder() .setDeleteUnavailableDatasets(false) .build())); } | UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ?
BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ?
BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); void setMetadataSyncInfo(UpdateLastRefreshDateRequest request); @Override void close(); long getLastFullRefreshDateMs(); long getLastNamesRefreshDateMs(); } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); void setMetadataSyncInfo(UpdateLastRefreshDateRequest request); @Override void close(); long getLastFullRefreshDateMs(); long getLastNamesRefreshDateMs(); } |
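When a handle is present, refreshDataset can still short-circuit: for sources that support read signatures it calls validateMetadata, passing the stored signature as a lazy BytesOutput (or BytesOutput.NONE when no signature was recorded), and returns UNCHANGED on MetadataValidity.VALID. Below is a sketch of that lazy-signature shape; the BytesOutput interface here is a simplified look-alike, not the real connector type.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

public class SignatureGateSketch {
  // Simplified stand-in: the real BytesOutput also writes to an OutputStream on demand.
  interface BytesOutput {
    void writeTo(OutputStream os) throws IOException;
  }

  static final BytesOutput NONE = os -> { /* nothing recorded, nothing to write */ };

  // Mirrors the ternary in refreshDataset: no stored signature means NONE,
  // otherwise defer serialization until the validator asks for the bytes.
  static BytesOutput signatureOf(byte[] readSignature) {
    return readSignature == null ? NONE : os -> os.write(readSignature);
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    signatureOf(new byte[] {1, 2, 3}).writeTo(os);
    if (os.size() != 3) {
      throw new AssertionError("signature bytes were not written lazily");
    }
  }
}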
@Test public void deleteUnavailableDatasetWithoutDefinition() throws Exception { NamespaceService ns = mock(NamespaceService.class); when(ns.getDataset(any())) .thenReturn( new DatasetConfig() .setTag("0") .setFullPathList(ImmutableList.of("one", "two")) ); boolean[] deleted = new boolean[] {false}; doAnswer(invocation -> { deleted[0] = true; return null; }).when(ns).deleteDataset(any(), anyString()); ExtendedStoragePlugin sp = mock(ExtendedStoragePlugin.class); when(sp.getDatasetHandle(any(), any(), any())) .thenReturn(Optional.empty()); ManagedStoragePlugin.MetadataBridge msp = mock(ManagedStoragePlugin.MetadataBridge.class); when(msp.getMetadata()) .thenReturn(sp); when(msp.getMetadataPolicy()) .thenReturn(new MetadataPolicy().setDeleteUnavailableDatasets(false)); when(msp.getMaxMetadataColumns()) .thenReturn(MAX_COLUMNS); when(msp.getMaxNestedLevels()) .thenReturn(MAX_NESTED_LEVELS); when(msp.getNamespaceService()) .thenReturn(ns); SourceMetadataManager manager = new SourceMetadataManager( new NamespaceKey("joker"), mock(SchedulerService.class), true, mock(LegacyKVStore.class), msp, optionManager, CatalogServiceMonitor.DEFAULT, () -> broadcaster); assertEquals(DatasetCatalog.UpdateStatus.DELETED, manager.refreshDataset(new NamespaceKey(""), DatasetRetrievalOptions.DEFAULT)); assertTrue(deleted[0]); } | UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ?
BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ?
BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); void setMetadataSyncInfo(UpdateLastRefreshDateRequest request); @Override void close(); long getLastFullRefreshDateMs(); long getLastNamesRefreshDateMs(); } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); void setMetadataSyncInfo(UpdateLastRefreshDateRequest request); @Override void close(); long getLastFullRefreshDateMs(); long getLastNamesRefreshDateMs(); } |
@Test public void doNotDeleteUnavailableDatasetWithoutDefinition() throws Exception { NamespaceService ns = mock(NamespaceService.class); when(ns.getDataset(any())).thenReturn(new DatasetConfig() .setFullPathList(ImmutableList.of("one", "two"))); doThrow(new IllegalStateException("should not invoke deleteDataset()")) .when(ns) .deleteDataset(any(), anyString()); ExtendedStoragePlugin sp = mock(ExtendedStoragePlugin.class); when(sp.getDatasetHandle(any(), any(), any())) .thenReturn(Optional.empty()); ManagedStoragePlugin.MetadataBridge msp = mock(ManagedStoragePlugin.MetadataBridge.class); when(msp.getMetadata()) .thenReturn(sp); when(msp.getMetadataPolicy()) .thenReturn(new MetadataPolicy().setDeleteUnavailableDatasets(false)); when(msp.getMaxMetadataColumns()) .thenReturn(MAX_COLUMNS); when(msp.getMaxNestedLevels()) .thenReturn(MAX_NESTED_LEVELS); when(msp.getNamespaceService()) .thenReturn(ns); SourceMetadataManager manager = new SourceMetadataManager( new NamespaceKey("joker"), mock(SchedulerService.class), true, mock(LegacyKVStore.class), msp, optionManager, CatalogServiceMonitor.DEFAULT, () -> broadcaster); assertEquals(DatasetCatalog.UpdateStatus.UNCHANGED, manager.refreshDataset(new NamespaceKey(""), DatasetRetrievalOptions.DEFAULT.toBuilder() .setDeleteUnavailableDatasets(false) .build())); } | UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ?
BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ?
BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); void setMetadataSyncInfo(UpdateLastRefreshDateRequest request); @Override void close(); long getLastFullRefreshDateMs(); long getLastNamesRefreshDateMs(); } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); void setMetadataSyncInfo(UpdateLastRefreshDateRequest request); @Override void close(); long getLastFullRefreshDateMs(); long getLastNamesRefreshDateMs(); } |
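The row above pins down the guard taken when a known dataset disappears from the source: with deleteUnavailableDatasets set to false, refreshDataset must return UNCHANGED and never reach deleteDataset. A minimal, self-contained sketch of that guard in isolation (the enum and method names here are hypothetical stand-ins, not the Dremio API):

public class UnavailableDatasetGuardSketch {
  enum UpdateStatus { UNCHANGED, DELETED }

  // When the source no longer returns a handle for a dataset that exists in the
  // namespace, deletion only happens if the retrieval options explicitly allow it.
  static UpdateStatus onHandleMissing(boolean deleteUnavailableDatasets) {
    return deleteUnavailableDatasets ? UpdateStatus.DELETED : UpdateStatus.UNCHANGED;
  }

  public static void main(String[] args) {
    System.out.println(onHandleMissing(false)); // UNCHANGED, as the test above asserts
    System.out.println(onHandleMissing(true));  // DELETED
  }
}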
@Test public void checkForceUpdate() throws Exception { NamespaceService ns = mock(NamespaceService.class); when(ns.getDataset(any())).thenReturn(null); DatasetMetadataSaver saver = mock(DatasetMetadataSaver.class); doNothing().when(saver).saveDataset(any(), anyBoolean(), any(), any()); when(ns.newDatasetMetadataSaver(any(), any(), any(), anyLong())) .thenReturn(saver); ExtendedStoragePlugin sp = mock(ExtendedStoragePlugin.class); DatasetHandle handle = () -> new EntityPath(Lists.newArrayList("one")); when(sp.getDatasetHandle(any(), any(), any())) .thenReturn(Optional.of(handle)); when(sp.provideSignature(any(), any())) .thenReturn(BytesOutput.NONE); final boolean[] forced = new boolean[]{false}; doAnswer(invocation -> { forced[0] = true; return DatasetMetadata.of(DatasetStats.of(0, ScanCostFactor.OTHER.getFactor()), new Schema(new ArrayList<>())); }).when(sp).getDatasetMetadata(any(DatasetHandle.class), any(PartitionChunkListing.class), any(), any()); when(sp.listPartitionChunks(any(), any(), any())) .thenReturn(Collections::emptyIterator); when(sp.validateMetadata(any(), any(), any())) .thenReturn(SupportsReadSignature.MetadataValidity.VALID); ManagedStoragePlugin.MetadataBridge msp = mock(ManagedStoragePlugin.MetadataBridge.class); when(msp.getMetadata()) .thenReturn(sp); when(msp.getMetadataPolicy()) .thenReturn(new MetadataPolicy().setDeleteUnavailableDatasets(false)); when(msp.getMaxMetadataColumns()) .thenReturn(MAX_COLUMNS); when(msp.getMaxNestedLevels()) .thenReturn(MAX_NESTED_LEVELS); when(msp.getNamespaceService()).thenReturn(ns); SourceMetadataManager manager = new SourceMetadataManager( new NamespaceKey("joker"), mock(SchedulerService.class), true, mock(LegacyKVStore.class), msp, optionManager, CatalogServiceMonitor.DEFAULT, () -> broadcaster ); manager.refreshDataset(new NamespaceKey(""), DatasetRetrievalOptions.DEFAULT.toBuilder() .setForceUpdate(true) .build()); assertTrue(forced[0]); } | UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle
datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ?
BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); void setMetadataSyncInfo(UpdateLastRefreshDateRequest request); @Override void close(); long getLastFullRefreshDateMs(); long getLastNamesRefreshDateMs(); } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); void setMetadataSyncInfo(UpdateLastRefreshDateRequest request); @Override void close(); long getLastFullRefreshDateMs(); long getLastNamesRefreshDateMs(); } |
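The row above verifies that forceUpdate bypasses the read-signature fast path: even though validateMetadata is stubbed to return VALID, getDatasetMetadata is still invoked. A minimal sketch of that guard condition, using a hypothetical enum in place of the connector's MetadataValidity:

public class RefreshFastPathSketch {
  enum MetadataValidity { VALID, INVALID }

  // The refresh is skipped only when all of these hold: no forced update, the
  // dataset already exists with extended metadata (a read definition), the source
  // supports read signatures, and the stored signature still validates.
  static boolean canSkipRefresh(boolean forceUpdate, boolean exists, boolean isExtended,
                                boolean supportsReadSignature, MetadataValidity validity) {
    return !forceUpdate && exists && isExtended && supportsReadSignature
        && validity == MetadataValidity.VALID;
  }

  public static void main(String[] args) {
    System.out.println(canSkipRefresh(false, true, true, true, MetadataValidity.VALID)); // true: skip refresh
    System.out.println(canSkipRefresh(true, true, true, true, MetadataValidity.VALID));  // false: forced update
  }
}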
@Test public void dataSetPathCaseSensitivity() throws Exception { final String qualifier = "inspector"; final String original = "testPath"; final String capital = "TESTPATH"; final ImmutableList<String> fullPathList = ImmutableList.of(qualifier, original); final EntityPath originalPath = new EntityPath(fullPathList); final EntityPath capitalPath = new EntityPath(ImmutableList.of(qualifier, capital)); final DatasetHandle datasetHandle = () -> originalPath; final NamespaceKey dataSetKey = new NamespaceKey(ImmutableList.of(qualifier, capital)); ExtendedStoragePlugin mockStoragePlugin = mock(ExtendedStoragePlugin.class); when(mockStoragePlugin.listDatasetHandles()) .thenReturn(Collections::emptyIterator); when(mockStoragePlugin.getDatasetHandle(eq(capitalPath), any(), any())) .thenReturn(Optional.empty()); when(mockStoragePlugin.getDatasetHandle(eq(originalPath), any(), any())) .thenReturn(Optional.of(datasetHandle)); when(mockStoragePlugin.getState()) .thenReturn(SourceState.GOOD); when(mockStoragePlugin.listPartitionChunks(any(), any(), any())) .thenReturn(Collections::emptyIterator); when(mockStoragePlugin.validateMetadata(any(), any(), any())) .thenReturn(SupportsReadSignature.MetadataValidity.VALID); when(mockStoragePlugin.provideSignature(any(), any())) .thenReturn(BytesOutput.NONE); final boolean[] forced = new boolean[]{false}; doAnswer(invocation -> { forced[0] = true; return DatasetMetadata.of(DatasetStats.of(0, ScanCostFactor.OTHER.getFactor()), new Schema(new ArrayList<>())); }).when(mockStoragePlugin).getDatasetMetadata(any(DatasetHandle.class), any(PartitionChunkListing.class), any(), any()); NamespaceService ns = mock(NamespaceService.class); when(ns.getDataset(any())) .thenReturn(MetadataObjectsUtils.newShallowConfig(datasetHandle)); DatasetMetadataSaver saver = mock(DatasetMetadataSaver.class); doNothing().when(saver).saveDataset(any(), anyBoolean(), any(), any()); when(ns.newDatasetMetadataSaver(any(), any(), any(), anyLong())) .thenReturn(saver); ManagedStoragePlugin.MetadataBridge msp = mock(ManagedStoragePlugin.MetadataBridge.class); when(msp.getMetadata()) .thenReturn(mockStoragePlugin); when(msp.getMetadataPolicy()) .thenReturn(new MetadataPolicy()); when(msp.getNamespaceService()) .thenReturn(ns); SourceMetadataManager manager = new SourceMetadataManager( dataSetKey, mock(SchedulerService.class), true, mock(LegacyKVStore.class), msp, optionManager, CatalogServiceMonitor.DEFAULT, () -> broadcaster ); assertEquals(DatasetCatalog.UpdateStatus.CHANGED, manager.refreshDataset(dataSetKey, DatasetRetrievalOptions.DEFAULT.toBuilder() .build()) ); } | UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata =
bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof
SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ?
BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); void setMetadataSyncInfo(UpdateLastRefreshDateRequest request); @Override void close(); long getLastFullRefreshDateMs(); long getLastNamesRefreshDateMs(); } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); void setMetadataSyncInfo(UpdateLastRefreshDateRequest request); @Override void close(); long getLastFullRefreshDateMs(); long getLastNamesRefreshDateMs(); } |
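The case-sensitivity row above relies on Mockito's eq() matcher to stub different results for differently-cased paths: the capitalized path yields Optional.empty() while the original-cased path yields a handle. A small self-contained sketch of that per-argument stubbing pattern (the Lookup interface is illustrative only):

import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Optional;

public class PerArgumentStubSketch {
  interface Lookup { Optional<String> find(String path); }

  public static void main(String[] args) {
    Lookup lookup = mock(Lookup.class);
    // Distinct stubs keyed on the exact argument value, as in the test above.
    when(lookup.find(eq("TESTPATH"))).thenReturn(Optional.empty());
    when(lookup.find(eq("testPath"))).thenReturn(Optional.of("handle"));
    System.out.println(lookup.find("TESTPATH")); // Optional.empty
    System.out.println(lookup.find("testPath")); // Optional[handle]
  }
}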
@Test public void testVisitor() { ExpressionBase exp = new ExpCalculatedField("foo"); String name = exp.accept(new ExpressionBase.ExpressionVisitor<String>() { @Override public String visit(ExpColumnReference col) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(ExpConvertCase changeCase) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(ExpExtract extract) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(ExpTrim trim) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(ExpCalculatedField calculatedField) throws Exception { return "calc"; } @Override public String visit(ExpFieldTransformation fieldTransformation) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(ExpConvertType convertType) throws Exception { throw new UnsupportedOperationException("NYI"); } @Override public String visit(ExpMeasure measure) throws Exception { throw new UnsupportedOperationException("NYI"); } }); assertEquals("calc", name); } | public final <T> T accept(ExpressionVisitor<T> visitor) throws VisitorException { return acceptor.accept(visitor, this); } | ExpressionBase { public final <T> T accept(ExpressionVisitor<T> visitor) throws VisitorException { return acceptor.accept(visitor, this); } } | ExpressionBase { public final <T> T accept(ExpressionVisitor<T> visitor) throws VisitorException { return acceptor.accept(visitor, this); } } | ExpressionBase { public final <T> T accept(ExpressionVisitor<T> visitor) throws VisitorException { return acceptor.accept(visitor, this); } final T accept(ExpressionVisitor<T> visitor); Expression wrap(); @Override String toString(); static ExpressionBase unwrap(Expression t); static Converter<ExpressionBase, Expression> converter(); } | ExpressionBase { public final <T> T accept(ExpressionVisitor<T> visitor) throws VisitorException { return acceptor.accept(visitor, this); } final T accept(ExpressionVisitor<T> visitor); Expression wrap(); @Override String toString(); static ExpressionBase unwrap(Expression t); static Converter<ExpressionBase, Expression> converter(); static final Acceptor<ExpressionBase, ExpressionVisitor<?>, Expression> acceptor; } |
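The testVisitor row shows ExpressionBase.accept delegating to a static Acceptor so that each concrete expression dispatches to the matching visit overload. A minimal double-dispatch sketch of the same idea, with the Acceptor indirection replaced by plain per-subclass overrides (all names here are illustrative, not the Dremio classes):

interface ExprVisitor<T> {
  T visit(CalculatedField field);
  T visit(ColumnReference col);
}

abstract class Expr {
  abstract <T> T accept(ExprVisitor<T> visitor);
}

final class CalculatedField extends Expr {
  @Override <T> T accept(ExprVisitor<T> v) { return v.visit(this); } // dispatch on concrete type
}

final class ColumnReference extends Expr {
  @Override <T> T accept(ExprVisitor<T> v) { return v.visit(this); }
}

class VisitorDemo {
  public static void main(String[] args) {
    Expr e = new CalculatedField();
    String name = e.accept(new ExprVisitor<String>() {
      @Override public String visit(CalculatedField f) { return "calc"; }
      @Override public String visit(ColumnReference c) { throw new UnsupportedOperationException("NYI"); }
    });
    System.out.println(name); // prints "calc"
  }
}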
@Test public void exceedMaxColumnLimit() throws Exception { NamespaceService ns = mock(NamespaceService.class); when(ns.getDataset(any())) .thenReturn(null); ExtendedStoragePlugin sp = mock(ExtendedStoragePlugin.class); DatasetHandle handle = () -> new EntityPath(Lists.newArrayList("one")); when(sp.getDatasetHandle(any(), any(), any())) .thenReturn(Optional.of(handle)); when(sp.listPartitionChunks(any(), any(), any())) .thenReturn(Collections::emptyIterator); when(sp.validateMetadata(any(), eq(handle), any())) .thenReturn(SupportsReadSignature.MetadataValidity.INVALID); doThrow(new ColumnCountTooLargeException(1)) .when(sp) .getDatasetMetadata(eq(handle), any(PartitionChunkListing.class), any(), any()); ManagedStoragePlugin.MetadataBridge msp = mock(ManagedStoragePlugin.MetadataBridge.class); when(msp.getMetadata()) .thenReturn(sp); when(msp.getMetadataPolicy()) .thenReturn(new MetadataPolicy()); when(msp.getNamespaceService()) .thenReturn(ns); SourceMetadataManager manager = new SourceMetadataManager( new NamespaceKey("joker"), mock(SchedulerService.class), true, mock(LegacyKVStore.class), msp, optionManager, CatalogServiceMonitor.DEFAULT, () -> broadcaster ); thrownException.expect(new UserExceptionMatcher(UserBitShared.DremioPBError.ErrorType.VALIDATION, "exceeded the maximum number of fields of 1")); manager.refreshDataset(new NamespaceKey(""), DatasetRetrievalOptions.DEFAULT.toBuilder() .setForceUpdate(true) .setMaxMetadataLeafColumns(1) .build()); } | UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity =
supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ?
BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); void setMetadataSyncInfo(UpdateLastRefreshDateRequest request); @Override void close(); long getLastFullRefreshDateMs(); long getLastNamesRefreshDateMs(); } | SourceMetadataManager implements AutoCloseable { UpdateStatus refreshDataset(NamespaceKey datasetKey, DatasetRetrievalOptions options) throws ConnectorException, NamespaceException { options.withFallback(bridge.getDefaultRetrievalOptions()); final NamespaceService namespace = bridge.getNamespaceService(); final DatasetSaver saver = getSaver(); DatasetConfig knownConfig = null; try { knownConfig = namespace.getDataset(datasetKey); } catch (NamespaceNotFoundException ignored) { } final DatasetConfig currentConfig = knownConfig; final boolean exists = currentConfig != null; final boolean isExtended = exists && currentConfig.getReadDefinition() != null; final EntityPath entityPath; if (exists) { entityPath = new EntityPath(currentConfig.getFullPathList()); } else { entityPath = MetadataObjectsUtils.toEntityPath(datasetKey); } logger.debug("Dataset '{}' is being synced (exists: {}, isExtended: {})", datasetKey, exists, isExtended); final SourceMetadata sourceMetadata = bridge.getMetadata(); final Optional<DatasetHandle> handle = sourceMetadata.getDatasetHandle(entityPath, options.asGetDatasetOptions(currentConfig)); if (!handle.isPresent()) { if (!exists) { throw new DatasetNotFoundException(entityPath); } if (!options.deleteUnavailableDatasets()) { logger.debug("Dataset '{}' unavailable, but not deleted", datasetKey); return UpdateStatus.UNCHANGED; } try { namespace.deleteDataset(datasetKey, currentConfig.getTag()); logger.trace("Dataset '{}' deleted", datasetKey); return UpdateStatus.DELETED; } catch (NamespaceException e) { logger.debug("Dataset '{}' delete failed", datasetKey, e); return UpdateStatus.UNCHANGED; } } final DatasetHandle datasetHandle = handle.get(); if (!options.forceUpdate() && exists && isExtended && sourceMetadata instanceof SupportsReadSignature) { final SupportsReadSignature supportsReadSignature = (SupportsReadSignature) sourceMetadata; final DatasetMetadata currentExtended = new DatasetMetadataAdapter(currentConfig); final ByteString readSignature = currentConfig.getReadDefinition().getReadSignature(); final MetadataValidity metadataValidity = supportsReadSignature.validateMetadata( readSignature == null ? BytesOutput.NONE : os -> ByteString.writeTo(os, readSignature), datasetHandle, currentExtended); if (metadataValidity == MetadataValidity.VALID) { logger.trace("Dataset '{}' metadata is valid, skipping", datasetKey); return UpdateStatus.UNCHANGED; } } final DatasetConfig datasetConfig; if (exists) { datasetConfig = currentConfig; } else { datasetConfig = MetadataObjectsUtils.newShallowConfig(datasetHandle); } saver.save(datasetConfig, datasetHandle, sourceMetadata, false, options); logger.trace("Dataset '{}' metadata saved to namespace", datasetKey); return UpdateStatus.CHANGED; } SourceMetadataManager(
NamespaceKey sourceName,
SchedulerService scheduler,
boolean isMaster,
LegacyKVStore<NamespaceKey, SourceInternalData> sourceDataStore,
final ManagedStoragePlugin.MetadataBridge bridge,
final OptionManager options,
final CatalogServiceMonitor monitor,
final Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider
); void setMetadataSyncInfo(UpdateLastRefreshDateRequest request); @Override void close(); long getLastFullRefreshDateMs(); long getLastNamesRefreshDateMs(); } |
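
The exceedMaxColumnLimit row drives refreshDataset down its forced-update path, but the method's usual fast path is the read-signature check: when the update is not forced and the stored signature still validates, the dataset is left UNCHANGED. A condensed sketch of that gate under hypothetical names (Source, refresh), not the actual SupportsReadSignature plumbing:

import java.util.Optional;

// Hypothetical, condensed view of the validity gate inside refreshDataset.
public class ValidityGateSketch {
  enum Validity { VALID, INVALID }
  enum UpdateStatus { UNCHANGED, CHANGED }

  interface Source {
    Validity validate(Optional<byte[]> readSignature);
  }

  static UpdateStatus refresh(Source source, Optional<byte[]> storedSignature,
                              boolean forceUpdate, boolean hasReadDefinition) {
    // The probe is skipped only when the update is not forced and extended
    // metadata (a read definition) already exists for the dataset.
    if (!forceUpdate && hasReadDefinition
        && source.validate(storedSignature) == Validity.VALID) {
      return UpdateStatus.UNCHANGED;
    }
    return UpdateStatus.CHANGED; // stand-in for the full save/refresh path
  }

  public static void main(String[] args) {
    Source alwaysValid = sig -> Validity.VALID;
    System.out.println(refresh(alwaysValid, Optional.empty(), false, true)); // UNCHANGED
    System.out.println(refresh(alwaysValid, Optional.empty(), true, true));  // CHANGED
  }
}
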
@Test public void emptyIterator() { final NamespaceListing listing = new NamespaceListing(null, null, null, null); try { listing.newIterator(Collections.emptyIterator()).next(); fail(); } catch (NoSuchElementException expected) { } assertFalse(listing.newIterator(Collections.emptyIterator()).hasNext()); } | @VisibleForTesting Iterator<DatasetHandle> newIterator(Iterator<NamespaceKey> keyIterator) { return new TransformingIterator(keyIterator); } | NamespaceListing implements DatasetHandleListing { @VisibleForTesting Iterator<DatasetHandle> newIterator(Iterator<NamespaceKey> keyIterator) { return new TransformingIterator(keyIterator); } } | NamespaceListing implements DatasetHandleListing { @VisibleForTesting Iterator<DatasetHandle> newIterator(Iterator<NamespaceKey> keyIterator) { return new TransformingIterator(keyIterator); } NamespaceListing(
NamespaceService namespaceService,
NamespaceKey sourceKey,
SourceMetadata sourceMetadata,
DatasetRetrievalOptions options
); } | NamespaceListing implements DatasetHandleListing { @VisibleForTesting Iterator<DatasetHandle> newIterator(Iterator<NamespaceKey> keyIterator) { return new TransformingIterator(keyIterator); } NamespaceListing(
NamespaceService namespaceService,
NamespaceKey sourceKey,
SourceMetadata sourceMetadata,
DatasetRetrievalOptions options
); @Override Iterator<? extends DatasetHandle> iterator(); } | NamespaceListing implements DatasetHandleListing { @VisibleForTesting Iterator<DatasetHandle> newIterator(Iterator<NamespaceKey> keyIterator) { return new TransformingIterator(keyIterator); } NamespaceListing(
NamespaceService namespaceService,
NamespaceKey sourceKey,
SourceMetadata sourceMetadata,
DatasetRetrievalOptions options
); @Override Iterator<? extends DatasetHandle> iterator(); } |
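
NamespaceListing.newIterator wraps an Iterator<NamespaceKey> in a TransformingIterator, so an empty input yields hasNext() == false and a NoSuchElementException from next(), exactly what the emptyIterator row asserts. A generic sketch of such a wrapper (class and mapper names are hypothetical):

import java.util.Collections;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.function.Function;

// Generic transforming iterator; delegates hasNext/next to the source
// iterator and maps each element on the way out.
public class TransformingIteratorSketch<S, T> implements Iterator<T> {
  private final Iterator<S> source;
  private final Function<S, T> mapper;

  TransformingIteratorSketch(Iterator<S> source, Function<S, T> mapper) {
    this.source = source;
    this.mapper = mapper;
  }

  @Override public boolean hasNext() { return source.hasNext(); }

  @Override public T next() {
    // An empty source propagates NoSuchElementException, as the test expects.
    if (!source.hasNext()) {
      throw new NoSuchElementException();
    }
    return mapper.apply(source.next());
  }

  public static void main(String[] args) {
    Iterator<String> it = new TransformingIteratorSketch<>(
        Collections.<String>emptyIterator(), String::toUpperCase);
    System.out.println(it.hasNext()); // false
    it.next();                        // throws NoSuchElementException
  }
}
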
@Test public void refreshSourceMetadata_EmptySource() throws Exception { doMockDatasets(mockUpPlugin, ImmutableList.of()); catalogService.refreshSource(mockUpKey, CatalogService.REFRESH_EVERYTHING_NOW, CatalogServiceImpl.UpdateType.FULL); List<NamespaceKey> datasets = Lists.newArrayList(namespaceService.getAllDatasets(mockUpKey)); assertEquals(0, datasets.size()); assertNoDatasetsAfterSourceDeletion(); } | @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } | CatalogServiceImpl implements CatalogService { @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } } | CatalogServiceImpl implements CatalogService { @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles
); @VisibleForTesting CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles,
final CatalogServiceMonitor monitor
); } | CatalogServiceImpl implements CatalogService { @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles
); @VisibleForTesting CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles,
final CatalogServiceMonitor monitor
); @Override void start(); @VisibleForTesting void deleteExcept(Set<String> rootsToSaveSet); @VisibleForTesting boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType); @VisibleForTesting void synchronizeSources(); @Override void close(); boolean createSourceIfMissingWithThrow(SourceConfig config); @VisibleForTesting void deleteSource(String name); @VisibleForTesting ManagedStoragePlugin getManagedSource(String name); @SuppressWarnings("unchecked") @Override T getSource(StoragePluginId pluginId); @Override SourceState getSourceState(String name); @SuppressWarnings("unchecked") @Override T getSource(String name); @Override Catalog getCatalog(MetadataRequestOptions requestOptions); @Override boolean isSourceConfigMetadataImpacting(SourceConfig config); @Override RuleSet getStorageRules(OptimizerRulesContext context, PlannerPhase phase); @VisibleForTesting Catalog getSystemUserCatalog(); } | CatalogServiceImpl implements CatalogService { @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles
); @VisibleForTesting CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles,
final CatalogServiceMonitor monitor
); @Override void start(); @VisibleForTesting void deleteExcept(Set<String> rootsToSaveSet); @VisibleForTesting boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType); @VisibleForTesting void synchronizeSources(); @Override void close(); boolean createSourceIfMissingWithThrow(SourceConfig config); @VisibleForTesting void deleteSource(String name); @VisibleForTesting ManagedStoragePlugin getManagedSource(String name); @SuppressWarnings("unchecked") @Override T getSource(StoragePluginId pluginId); @Override SourceState getSourceState(String name); @SuppressWarnings("unchecked") @Override T getSource(String name); @Override Catalog getCatalog(MetadataRequestOptions requestOptions); @Override boolean isSourceConfigMetadataImpacting(SourceConfig config); @Override RuleSet getStorageRules(OptimizerRulesContext context, PlannerPhase phase); @VisibleForTesting Catalog getSystemUserCatalog(); static final long CATALOG_SYNC; static final String CATALOG_SOURCE_DATA_NAMESPACE; } |
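
refreshSource, exercised repeatedly in the rows below, is guard-clause driven: an unknown source name fails validation, a MissingPluginConf source is skipped with false, and everything else delegates to the managed plugin. A stripped-down sketch of those guards, with a plain map standing in for the plugin registry:

import java.util.HashMap;
import java.util.Map;

// Guard-clause sketch of refreshSource; Plugin and the map are hypothetical
// stand-ins for ManagedStoragePlugin and getPlugins().
public class RefreshGuardSketch {
  interface Plugin {
    String type();
    boolean refresh();
  }

  static boolean refreshSource(Map<String, Plugin> plugins, String root) {
    Plugin plugin = plugins.get(root);
    if (plugin == null) {
      // The real code raises UserException.validationError() here.
      throw new IllegalArgumentException("Unknown source " + root);
    }
    if ("MISSING".equals(plugin.type())) {
      return false; // sources backed by a missing plugin are skipped
    }
    return plugin.refresh();
  }

  public static void main(String[] args) {
    try {
      refreshSource(new HashMap<>(), "mockup");
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage()); // Unknown source mockup
    }
  }
}
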
@Test public void refreshSourceMetadata_FirstTime() throws Exception { doMockDatasets(mockUpPlugin, mockDatasets); catalogService.refreshSource(mockUpKey, CatalogService.REFRESH_EVERYTHING_NOW, CatalogServiceImpl.UpdateType.FULL); catalogService.refreshSource(mockUpKey, CatalogService.REFRESH_EVERYTHING_NOW, CatalogServiceImpl.UpdateType.FULL); List<NamespaceKey> actualDatasetKeys = Lists.newArrayList(namespaceService.getAllDatasets(mockUpKey)); assertEquals(5, actualDatasetKeys.size()); assertDatasetsAreEqual(mockDatasets, actualDatasetKeys); assertFoldersExist(Lists.newArrayList(MOCK_UP + ".fld1", MOCK_UP + ".fld2", MOCK_UP + ".fld2.fld21")); assertDatasetSchemasDefined(actualDatasetKeys); assertNoDatasetsAfterSourceDeletion(); } | @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } | CatalogServiceImpl implements CatalogService { @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } } | CatalogServiceImpl implements CatalogService { @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles
); @VisibleForTesting CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles,
final CatalogServiceMonitor monitor
); } | CatalogServiceImpl implements CatalogService { @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles
); @VisibleForTesting CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles,
final CatalogServiceMonitor monitor
); @Override void start(); @VisibleForTesting void deleteExcept(Set<String> rootsToSaveSet); @VisibleForTesting boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType); @VisibleForTesting void synchronizeSources(); @Override void close(); boolean createSourceIfMissingWithThrow(SourceConfig config); @VisibleForTesting void deleteSource(String name); @VisibleForTesting ManagedStoragePlugin getManagedSource(String name); @SuppressWarnings("unchecked") @Override T getSource(StoragePluginId pluginId); @Override SourceState getSourceState(String name); @SuppressWarnings("unchecked") @Override T getSource(String name); @Override Catalog getCatalog(MetadataRequestOptions requestOptions); @Override boolean isSourceConfigMetadataImpacting(SourceConfig config); @Override RuleSet getStorageRules(OptimizerRulesContext context, PlannerPhase phase); @VisibleForTesting Catalog getSystemUserCatalog(); } | CatalogServiceImpl implements CatalogService { @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles
); @VisibleForTesting CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles,
final CatalogServiceMonitor monitor
); @Override void start(); @VisibleForTesting void deleteExcept(Set<String> rootsToSaveSet); @VisibleForTesting boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType); @VisibleForTesting void synchronizeSources(); @Override void close(); boolean createSourceIfMissingWithThrow(SourceConfig config); @VisibleForTesting void deleteSource(String name); @VisibleForTesting ManagedStoragePlugin getManagedSource(String name); @SuppressWarnings("unchecked") @Override T getSource(StoragePluginId pluginId); @Override SourceState getSourceState(String name); @SuppressWarnings("unchecked") @Override T getSource(String name); @Override Catalog getCatalog(MetadataRequestOptions requestOptions); @Override boolean isSourceConfigMetadataImpacting(SourceConfig config); @Override RuleSet getStorageRules(OptimizerRulesContext context, PlannerPhase phase); @VisibleForTesting Catalog getSystemUserCatalog(); static final long CATALOG_SYNC; static final String CATALOG_SOURCE_DATA_NAMESPACE; } |
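
A pattern worth noting in refreshSourceMetadata_FirstTime is the double call to refreshSource with the same listing followed by a single set of assertions: a full refresh must be idempotent. A hedged JUnit-style sketch of that idiom against a hypothetical catalog facade (not the CatalogService API):

import static org.junit.Assert.assertEquals;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.junit.Test;

// Idempotency idiom; Catalog is a hypothetical facade used only to show
// the refresh-twice-assert-once shape of the tests above.
public class IdempotentRefreshSketch {
  interface Catalog {
    void refresh();
    Set<String> datasets();
  }

  @Test
  public void refreshTwiceIsIdempotent() {
    Set<String> store = new HashSet<>(Arrays.asList("ds1", "ds2", "ds3"));
    Catalog catalog = new Catalog() {
      @Override public void refresh() { /* stable source: re-sync changes nothing */ }
      @Override public Set<String> datasets() { return store; }
    };
    catalog.refresh();
    Set<String> afterFirst = new HashSet<>(catalog.datasets());
    catalog.refresh();
    assertEquals(afterFirst, catalog.datasets()); // second refresh is a no-op
  }
}
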
@Test public void refreshSourceMetadata_FirstTime_UpdateWithNewDatasets() throws Exception { doMockDatasets(mockUpPlugin, mockDatasets); catalogService.refreshSource(mockUpKey, CatalogService.REFRESH_EVERYTHING_NOW, CatalogServiceImpl.UpdateType.FULL); catalogService.refreshSource(mockUpKey, CatalogService.REFRESH_EVERYTHING_NOW, CatalogServiceImpl.UpdateType.FULL); List<NamespaceKey> actualDatasetKeys = Lists.newArrayList(namespaceService.getAllDatasets(mockUpKey)); assertEquals(5, actualDatasetKeys.size()); List<DatasetHandle> testDatasets = Lists.newArrayList(mockDatasets); testDatasets.add(newDataset(MOCK_UP + ".ds4")); testDatasets.add(newDataset(MOCK_UP + ".fld1.ds13")); testDatasets.add(newDataset(MOCK_UP + ".fld2.fld21.ds212")); testDatasets.add(newDataset(MOCK_UP + ".fld5.ds51")); doMockDatasets(mockUpPlugin, testDatasets); catalogService.refreshSource(mockUpKey, CatalogService.REFRESH_EVERYTHING_NOW, CatalogServiceImpl.UpdateType.FULL); catalogService.refreshSource(mockUpKey, CatalogService.REFRESH_EVERYTHING_NOW, CatalogServiceImpl.UpdateType.FULL); actualDatasetKeys = Lists.newArrayList(namespaceService.getAllDatasets(mockUpKey)); assertEquals(9, actualDatasetKeys.size()); assertDatasetsAreEqual(testDatasets, actualDatasetKeys); assertFoldersExist(Lists.newArrayList(MOCK_UP + ".fld1", MOCK_UP + ".fld2", MOCK_UP + ".fld2.fld21", MOCK_UP + ".fld5")); assertDatasetSchemasDefined(actualDatasetKeys); assertNoDatasetsAfterSourceDeletion(); } | @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } | CatalogServiceImpl implements CatalogService { @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } } | CatalogServiceImpl implements CatalogService { @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles
); @VisibleForTesting CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles,
final CatalogServiceMonitor monitor
); } | CatalogServiceImpl implements CatalogService { @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles
); @VisibleForTesting CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles,
final CatalogServiceMonitor monitor
); @Override void start(); @VisibleForTesting void deleteExcept(Set<String> rootsToSaveSet); @VisibleForTesting boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType); @VisibleForTesting void synchronizeSources(); @Override void close(); boolean createSourceIfMissingWithThrow(SourceConfig config); @VisibleForTesting void deleteSource(String name); @VisibleForTesting ManagedStoragePlugin getManagedSource(String name); @SuppressWarnings("unchecked") @Override T getSource(StoragePluginId pluginId); @Override SourceState getSourceState(String name); @SuppressWarnings("unchecked") @Override T getSource(String name); @Override Catalog getCatalog(MetadataRequestOptions requestOptions); @Override boolean isSourceConfigMetadataImpacting(SourceConfig config); @Override RuleSet getStorageRules(OptimizerRulesContext context, PlannerPhase phase); @VisibleForTesting Catalog getSystemUserCatalog(); } | CatalogServiceImpl implements CatalogService { @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles
); @VisibleForTesting CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles,
final CatalogServiceMonitor monitor
); @Override void start(); @VisibleForTesting void deleteExcept(Set<String> rootsToSaveSet); @VisibleForTesting boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType); @VisibleForTesting void synchronizeSources(); @Override void close(); boolean createSourceIfMissingWithThrow(SourceConfig config); @VisibleForTesting void deleteSource(String name); @VisibleForTesting ManagedStoragePlugin getManagedSource(String name); @SuppressWarnings("unchecked") @Override T getSource(StoragePluginId pluginId); @Override SourceState getSourceState(String name); @SuppressWarnings("unchecked") @Override T getSource(String name); @Override Catalog getCatalog(MetadataRequestOptions requestOptions); @Override boolean isSourceConfigMetadataImpacting(SourceConfig config); @Override RuleSet getStorageRules(OptimizerRulesContext context, PlannerPhase phase); @VisibleForTesting Catalog getSystemUserCatalog(); static final long CATALOG_SYNC; static final String CATALOG_SOURCE_DATA_NAMESPACE; } |
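
Between the two listings in refreshSourceMetadata_FirstTime_UpdateWithNewDatasets the source gains four datasets, and the refresh must create exactly those entries. Conceptually that is a set difference between the new listing and the stored namespace; a sketch with plain java.util sets (the real code walks NamespaceKey listings):

import java.util.Arrays;
import java.util.Set;
import java.util.TreeSet;

// Set-difference view of a full refresh: entries in the new listing but
// not yet in the namespace are created as shallow dataset configs.
public class RefreshDiffSketch {
  public static void main(String[] args) {
    Set<String> namespace = new TreeSet<>(Arrays.asList(
        "fld1.ds11", "fld2.ds22", "ds3"));
    Set<String> listing = new TreeSet<>(Arrays.asList(
        "fld1.ds11", "fld2.ds22", "ds3", "ds4", "fld5.ds51"));

    Set<String> toAdd = new TreeSet<>(listing);
    toAdd.removeAll(namespace); // newly listed datasets
    System.out.println("add: " + toAdd); // add: [ds4, fld5.ds51]
  }
}
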
@Test public void refreshSourceMetadata_FirstTime_MultipleUpdatesWithNewDatasetsDeletedDatasets() throws Exception { doMockDatasets(mockUpPlugin, mockDatasets); catalogService.refreshSource(mockUpKey, CatalogService.REFRESH_EVERYTHING_NOW, CatalogServiceImpl.UpdateType.FULL); catalogService.refreshSource(mockUpKey, CatalogService.REFRESH_EVERYTHING_NOW, CatalogServiceImpl.UpdateType.FULL); List<DatasetHandle> testDatasets = Lists.newArrayList(); testDatasets.add(newDataset(MOCK_UP + ".fld1.ds11")); testDatasets.add(newDataset(MOCK_UP + ".fld2.fld22.ds222")); testDatasets.add(newDataset(MOCK_UP + ".fld2.ds22")); testDatasets.add(newDataset(MOCK_UP + ".ds4")); testDatasets.add(newDataset(MOCK_UP + ".fld5.ds51")); doMockDatasets(mockUpPlugin, testDatasets); catalogService.refreshSource(mockUpKey, CatalogService.REFRESH_EVERYTHING_NOW, CatalogServiceImpl.UpdateType.FULL); catalogService.refreshSource(mockUpKey, CatalogService.REFRESH_EVERYTHING_NOW, CatalogServiceImpl.UpdateType.FULL); List<NamespaceKey> actualDatasetKeys = Lists.newArrayList(namespaceService.getAllDatasets(mockUpKey)); assertEquals(5, actualDatasetKeys.size()); assertDatasetsAreEqual(testDatasets, actualDatasetKeys); assertFoldersExist(Lists.newArrayList(MOCK_UP + ".fld1", MOCK_UP + ".fld2", MOCK_UP + ".fld2.fld22", MOCK_UP + ".fld5")); assertFoldersDoNotExist(Lists.newArrayList(MOCK_UP + ".fld2.fld21")); assertDatasetSchemasDefined(actualDatasetKeys); testDatasets = Lists.newArrayList(); testDatasets.add(newDataset(MOCK_UP + ".fld1.ds11")); testDatasets.add(newDataset(MOCK_UP + ".fld2.ds22")); testDatasets.add(newDataset(MOCK_UP + ".fld2.ds23")); testDatasets.add(newDataset(MOCK_UP + ".ds5")); testDatasets.add(newDataset(MOCK_UP + ".fld5.ds51")); testDatasets.add(newDataset(MOCK_UP + ".fld5.ds52")); testDatasets.add(newDataset(MOCK_UP + ".fld6.ds61")); doMockDatasets(mockUpPlugin, testDatasets); catalogService.refreshSource(mockUpKey, CatalogService.REFRESH_EVERYTHING_NOW, CatalogServiceImpl.UpdateType.FULL); catalogService.refreshSource(mockUpKey, CatalogService.REFRESH_EVERYTHING_NOW, CatalogServiceImpl.UpdateType.FULL); actualDatasetKeys = Lists.newArrayList(namespaceService.getAllDatasets(mockUpKey)); assertEquals(7, actualDatasetKeys.size()); assertDatasetsAreEqual(testDatasets, actualDatasetKeys); assertFoldersExist(Lists.newArrayList(MOCK_UP + ".fld1", MOCK_UP + ".fld2", MOCK_UP + ".fld5", MOCK_UP + ".fld6")); assertFoldersDoNotExist(Lists.newArrayList((MOCK_UP + ".fld2.fld22"))); assertDatasetSchemasDefined(actualDatasetKeys); assertNoDatasetsAfterSourceDeletion(); } | @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } | CatalogServiceImpl implements CatalogService { @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } } | CatalogServiceImpl implements CatalogService { @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles
); @VisibleForTesting CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles,
final CatalogServiceMonitor monitor
); } | CatalogServiceImpl implements CatalogService { @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles
); @VisibleForTesting CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles,
final CatalogServiceMonitor monitor
); @Override void start(); @VisibleForTesting void deleteExcept(Set<String> rootsToSaveSet); @VisibleForTesting boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType); @VisibleForTesting void synchronizeSources(); @Override void close(); boolean createSourceIfMissingWithThrow(SourceConfig config); @VisibleForTesting void deleteSource(String name); @VisibleForTesting ManagedStoragePlugin getManagedSource(String name); @SuppressWarnings("unchecked") @Override T getSource(StoragePluginId pluginId); @Override SourceState getSourceState(String name); @SuppressWarnings("unchecked") @Override T getSource(String name); @Override Catalog getCatalog(MetadataRequestOptions requestOptions); @Override boolean isSourceConfigMetadataImpacting(SourceConfig config); @Override RuleSet getStorageRules(OptimizerRulesContext context, PlannerPhase phase); @VisibleForTesting Catalog getSystemUserCatalog(); } | CatalogServiceImpl implements CatalogService { @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles
); @VisibleForTesting CatalogServiceImpl(
Provider<SabotContext> context,
Provider<SchedulerService> scheduler,
Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider,
Provider<FabricService> fabric,
Provider<ConnectionReader> connectionReaderProvider,
Provider<BufferAllocator> bufferAllocator,
Provider<LegacyKVStoreProvider> kvStoreProvider,
Provider<DatasetListingService> datasetListingService,
Provider<OptionManager> optionManager,
Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider,
DremioConfig config,
EnumSet<Role> roles,
final CatalogServiceMonitor monitor
); @Override void start(); @VisibleForTesting void deleteExcept(Set<String> rootsToSaveSet); @VisibleForTesting boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType); @VisibleForTesting void synchronizeSources(); @Override void close(); boolean createSourceIfMissingWithThrow(SourceConfig config); @VisibleForTesting void deleteSource(String name); @VisibleForTesting ManagedStoragePlugin getManagedSource(String name); @SuppressWarnings("unchecked") @Override T getSource(StoragePluginId pluginId); @Override SourceState getSourceState(String name); @SuppressWarnings("unchecked") @Override T getSource(String name); @Override Catalog getCatalog(MetadataRequestOptions requestOptions); @Override boolean isSourceConfigMetadataImpacting(SourceConfig config); @Override RuleSet getStorageRules(OptimizerRulesContext context, PlannerPhase phase); @VisibleForTesting Catalog getSystemUserCatalog(); static final long CATALOG_SYNC; static final String CATALOG_SOURCE_DATA_NAMESPACE; } |
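
The MultipleUpdates row also checks the inverse direction: datasets that vanish from the listing are removed, and folders such as fld2.fld21 that no longer prefix any dataset disappear with them. A sketch deriving the surviving folder set from dotted dataset keys (helper name hypothetical):

import java.util.List;
import java.util.Set;
import java.util.TreeSet;

// Derive the folders implied by a set of dotted dataset keys; folders not
// in this set are candidates for pruning after a refresh.
public class FolderPruneSketch {
  static Set<String> impliedFolders(List<String> datasetKeys) {
    Set<String> folders = new TreeSet<>();
    for (String key : datasetKeys) {
      String[] parts = key.split("\\.");
      StringBuilder prefix = new StringBuilder(parts[0]); // source root
      // Every component between the root and the leaf is a folder.
      for (int i = 1; i < parts.length - 1; i++) {
        prefix.append('.').append(parts[i]);
        folders.add(prefix.toString());
      }
    }
    return folders;
  }

  public static void main(String[] args) {
    System.out.println(impliedFolders(java.util.Arrays.asList(
        "mockup.fld1.ds11", "mockup.fld2.ds22", "mockup.ds5")));
    // prints [mockup.fld1, mockup.fld2] -- mockup.fld2.fld21 would be pruned
  }
}
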
@Test public void refreshSourceNames() throws Exception { doMockDatasets(mockUpPlugin, mockDatasets); catalogService.refreshSource(mockUpKey, CatalogService.DEFAULT_METADATA_POLICY, CatalogServiceImpl.UpdateType.NAMES); assertEquals(5, Lists.newArrayList(namespaceService.getAllDatasets(mockUpKey)).size()); List<DatasetHandle> testDatasets = Lists.newArrayList(mockDatasets); testDatasets.add(newDataset(MOCK_UP + ".fld1.ds13")); testDatasets.add(newDataset(MOCK_UP + ".fld2.fld21.ds212")); testDatasets.add(newDataset(MOCK_UP + ".fld2.ds23")); testDatasets.add(newDataset(MOCK_UP + ".ds4")); testDatasets.add(newDataset(MOCK_UP + ".fld5.ds51")); doMockDatasets(mockUpPlugin, testDatasets); catalogService.refreshSource(mockUpKey, CatalogService.DEFAULT_METADATA_POLICY, CatalogServiceImpl.UpdateType.NAMES); List<NamespaceKey> actualDatasetKeys = Lists.newArrayList(namespaceService.getAllDatasets(mockUpKey)); assertEquals(10, actualDatasetKeys.size()); assertDatasetsAreEqual(testDatasets, actualDatasetKeys); assertFoldersExist(Lists.newArrayList(MOCK_UP + ".fld1", MOCK_UP + ".fld2", MOCK_UP + ".fld2.fld21", MOCK_UP + ".fld5")); assertDatasetSchemasNotDefined(actualDatasetKeys); assertNoDatasetsAfterSourceDeletion(); } | @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } | CatalogServiceImpl implements CatalogService { @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } } | CatalogServiceImpl implements CatalogService { @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } CatalogServiceImpl( Provider<SabotContext> context, Provider<SchedulerService> scheduler, Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider, Provider<FabricService> fabric, Provider<ConnectionReader> connectionReaderProvider, Provider<BufferAllocator> bufferAllocator, Provider<LegacyKVStoreProvider> kvStoreProvider, Provider<DatasetListingService> datasetListingService, Provider<OptionManager> optionManager, Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider, DremioConfig config, EnumSet<Role> roles ); @VisibleForTesting CatalogServiceImpl( Provider<SabotContext> context, Provider<SchedulerService> scheduler, Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider, Provider<FabricService> fabric, Provider<ConnectionReader> connectionReaderProvider, Provider<BufferAllocator> bufferAllocator, Provider<LegacyKVStoreProvider> kvStoreProvider, Provider<DatasetListingService> datasetListingService, Provider<OptionManager> optionManager, Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider, DremioConfig config, EnumSet<Role> roles, final CatalogServiceMonitor monitor ); } | CatalogServiceImpl implements CatalogService { @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } CatalogServiceImpl( Provider<SabotContext> context, Provider<SchedulerService> scheduler, Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider, Provider<FabricService> fabric, Provider<ConnectionReader> connectionReaderProvider, Provider<BufferAllocator> bufferAllocator, Provider<LegacyKVStoreProvider> kvStoreProvider, Provider<DatasetListingService> datasetListingService, Provider<OptionManager> optionManager, Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider, DremioConfig config, EnumSet<Role> roles ); @VisibleForTesting CatalogServiceImpl( Provider<SabotContext> context, Provider<SchedulerService> scheduler, Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider, Provider<FabricService> fabric, Provider<ConnectionReader> connectionReaderProvider, Provider<BufferAllocator> bufferAllocator, Provider<LegacyKVStoreProvider> kvStoreProvider, Provider<DatasetListingService> datasetListingService, Provider<OptionManager> optionManager, Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider, DremioConfig config, EnumSet<Role> roles, final CatalogServiceMonitor monitor ); @Override void start(); @VisibleForTesting void deleteExcept(Set<String> rootsToSaveSet); @VisibleForTesting boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType); @VisibleForTesting void synchronizeSources(); @Override void close(); boolean createSourceIfMissingWithThrow(SourceConfig config); @VisibleForTesting void deleteSource(String name); @VisibleForTesting ManagedStoragePlugin getManagedSource(String name); @SuppressWarnings("unchecked") @Override T getSource(StoragePluginId pluginId); @Override SourceState getSourceState(String name); @SuppressWarnings("unchecked") @Override T getSource(String name); @Override Catalog getCatalog(MetadataRequestOptions requestOptions); @Override boolean isSourceConfigMetadataImpacting(SourceConfig config); @Override RuleSet getStorageRules(OptimizerRulesContext context, PlannerPhase phase); @VisibleForTesting Catalog getSystemUserCatalog(); } | CatalogServiceImpl implements CatalogService { @VisibleForTesting public boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType) throws NamespaceException { ManagedStoragePlugin plugin = getPlugins().get(source.getRoot()); if (plugin == null){ throw UserException.validationError().message("Unknown source %s", source.getRoot()).build(logger); } else if (MissingPluginConf.TYPE.equals(plugin.getConfig().getType())) { return false; } return plugin.refresh(updateType, metadataPolicy); } CatalogServiceImpl( Provider<SabotContext> context, Provider<SchedulerService> scheduler, Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider, Provider<FabricService> fabric, Provider<ConnectionReader> connectionReaderProvider, Provider<BufferAllocator> bufferAllocator, Provider<LegacyKVStoreProvider> kvStoreProvider, Provider<DatasetListingService> datasetListingService, Provider<OptionManager> optionManager, Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider, DremioConfig config, EnumSet<Role> roles ); @VisibleForTesting CatalogServiceImpl( Provider<SabotContext> context, Provider<SchedulerService> scheduler, Provider<? extends Provider<ConnectionConf<?, ?>>> sysTableConfProvider, Provider<FabricService> fabric, Provider<ConnectionReader> connectionReaderProvider, Provider<BufferAllocator> bufferAllocator, Provider<LegacyKVStoreProvider> kvStoreProvider, Provider<DatasetListingService> datasetListingService, Provider<OptionManager> optionManager, Provider<MetadataRefreshInfoBroadcaster> broadcasterProvider, DremioConfig config, EnumSet<Role> roles, final CatalogServiceMonitor monitor ); @Override void start(); @VisibleForTesting void deleteExcept(Set<String> rootsToSaveSet); @VisibleForTesting boolean refreshSource(NamespaceKey source, MetadataPolicy metadataPolicy, UpdateType updateType); @VisibleForTesting void synchronizeSources(); @Override void close(); boolean createSourceIfMissingWithThrow(SourceConfig config); @VisibleForTesting void deleteSource(String name); @VisibleForTesting ManagedStoragePlugin getManagedSource(String name); @SuppressWarnings("unchecked") @Override T getSource(StoragePluginId pluginId); @Override SourceState getSourceState(String name); @SuppressWarnings("unchecked") @Override T getSource(String name); @Override Catalog getCatalog(MetadataRequestOptions requestOptions); @Override boolean isSourceConfigMetadataImpacting(SourceConfig config); @Override RuleSet getStorageRules(OptimizerRulesContext context, PlannerPhase phase); @VisibleForTesting Catalog getSystemUserCatalog(); static final long CATALOG_SYNC; static final String CATALOG_SOURCE_DATA_NAMESPACE; }
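Note on the refreshSource row above: its focal contexts reference getPlugins(), MissingPluginConf, and UserException, none of which are defined within the row itself. Below is a minimal, self-contained sketch of the same guard-then-delegate pattern; Plugin, the registry map, and MISSING_TYPE are hypothetical stand-ins for Dremio's ManagedStoragePlugin machinery, not actual Dremio APIs.

import java.util.Map;

final class RefreshSketch {
    // Hypothetical stand-in for a managed storage plugin.
    interface Plugin {
        String type();
        boolean refresh();
    }

    // Stand-in for MissingPluginConf.TYPE: marks a source whose plugin config is gone.
    static final String MISSING_TYPE = "MISSING";

    // Mirrors the three outcomes of CatalogServiceImpl.refreshSource:
    // unknown source -> error, missing plugin config -> false, otherwise delegate.
    static boolean refreshSource(Map<String, Plugin> registry, String sourceRoot) {
        Plugin plugin = registry.get(sourceRoot);
        if (plugin == null) {
            throw new IllegalArgumentException("Unknown source " + sourceRoot);
        } else if (MISSING_TYPE.equals(plugin.type())) {
            return false; // source exists but its plugin configuration is missing
        }
        return plugin.refresh();
    }
}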
@Test public void get() { assertEquals(instance.get(1526173261000L), 1); assertEquals(instance.get(1526259661000L), 2); assertEquals(instance.get(1526086861000L), 7); } | @Override public int get(long instant) { return map(chronology.dayOfWeek().get(instant)); } | DayOfWeekFromSundayDateTimeField extends PreciseDurationDateTimeField { @Override public int get(long instant) { return map(chronology.dayOfWeek().get(instant)); } } | DayOfWeekFromSundayDateTimeField extends PreciseDurationDateTimeField { @Override public int get(long instant) { return map(chronology.dayOfWeek().get(instant)); } DayOfWeekFromSundayDateTimeField(Chronology chronology, DurationField days); } | DayOfWeekFromSundayDateTimeField extends PreciseDurationDateTimeField { @Override public int get(long instant) { return map(chronology.dayOfWeek().get(instant)); } DayOfWeekFromSundayDateTimeField(Chronology chronology, DurationField days); @Override int get(long instant); @Override String getAsText(int fieldValue, Locale locale); @Override String getAsShortText(int fieldValue, Locale locale); @Override DurationField getRangeDurationField(); @Override int getMinimumValue(); @Override int getMaximumValue(); @Override int getMaximumTextLength(Locale locale); @Override int getMaximumShortTextLength(Locale locale); @Override String toString(); } | DayOfWeekFromSundayDateTimeField extends PreciseDurationDateTimeField { @Override public int get(long instant) { return map(chronology.dayOfWeek().get(instant)); } DayOfWeekFromSundayDateTimeField(Chronology chronology, DurationField days); @Override int get(long instant); @Override String getAsText(int fieldValue, Locale locale); @Override String getAsShortText(int fieldValue, Locale locale); @Override DurationField getRangeDurationField(); @Override int getMinimumValue(); @Override int getMaximumValue(); @Override int getMaximumTextLength(Locale locale); @Override int getMaximumShortTextLength(Locale locale); @Override String toString(); } |
@Test public void getAsText() { assertTrue("Sunday".equalsIgnoreCase(instance.getAsText(1526173261000L))); assertTrue("Monday".equalsIgnoreCase(instance.getAsText(1526259661000L))); assertTrue("Saturday".equalsIgnoreCase(instance.getAsText(1526086861000L))); } | @Override public String getAsText(int fieldValue, Locale locale) { return chronology.dayOfWeek().getAsText(reverse(fieldValue), locale); } | DayOfWeekFromSundayDateTimeField extends PreciseDurationDateTimeField { @Override public String getAsText(int fieldValue, Locale locale) { return chronology.dayOfWeek().getAsText(reverse(fieldValue), locale); } } | DayOfWeekFromSundayDateTimeField extends PreciseDurationDateTimeField { @Override public String getAsText(int fieldValue, Locale locale) { return chronology.dayOfWeek().getAsText(reverse(fieldValue), locale); } DayOfWeekFromSundayDateTimeField(Chronology chronology, DurationField days); } | DayOfWeekFromSundayDateTimeField extends PreciseDurationDateTimeField { @Override public String getAsText(int fieldValue, Locale locale) { return chronology.dayOfWeek().getAsText(reverse(fieldValue), locale); } DayOfWeekFromSundayDateTimeField(Chronology chronology, DurationField days); @Override int get(long instant); @Override String getAsText(int fieldValue, Locale locale); @Override String getAsShortText(int fieldValue, Locale locale); @Override DurationField getRangeDurationField(); @Override int getMinimumValue(); @Override int getMaximumValue(); @Override int getMaximumTextLength(Locale locale); @Override int getMaximumShortTextLength(Locale locale); @Override String toString(); } | DayOfWeekFromSundayDateTimeField extends PreciseDurationDateTimeField { @Override public String getAsText(int fieldValue, Locale locale) { return chronology.dayOfWeek().getAsText(reverse(fieldValue), locale); } DayOfWeekFromSundayDateTimeField(Chronology chronology, DurationField days); @Override int get(long instant); @Override String getAsText(int fieldValue, Locale locale); @Override String getAsShortText(int fieldValue, Locale locale); @Override DurationField getRangeDurationField(); @Override int getMinimumValue(); @Override int getMaximumValue(); @Override int getMaximumTextLength(Locale locale); @Override int getMaximumShortTextLength(Locale locale); @Override String toString(); } |
@Test public void getAsShortText() { assertTrue("Sun".equalsIgnoreCase(instance.getAsShortText(1526173261000L))); assertTrue("Mon".equalsIgnoreCase(instance.getAsShortText(1526259661000L))); assertTrue("Sat".equalsIgnoreCase(instance.getAsShortText(1526086861000L))); } | @Override public String getAsShortText(int fieldValue, Locale locale) { return chronology.dayOfWeek().getAsShortText(reverse(fieldValue), locale); } | DayOfWeekFromSundayDateTimeField extends PreciseDurationDateTimeField { @Override public String getAsShortText(int fieldValue, Locale locale) { return chronology.dayOfWeek().getAsShortText(reverse(fieldValue), locale); } } | DayOfWeekFromSundayDateTimeField extends PreciseDurationDateTimeField { @Override public String getAsShortText(int fieldValue, Locale locale) { return chronology.dayOfWeek().getAsShortText(reverse(fieldValue), locale); } DayOfWeekFromSundayDateTimeField(Chronology chronology, DurationField days); } | DayOfWeekFromSundayDateTimeField extends PreciseDurationDateTimeField { @Override public String getAsShortText(int fieldValue, Locale locale) { return chronology.dayOfWeek().getAsShortText(reverse(fieldValue), locale); } DayOfWeekFromSundayDateTimeField(Chronology chronology, DurationField days); @Override int get(long instant); @Override String getAsText(int fieldValue, Locale locale); @Override String getAsShortText(int fieldValue, Locale locale); @Override DurationField getRangeDurationField(); @Override int getMinimumValue(); @Override int getMaximumValue(); @Override int getMaximumTextLength(Locale locale); @Override int getMaximumShortTextLength(Locale locale); @Override String toString(); } | DayOfWeekFromSundayDateTimeField extends PreciseDurationDateTimeField { @Override public String getAsShortText(int fieldValue, Locale locale) { return chronology.dayOfWeek().getAsShortText(reverse(fieldValue), locale); } DayOfWeekFromSundayDateTimeField(Chronology chronology, DurationField days); @Override int get(long instant); @Override String getAsText(int fieldValue, Locale locale); @Override String getAsShortText(int fieldValue, Locale locale); @Override DurationField getRangeDurationField(); @Override int getMinimumValue(); @Override int getMaximumValue(); @Override int getMaximumTextLength(Locale locale); @Override int getMaximumShortTextLength(Locale locale); @Override String toString(); } |
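The three DayOfWeekFromSundayDateTimeField rows above call map(...) and reverse(...), which are not included in any of the focal contexts. The sketch below is a hypothetical reconstruction of that Sunday-first remapping, inferred only from the expected test values (Sunday -> 1, Monday -> 2, Saturday -> 7) against ISO day-of-week numbering (Monday = 1 .. Sunday = 7); it is not taken from the source class.

final class SundayFirstMapping {
    // ISO day of week (Mon=1 .. Sun=7) -> Sunday-first numbering (Sun=1, Mon=2 .. Sat=7).
    static int map(int isoDayOfWeek) {
        return isoDayOfWeek % 7 + 1;
    }

    // Sunday-first numbering (Sun=1 .. Sat=7) -> ISO day of week (Mon=1 .. Sun=7).
    static int reverse(int sundayFirstDayOfWeek) {
        return sundayFirstDayOfWeek == 1 ? 7 : sundayFirstDayOfWeek - 1;
    }

    public static void main(String[] args) {
        // Matches the test expectations: Sunday -> 1, Monday -> 2, Saturday -> 7.
        System.out.println(map(7) == 1 && map(1) == 2 && map(6) == 7);
        // reverse undoes map, so getAsText/getAsShortText resolve the right ISO day.
        System.out.println(reverse(1) == 7 && reverse(2) == 1 && reverse(7) == 6);
    }
}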