| method2testcases (string · lengths 118 – 6.63k) |
|---|
### Question:
GraphiteValidator { public ValidatedGraphite validate(Graphite graphite) { if (graphite == null) { graphite = new Graphite(); } Graphite validated = loader.load(graphite.getConfig()); if (graphite.getHost() != null) { validated.setHost(graphite.getHost()); } if (graphite.getPrefix() != null) { validated.setPrefix(graphite.getPrefix()); } if (graphite.getNamespace() != null) { validated.setNamespace(graphite.getNamespace()); } validated.init(); if (validated.isEnabled()) { if (validated.getHost() == null || validated.getPrefix() == null || validated.getNamespace() == null) { throw new CircusTrainException( String.format("Missing graphite configuration property: host[%s], prefix[%s], namespace[%s]", validated.getHost(), validated.getPrefix(), validated.getNamespace())); } } return new ValidatedGraphite(validated); } GraphiteValidator(Configuration conf); GraphiteValidator(GraphiteLoader loader); ValidatedGraphite validate(Graphite graphite); }### Answer:
@Test public void nullGraphite() { ValidatedGraphite validated = validator.validate(null); assertThat(validated.isEnabled(), is(false)); }
@Test public void disabledGraphite() { input.init(); ValidatedGraphite validated = validator.validate(input); assertThat(validated.isEnabled(), is(false)); }
@Test public void enabledConfigOnly() { input.setConfig(new Path(".")); input.init(); ValidatedGraphite validated = validator.validate(input); assertThat(validated.isEnabled(), is(false)); }
@Test(expected = CircusTrainException.class) public void enabledNoHost() { input.setPrefix("prefix"); input.setNamespace("namespace"); input.init(); validator.validate(input); }
@Test(expected = CircusTrainException.class) public void enabledNoPrefix() { input.setHost("host"); input.setNamespace("namespace"); input.init(); validator.validate(input); }
@Test(expected = CircusTrainException.class) public void enabledNoNamespace() { input.setHost("host"); input.setPrefix("prefix"); input.init(); validator.validate(input); }
@Test public void enabledHostPrefixAndNamespace() { input.setHost("host"); input.setPrefix("prefix"); input.setNamespace("namespace"); input.init(); ValidatedGraphite validated = validator.validate(input); assertThat(validated.isEnabled(), is(true)); assertThat(validated.getHost(), is("host")); assertThat(validated.getFormattedPrefix(), is("prefix.namespace")); }
@Test public void enabledHostPrefixAndNamespaceFromConfigFile() { loaded.setHost("hostx"); loaded.setPrefix("prefixx"); loaded.setNamespace("namespacex"); loaded.init(); ValidatedGraphite validated = validator.validate(input); assertThat(validated.isEnabled(), is(true)); assertThat(validated.getHost(), is("hostx")); assertThat(validated.getFormattedPrefix(), is("prefixx.namespacex")); }
@Test public void enabledHostPrefixAndNamespaceFromConfigFileWithOverride() { loaded.setHost("hostx"); loaded.setPrefix("prefixx"); loaded.setNamespace("namespacex"); loaded.init(); input.setHost("host"); input.setPrefix("prefix"); input.setNamespace("namespace"); input.init(); ValidatedGraphite validated = validator.validate(input); assertThat(validated.isEnabled(), is(true)); assertThat(validated.getHost(), is("host")); assertThat(validated.getFormattedPrefix(), is("prefix.namespace")); } |
### Question:
MetricsConf { @Bean MetricSender metricSender(ValidatedGraphite validatedGraphite) { if (validatedGraphite.isEnabled()) { return GraphiteMetricSender.newInstance(validatedGraphite.getHost(), validatedGraphite.getFormattedPrefix()); } return MetricSender.DEFAULT_LOG_ONLY; } }### Answer:
@Test public void graphiteMetricSender() { when(validatedGraphite.getHost()).thenReturn("localhost:123"); when(validatedGraphite.getFormattedPrefix()).thenReturn("prefix.namespace"); when(validatedGraphite.isEnabled()).thenReturn(true); MetricSender sender = new MetricsConf().metricSender(validatedGraphite); assertTrue(sender instanceof GraphiteMetricSender); }
@Test public void defaultMetricSender() { when(validatedGraphite.isEnabled()).thenReturn(false); MetricSender sender = new MetricsConf().metricSender(validatedGraphite); assertTrue(sender == MetricSender.DEFAULT_LOG_ONLY); } |
### Question:
MetricsConf { @Bean ScheduledReporterFactory runningScheduledReporterFactory( MetricRegistry runningMetricRegistry, ValidatedGraphite validatedGraphite) { if (validatedGraphite.isEnabled()) { return new GraphiteScheduledReporterFactory(runningMetricRegistry, validatedGraphite.getHost(), validatedGraphite.getFormattedPrefix()); } return new LoggingScheduledReporterFactory(runningMetricRegistry); } }### Answer:
@Test public void graphiteReporter() { when(validatedGraphite.isEnabled()).thenReturn(true); ScheduledReporterFactory reporterFactory = new MetricsConf().runningScheduledReporterFactory(new MetricRegistry(), validatedGraphite); assertTrue(reporterFactory instanceof GraphiteScheduledReporterFactory); }
@Test public void defaultReporter() { when(validatedGraphite.isEnabled()).thenReturn(false); ScheduledReporterFactory reporterFactory = new MetricsConf().runningScheduledReporterFactory(new MetricRegistry(), validatedGraphite); assertTrue(reporterFactory instanceof LoggingScheduledReporterFactory); } |
### Question:
GraphiteLoader { Graphite load(Path path) { Graphite graphite = new Graphite(); if (path != null) { graphite.setConfig(path); Properties properties = loadProperties(path); graphite.setHost(properties.getProperty("graphite.host")); graphite.setPrefix(properties.getProperty("graphite.prefix")); graphite.setNamespace(properties.getProperty("graphite.namespace")); } return graphite; } GraphiteLoader(Configuration conf); }### Answer:
@Test public void readAllProps() throws IOException { Properties properties = new Properties(); properties.put("graphite.host", "h"); properties.put("graphite.prefix", "p"); properties.put("graphite.namespace", "n"); try (OutputStream outputStream = new FileOutputStream(clusterProperties)) { properties.store(outputStream, null); } Graphite graphite = loader.load(path); assertThat(graphite.getConfig(), is(path)); assertThat(graphite.getHost(), is("h")); assertThat(graphite.getPrefix(), is("p")); assertThat(graphite.getNamespace(), is("n")); }
@Test public void nullPath() throws IOException { Graphite graphite = loader.load(null); assertThat(graphite.getConfig(), is(nullValue())); assertThat(graphite.getHost(), is(nullValue())); assertThat(graphite.getPrefix(), is(nullValue())); assertThat(graphite.getNamespace(), is(nullValue())); }
@Test(expected = CircusTrainException.class) public void pathDoesNotExist() throws IOException { loader.load(new Path(new File(temp.getRoot(), "dummy.properties").toURI())); }
@SuppressWarnings("unchecked") @Test(expected = CircusTrainException.class) public void ioException() throws IOException { Path mockPath = mock(Path.class); when(mockPath.getFileSystem(conf)).thenThrow(IOException.class); loader.load(mockPath); } |
### Question:
Graphite { public void setHost(String host) { this.host = host; } boolean isEnabled(); Path getConfig(); void setConfig(Path config); String getHost(); void setHost(String host); String getPrefix(); void setPrefix(String prefix); String getNamespace(); void setNamespace(String namespace); }### Answer:
@Test public void validHostAndPort() { graphite.setHost("foo.com:1234"); Set<ConstraintViolation<Graphite>> violations = validator.validate(graphite); assertThat(violations.size(), is(0)); }
@Test public void missingPort() { graphite.setHost("foo"); Set<ConstraintViolation<Graphite>> violations = validator.validate(graphite); assertThat(violations.size(), is(1)); }
@Test public void nullHost() { graphite.setHost(null); Set<ConstraintViolation<Graphite>> violations = validator.validate(graphite); assertThat(violations.size(), is(0)); } |
### Question:
JobMetrics implements Metrics { @Override public Map<String, Long> getMetrics() { return metrics; } JobMetrics(Job job, String bytesReplicatedGroup, String bytesReplicatedCounter); JobMetrics(Job job, Enum<?> bytesReplicatedCounter); JobMetrics(Job job, String bytesReplicatedKey); @Override Map<String, Long> getMetrics(); @Override long getBytesReplicated(); }### Answer:
@Test public void nullJob() throws Exception { Map<String, Long> metrics = new JobMetrics(null, GROUP, COUNTER).getMetrics(); assertThat(metrics.size(), is(0)); }
@Test public void nullCounters() throws Exception { when(job.getCounters()).thenReturn(null); Map<String, Long> metrics = new JobMetrics(job, GROUP, COUNTER).getMetrics(); assertThat(metrics.size(), is(0)); } |
### Question:
BufferedPartitionFetcher implements PartitionFetcher { @Override public Partition fetch(String partitionName) { int partitionPosition = partitionNames.indexOf(partitionName); if (partitionPosition < 0) { throw new PartitionNotFoundException("Unknown partition " + partitionName); } if (!buffer.containsKey(partitionName)) { bufferPartitions(partitionPosition); } return buffer.get(partitionName); } BufferedPartitionFetcher(IMetaStoreClient metastore, Table table, short bufferSize); @Override Partition fetch(String partitionName); }### Answer:
@Test(expected = PartitionNotFoundException.class) public void unknowPartition() throws Exception { when(metastore.getPartitionsByNames(DATABASE_NAME, TABLE_NAME, Arrays.asList("a=01", "a=02", "a=03"))) .thenReturn(Arrays.asList(p01, p02, p03)); BufferedPartitionFetcher fetcher = spy(new BufferedPartitionFetcher(metastore, table, (short) 10)); fetcher.fetch("a=10"); } |
### Question:
HiveLanguageParser { public void parse(String statement, NodeProcessor nodeProcessor) { Context parserContext; try { parserContext = new Context(hiveConf); } catch (IOException e) { throw new RuntimeException("Unable to create Context for parser", e); } ParseDriver parserDriver = new ParseDriver(); ASTNode tree; try { tree = parserDriver.parse(statement, parserContext); } catch (ParseException e) { throw new HiveParseException(e); } Map<Rule, NodeProcessor> rules = new LinkedHashMap<>(); Dispatcher dispatcher = new DefaultRuleDispatcher(nodeProcessor, rules, null); GraphWalker walker = new DefaultGraphWalker(dispatcher); List<Node> topNodes = new ArrayList<>(); topNodes.add(tree); try { walker.startWalking(topNodes, null); } catch (SemanticException e) { throw new HiveSemanticException(e); } } HiveLanguageParser(HiveConf hiveConfiguration); void parse(String statement, NodeProcessor nodeProcessor); }### Answer:
@SuppressWarnings("unchecked") @Test public void typical() throws Exception { parser.parse(CREATE_TABLE_STATEMENT, nodeProcessor); verify(nodeProcessor, times(49)).process(any(Node.class), any(Stack.class), any(NodeProcessorCtx.class), anyVararg()); }
@Test(expected = HiveParseException.class) public void invalidSyntax() throws Exception { parser.parse("CREATE TABLE abc () LOCATION 'path'", nodeProcessor); } |
### Question:
S3S3CopierOptions { public String getAssumedRole() { return MapUtils.getString(copierOptions, Keys.ASSUME_ROLE.keyName(), null); } S3S3CopierOptions(); S3S3CopierOptions(Map<String, Object> copierOptions); void setMaxThreadPoolSize(int maxThreadPoolSize); int getMaxThreadPoolSize(); Long getMultipartCopyThreshold(); Long getMultipartCopyPartSize(); URI getS3Endpoint(); URI getS3Endpoint(String region); Boolean isS3ServerSideEncryption(); CannedAccessControlList getCannedAcl(); String getAssumedRole(); int getAssumedRoleCredentialDuration(); int getMaxCopyAttempts(); }### Answer:
@Test public void getAssumedRole() throws Exception { copierOptions.put(S3S3CopierOptions.Keys.ASSUME_ROLE.keyName(), "iam:role:1234:user"); S3S3CopierOptions options = new S3S3CopierOptions(copierOptions); assertThat(options.getAssumedRole(), is("iam:role:1234:user")); }
@Test public void getAssumedRoleDefaultIsNull() throws Exception { S3S3CopierOptions options = new S3S3CopierOptions(copierOptions); assertNull(options.getAssumedRole()); } |
### Question:
BindGoogleHadoopFileSystem { public void bindFileSystem(Configuration configuration) { LOG.debug("Binding GoogleHadoopFileSystem"); configuration.set(GS_FS_IMPLEMENTATION, GoogleHadoopFileSystem.class.getName()); configuration.set(GS_ABSTRACT_FS, GoogleHadoopFS.class.getName()); configuration.set(GCP_SERVICE_ACCOUNT_ENABLE, "true"); configuration.set(GCP_PROJECT_ID, "_THIS_VALUE_DOESNT_MATTER"); loadGSFileSystem(configuration); } void bindFileSystem(Configuration configuration); }### Answer:
@Test public void bindFileSystemTest() throws Exception { Configuration conf = new Configuration(); BindGoogleHadoopFileSystem binder = new BindGoogleHadoopFileSystem(); binder.bindFileSystem(conf); assertNotNull(conf.get(GCP_PROJECT_ID)); assertEquals("true", conf.get(GCP_SERVICE_ACCOUNT_ENABLE)); assertEquals(GoogleHadoopFileSystem.class.getName(), conf.get(GS_FS_IMPLEMENTATION)); assertEquals(GoogleHadoopFS.class.getName(), conf.get(GS_ABSTRACT_FS)); } |
### Question:
GCPCredentialPathProvider { public Path newPath() { String credentialProviderPath = security.getCredentialProvider(); if (isBlank(credentialProviderPath)) { return null; } java.nio.file.Path currentDirectory = Paths.get(System.getProperty("user.dir")); java.nio.file.Path pathToCredentialsFile = Paths.get(security.getCredentialProvider()); if (pathToCredentialsFile.isAbsolute()) { java.nio.file.Path pathRelative = currentDirectory.relativize(pathToCredentialsFile); return new Path(pathRelative.toString()); } else { return new Path(pathToCredentialsFile.toString()); } } @Autowired GCPCredentialPathProvider(GCPSecurity security); Path newPath(); }### Answer:
@Test public void newInstanceWithRelativePath() { String relativePath = "../test.json"; security.setCredentialProvider(relativePath); String result = new GCPCredentialPathProvider(security).newPath().toString(); assertThat(result, is(relativePath)); }
@Test public void newInstanceWithAbsolutePath() { security.setCredentialProvider("/test.json"); String result = new GCPCredentialPathProvider(security).newPath().toString(); assertFalse(new Path(result).isAbsolute()); assertThat(result, startsWith("../")); }
@Test public void newInstanceWithBlankPath() { security.setCredentialProvider(""); Path result = new GCPCredentialPathProvider(security).newPath(); assertNull(result); } |
### Question:
DistributedFileSystemPathProvider { public Path newPath(Configuration configuration) { String randomString = randomStringFactory.newInstance(); if (isBlank(security.getDistributedFileSystemWorkingDirectory())) { Path parentDirectory = getTemporaryFolder(configuration, randomString); return new Path(parentDirectory, GCP_KEY_NAME); } else { Path parentDirectory = new Path(security.getDistributedFileSystemWorkingDirectory(), randomString); return new Path(parentDirectory, GCP_KEY_NAME); } } @Autowired DistributedFileSystemPathProvider(GCPSecurity security, RandomStringFactory randomStringFactory); Path newPath(Configuration configuration); }### Answer:
@Test public void newInstanceWithoutConfigurationSet() { String dfsDirectory = "hdfs:/test-dir/"; doReturn(randomString).when(randomStringFactory).newInstance(); doReturn(dfsDirectory).when(configuration).get("hive.exec.scratchdir"); Path directory = new DistributedFileSystemPathProvider(security, randomStringFactory).newPath(configuration); String baseDirectoryExpected = dfsDirectory + randomString; assertThat(directory, is(new Path(baseDirectoryExpected, DistributedFileSystemPathProvider.GCP_KEY_NAME))); }
@Test public void newInstanceWithConfigurationSet() { String providedDirectory = "hdfs:/test/directory"; doReturn(randomString).when(randomStringFactory).newInstance(); security.setDistributedFileSystemWorkingDirectory(providedDirectory); Path directory = new DistributedFileSystemPathProvider(security, randomStringFactory).newPath(configuration); assertThat(directory.toString(), is(providedDirectory + "/" + randomString + "/" + DistributedFileSystemPathProvider.GCP_KEY_NAME)); }
@Test public void newInstanceWithoutHiveConfScratchDir() { doReturn("").when(configuration).get("hive.exec.scratchdir"); doReturn(randomString).when(randomStringFactory).newInstance(); Path directory = new DistributedFileSystemPathProvider(security, randomStringFactory).newPath(configuration); String baseDirectoryExpected = DistributedFileSystemPathProvider.DEFAULT_HDFS_PREFIX + randomString; assertThat(directory, is(new Path(baseDirectoryExpected, DistributedFileSystemPathProvider.GCP_KEY_NAME))); } |
### Question:
GCPBeanPostProcessor implements BeanPostProcessor { @Override public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (CommonBeans.BEAN_BASE_CONF.equals(beanName)) { Configuration baseConf = (Configuration) bean; setHadoopConfiguration(baseConf); return baseConf; } return bean; } @Autowired GCPBeanPostProcessor(
GCPCredentialPathProvider credentialPathProvider,
DistributedFileSystemPathProvider dfsPathProvider,
BindGoogleHadoopFileSystem bindGoogleHadoopFileSystem,
FileSystemFactory fileSystemFactory,
GCPCredentialCopier credentialCopier); @Override Object postProcessBeforeInitialization(Object bean, String beanName); @Override Object postProcessAfterInitialization(Object bean, String beanName); }### Answer:
@Test public void postProcessAfterInitializationWithIncorrectBeanName() throws Exception { String beanName = "notBaseConf"; processor.postProcessAfterInitialization(configuration, beanName); verifyZeroInteractions(credentialPathProvider, bindGoogleHadoopFileSystem, credentialCopier); }
@Test public void postProcessAfterInitializationWithConfigurationBeanProviderPathIsNotNull() throws Exception { String beanName = CommonBeans.BEAN_BASE_CONF; when(credentialPathProvider.newPath()).thenReturn(new Path("/test.json")); when(fileSystemFactory.getFileSystem(configuration)).thenReturn(fileSystem); processor.postProcessAfterInitialization(configuration, beanName); verify(bindGoogleHadoopFileSystem).bindFileSystem(configuration); verify(credentialCopier) .copyCredentials(fileSystem, configuration, credentialPathProvider, distributedFileSystemPathProvider); }
@Test public void postProcessAfterInitializationWithConfigurationBeanProviderPathIsNull() throws Exception { String beanName = CommonBeans.BEAN_BASE_CONF; when(credentialPathProvider.newPath()).thenReturn(null); processor.postProcessAfterInitialization(configuration, beanName); verifyZeroInteractions(bindGoogleHadoopFileSystem, credentialCopier, fileSystemFactory); } |
### Question:
GCPCredentialCopier { public void copyCredentials( FileSystem fs, Configuration conf, GCPCredentialPathProvider credentialPathProvider, DistributedFileSystemPathProvider dfsPathProvider) { try { Path source = credentialPathProvider.newPath(); Path destination = dfsPathProvider.newPath(conf); copyCredentialIntoHdfs(fs, source, destination); linkRelativePathInDistributedCache(conf, source, destination); } catch (IOException | URISyntaxException e) { throw new CircusTrainException(e); } } void copyCredentials(
FileSystem fs,
Configuration conf,
GCPCredentialPathProvider credentialPathProvider,
DistributedFileSystemPathProvider dfsPathProvider); }### Answer:
@Test public void copyCredentialsWithCredentialProviderSupplied() throws Exception { copier.copyCredentials(fileSystem, conf, credentialPathProvider, distributedFileSystemPathProvider); verify(fileSystem).copyFromLocalFile(credentialsFileRelativePath, dfsAbsolutePath); verify(fileSystem).deleteOnExit(dfsDirectory); assertNotNull(conf.get(DISTRIBUTED_CACHE_PROPERTY)); assertThat(conf.get(DISTRIBUTED_CACHE_PROPERTY), is(dfsAbsolutePath + SYMLINK_FLAG + credentialsFileRelativePath)); }
@Test(expected = CircusTrainException.class) public void copyCredentialsWhenFileDoesntExistThrowsException() throws Exception { doThrow(new IOException("File does not exist")) .when(fileSystem) .copyFromLocalFile(any(Path.class), any(Path.class)); copier.copyCredentials(fileSystem, conf, credentialPathProvider, distributedFileSystemPathProvider); } |
### Question:
DistCpCopierFactory implements CopierFactory { @Override public boolean supportsSchemes(String sourceScheme, String replicaScheme) { return true; } @Autowired DistCpCopierFactory(@Value("#{sourceHiveConf}") Configuration conf, MetricRegistry runningMetricsRegistry); @Override boolean supportsSchemes(String sourceScheme, String replicaScheme); @Override Copier newInstance(CopierContext copierContext); @Override Copier newInstance(
String eventId,
Path sourceBaseLocation,
Path replicaLocation,
Map<String, Object> copierOptions); @Override Copier newInstance(
String eventId,
Path sourceBaseLocation,
List<Path> sourceSubLocations,
Path replicaLocation,
Map<String, Object> copierOptions); }### Answer:
@Test public void supportSchemes() { DistCpCopierFactory factory = new DistCpCopierFactory(conf, metricRegistry); assertThat(factory.supportsSchemes("hdfs", "s3a"), is(true)); assertThat(factory.supportsSchemes("hdfs", "s3"), is(true)); assertThat(factory.supportsSchemes("hdfs", "s3n"), is(true)); assertThat(factory.supportsSchemes("hdfs", "hdfs"), is(true)); assertThat(factory.supportsSchemes("hdfs", "file"), is(true)); assertThat(factory.supportsSchemes("hdfs", "other"), is(true)); } |
### Question:
DistCpCopierFactory implements CopierFactory { @Override public Copier newInstance(CopierContext copierContext) { return new DistCpCopier(conf, copierContext.getSourceBaseLocation(), copierContext.getSourceSubLocations(), copierContext.getReplicaLocation(), copierContext.getCopierOptions(), runningMetricsRegistry); } @Autowired DistCpCopierFactory(@Value("#{sourceHiveConf}") Configuration conf, MetricRegistry runningMetricsRegistry); @Override boolean supportsSchemes(String sourceScheme, String replicaScheme); @Override Copier newInstance(CopierContext copierContext); @Override Copier newInstance(
String eventId,
Path sourceBaseLocation,
Path replicaLocation,
Map<String, Object> copierOptions); @Override Copier newInstance(
String eventId,
Path sourceBaseLocation,
List<Path> sourceSubLocations,
Path replicaLocation,
Map<String, Object> copierOptions); }### Answer:
@Test public void hdfsTableCopier() { DistCpCopierFactory factory = new DistCpCopierFactory(conf, metricRegistry); Copier copier = factory.newInstance("evt-123", new Path("confLocation"), Collections.<Path> emptyList(), new Path("hdfs:/replicaLocation"), copierOptions); assertEquals(DistCpCopier.class, copier.getClass()); }
@Test public void fileTableCopier() { DistCpCopierFactory factory = new DistCpCopierFactory(conf, metricRegistry); Copier copier = factory.newInstance("evt-123", new Path("confLocation"), Collections.<Path> emptyList(), new Path("file:/replicaLocation"), copierOptions); assertEquals(DistCpCopier.class, copier.getClass()); }
@Test public void s3aTableCopier() { DistCpCopierFactory factory = new DistCpCopierFactory(conf, metricRegistry); Copier copier = factory.newInstance("evt-123", new Path("confLocation"), Collections.<Path> emptyList(), new Path("s3a:/replicaLocation"), copierOptions); assertEquals(DistCpCopier.class, copier.getClass()); }
@Test public void s3TableCopier() { DistCpCopierFactory factory = new DistCpCopierFactory(conf, metricRegistry); Copier copier = factory.newInstance("evt-123", new Path("confLocation"), Collections.<Path> emptyList(), new Path("s3:/replicaLocation"), copierOptions); assertEquals(DistCpCopier.class, copier.getClass()); }
@Test public void s3nTableCopier() { DistCpCopierFactory factory = new DistCpCopierFactory(conf, metricRegistry); Copier copier = factory.newInstance("evt-123", new Path("confLocation"), Collections.<Path> emptyList(), new Path("s3n:/replicaLocation"), copierOptions); assertEquals(DistCpCopier.class, copier.getClass()); } |
### Question:
HdfsDataManipulatorFactory implements DataManipulatorFactory { @Override public boolean supportsSchemes(String sourceScheme, String replicaScheme) { return true; } @Autowired HdfsDataManipulatorFactory(@Value("#{replicaHiveConf}") Configuration conf); @Override DataManipulator newInstance(Path path, Map<String, Object> copierOptions); @Override boolean supportsSchemes(String sourceScheme, String replicaScheme); }### Answer:
@Test public void checkSupportsHdfs() { sourceLocation = hdfsPath; replicaLocation = hdfsPath; assertTrue(dataManipulatorFactory.supportsSchemes(sourceLocation, replicaLocation)); }
@Test public void checkSupportsHdfsUpperCase() { sourceLocation = hdfsPath.toUpperCase(); replicaLocation = hdfsPath.toUpperCase(); assertTrue(dataManipulatorFactory.supportsSchemes(sourceLocation, replicaLocation)); }
@Test public void checkSupportsS3ToHdfs() { sourceLocation = s3Path; replicaLocation = hdfsPath; assertTrue(dataManipulatorFactory.supportsSchemes(sourceLocation, replicaLocation)); }
@Test public void checkSupportsS3() { sourceLocation = s3Path; replicaLocation = s3Path; assertTrue(dataManipulatorFactory.supportsSchemes(sourceLocation, replicaLocation)); }
@Test public void checkSupportsHdfsToS3() { sourceLocation = hdfsPath; replicaLocation = s3Path; assertTrue(dataManipulatorFactory.supportsSchemes(sourceLocation, replicaLocation)); }
@Test public void checkSupportsRandomPaths() { sourceLocation = "<path>"; replicaLocation = "<path>"; assertTrue(dataManipulatorFactory.supportsSchemes(sourceLocation, replicaLocation)); } |
### Question:
FileStatusTreeTraverser extends TreeTraverser<FileStatus> { @Override public Iterable<FileStatus> children(FileStatus root) { if (root.isFile()) { return ImmutableList.of(); } try { FileStatus[] listStatus = fileSystem.listStatus(root.getPath()); if (listStatus == null || listStatus.length == 0) { return ImmutableList.of(); } return ImmutableList.copyOf(listStatus); } catch (IOException e) { throw new CircusTrainException("Unable to list children for path: " + root.getPath()); } } FileStatusTreeTraverser(FileSystem fileSystem); @Override Iterable<FileStatus> children(FileStatus root); }### Answer:
@Test public void typical() throws IOException { when(status.isFile()).thenReturn(false); FileStatus fileStatus = new FileStatus(); when(fileSystem.listStatus(any(Path.class))).thenReturn(new FileStatus[] { fileStatus }); Iterable<FileStatus> children = traverser.children(status); verify(fileSystem).listStatus(any(Path.class)); assertThat(Iterables.size(children), is(1)); assertTrue(children.iterator().next() == fileStatus); }
@Test public void noChildren() throws IOException { when(status.isFile()).thenReturn(false); when(fileSystem.listStatus(any(Path.class))).thenReturn(new FileStatus[] {}); Iterable<FileStatus> children = traverser.children(status); verify(fileSystem).listStatus(any(Path.class)); assertThat(Iterables.size(children), is(0)); }
@Test public void nullListStatus() throws IOException { when(status.isFile()).thenReturn(false); when(fileSystem.listStatus(any(Path.class))).thenReturn(null); Iterable<FileStatus> children = traverser.children(status); verify(fileSystem).listStatus(any(Path.class)); assertThat(Iterables.size(children), is(0)); }
@Test public void rootIsFile() throws IOException { when(status.isFile()).thenReturn(true); Iterable<FileStatus> children = traverser.children(status); verify(fileSystem, never()).listStatus(any(Path.class)); assertThat(Iterables.size(children), is(0)); }
@Test(expected = CircusTrainException.class) public void fileSystemException() throws IOException { when(status.isFile()).thenReturn(false); doThrow(IOException.class).when(fileSystem).listStatus(any(Path.class)); traverser.children(status); } |
### Question:
DistCpCopier implements Copier { @Override public Metrics copy() throws CircusTrainException { LOG.info("Copying table data."); LOG.debug("Invoking DistCp: {} -> {}", sourceDataBaseLocation, replicaDataLocation); DistCpOptions distCpOptions = parseCopierOptions(copierOptions); LOG.debug("Invoking DistCp with options: {}", distCpOptions); CircusTrainCopyListing.setAsCopyListingClass(conf); CircusTrainCopyListing.setRootPath(conf, sourceDataBaseLocation); try { distCpOptions.setBlocking(false); Job job = executor.exec(conf, distCpOptions); String counter = String .format("%s_BYTES_WRITTEN", replicaDataLocation.toUri().getScheme().toUpperCase(Locale.ROOT)); registerRunningJobMetrics(job, counter); if (!job.waitForCompletion(true)) { throw new IOException( "DistCp failure: Job " + job.getJobID() + " has failed: " + job.getStatus().getFailureInfo()); } return new JobMetrics(job, FileSystemCounter.class.getName(), counter); } catch (Exception e) { cleanUpReplicaDataLocation(); throw new CircusTrainException("Unable to copy file(s)", e); } } DistCpCopier(
Configuration conf,
Path sourceDataBaseLocation,
List<Path> sourceDataLocations,
Path replicaDataLocation,
Map<String, Object> copierOptions,
MetricRegistry registry); DistCpCopier(
Configuration conf,
Path sourceDataBaseLocation,
List<Path> sourceDataLocations,
Path replicaDataLocation,
Map<String, Object> copierOptions,
DistCpExecutor executor,
MetricRegistry registry); @Override Metrics copy(); }### Answer:
@Test public void typical() throws Exception { copier = new DistCpCopier(conf, sourceDataBaseLocation, sourceDataLocations, replicaDataLocation, null, registry); Metrics metrics = copier.copy(); assertThat(metrics, not(nullValue())); String outputPath = replicaDataLocation.toUri().getPath(); File outputSub2Data = new File(outputPath, "sub1/sub2/data"); assertTrue(outputSub2Data.exists()); assertThat(Files.asCharSource(outputSub2Data, UTF_8).read(), is("test1")); File outputSub4Data = new File(outputPath, "sub3/sub4/data"); assertTrue(outputSub4Data.exists()); assertThat(Files.asCharSource(outputSub4Data, UTF_8).read(), is("test2")); assertThat(registry.getGauges().containsKey(RunningMetrics.DIST_CP_BYTES_REPLICATED.name()), is(true)); }
@Test public void typicalOneFile() throws Exception { Path inputFile = new Path(sourceDataBaseLocation, "sub1/sub2/data"); Path targetFile = new Path(replicaDataLocation, "output.txt"); copier = new DistCpCopier(conf, inputFile, Collections.<Path>emptyList(), targetFile, null, registry); Metrics metrics = copier.copy(); assertThat(metrics, not(nullValue())); String outputPath = targetFile.toUri().getPath(); Path parent = targetFile.getParent(); FileSystem fs = parent.getFileSystem(conf); int fileCopyCount = fs.listStatus(parent).length; assertThat(fileCopyCount, is(1)); File outputSub2Data = new File(outputPath); assertTrue(outputSub2Data.exists()); assertThat(Files.asCharSource(outputSub2Data, UTF_8).read(), is("test1")); }
@Test public void cleanUpOnFailure() throws Exception { Map<String, Object> copierOptions = new HashMap<>(); copierOptions.put("file-attribute", "xattr"); copier = new DistCpCopier(conf, sourceDataBaseLocation, sourceDataLocations, replicaDataLocation, copierOptions, registry); try { copier.copy(); fail("Expecting copier failure"); } catch (CircusTrainException e) { String outputPath = replicaDataLocation.toUri().toString(); assertFalse(new File(outputPath).exists()); } } |
### Question:
SourceHiveEndpoint extends HiveEndpoint { @Override public TableAndStatistics getTableAndStatistics(TableReplication tableReplication) { SourceTable sourceTable = tableReplication.getSourceTable(); return super.getTableAndStatistics(sourceTable.getDatabaseName(), sourceTable.getTableName()); } SourceHiveEndpoint(
String name,
HiveConf hiveConf,
Supplier<CloseableMetaStoreClient> metaStoreClientSupplier); @Override TableAndStatistics getTableAndStatistics(TableReplication tableReplication); }### Answer:
@Test public void useCorrectReplicaTableName() throws Exception { SourceHiveEndpoint replicaDiffEndpoint = new SourceHiveEndpoint("name", hiveConf, metastoreSupplier); when(metastoreSupplier.get()).thenReturn(metastoreClient); when(metastoreClient.getTable("dbname", "tableName")).thenReturn(table); when(table.getSd()).thenReturn(sd); when(tableReplication.getSourceTable()).thenReturn(sourceTable); when(sourceTable.getDatabaseName()).thenReturn("dbname"); when(sourceTable.getTableName()).thenReturn("tableName"); TableAndStatistics tableAndStats = replicaDiffEndpoint.getTableAndStatistics(tableReplication); assertThat(tableAndStats.getTable(), is(table)); } |
### Question:
RelativePathFunction implements Function<FileStatus, String> { @Override public String apply(@Nonnull FileStatus fileStatus) { return DistCpUtils.getRelativePath(sourceRootPath, fileStatus.getPath()); } RelativePathFunction(Path sourceRootPath); @Override String apply(@Nonnull FileStatus fileStatus); }### Answer:
@Test public void typical() { Path sourceRootPath = new Path("/root/"); Path path = new Path("/root/foo/bar/"); when(fileStatus.getPath()).thenReturn(path); String relativePath = new RelativePathFunction(sourceRootPath).apply(fileStatus); assertThat(relativePath, is("/foo/bar")); } |
### Question:
CircusTrainCopyListing extends SimpleCopyListing { static void setAsCopyListingClass(Configuration conf) { conf.setClass(CONF_LABEL_COPY_LISTING_CLASS, CircusTrainCopyListing.class, CopyListing.class); } CircusTrainCopyListing(Configuration configuration, Credentials credentials); @Override void doBuildListing(Path pathToListFile, DistCpOptions options); }### Answer:
@Test public void copyListingClass() { CircusTrainCopyListing.setAsCopyListingClass(conf); assertThat(conf.get(DistCpConstants.CONF_LABEL_COPY_LISTING_CLASS), is(CircusTrainCopyListing.class.getName())); } |
### Question:
CircusTrainCopyListing extends SimpleCopyListing { static Path getRootPath(Configuration conf) { String pathString = conf.get(CONF_ROOT_PATH); if (pathString == null) { throw new CircusTrainException("No root path was set."); } return new Path(pathString); } CircusTrainCopyListing(Configuration configuration, Credentials credentials); @Override void doBuildListing(Path pathToListFile, DistCpOptions options); }### Answer:
@Test(expected = CircusTrainException.class) public void rootPathNotSet() { CircusTrainCopyListing.getRootPath(conf); } |
### Question:
IoUtil { public static void closeSilently(Closeable... closeables) { closeSilently(null, closeables); } private IoUtil(); static void closeSilently(Closeable... closeables); static void closeSilently(Logger log, Closeable... closeables); }### Answer:
@Test public void closeSilentlyNoCloseable() { IoUtil.closeSilently(logger); verifyNoMoreInteractions(logger); }
@Test public void closeSilentlyTypical() throws Exception { IoUtil.closeSilently(logger, closeable); verify(closeable).close(); verifyNoMoreInteractions(logger); }
@Test public void closeSilentlyWhenCloseThrowsException() throws Exception { RuntimeException e = new RuntimeException(); doThrow(e).when(closeable).close(); IoUtil.closeSilently(logger, closeable); verify(closeable).close(); verify(logger).debug(anyString(), eq(closeable), eq(e)); } |
### Question:
ConfigurationUtil { public static <T> void publish(Configuration configuration, String label, T value) { configuration.set(label, String.valueOf(value)); } private ConfigurationUtil(); static void publish(Configuration configuration, String label, T value); static int getInt(Configuration configuration, String label); static long getLong(Configuration configuration, String label); static Class<? extends InputFormat> getStrategy(Configuration conf, S3MapReduceCpOptions options); }### Answer:
@Test public void publish() { class T { @Override public String toString() { return "Hello world!"; } } ConfigurationUtil.publish(config, "a.b.c", new T()); assertThat(config.get("a.b.c"), is("Hello world!")); } |
### Question:
ConfigurationUtil { public static int getInt(Configuration configuration, String label) { int value = configuration.getInt(label, -1); assert value >= 0 : "Couldn't find " + label; return value; } private ConfigurationUtil(); static void publish(Configuration configuration, String label, T value); static int getInt(Configuration configuration, String label); static long getLong(Configuration configuration, String label); static Class<? extends InputFormat> getStrategy(Configuration conf, S3MapReduceCpOptions options); }### Answer:
@Test(expected = AssertionError.class) public void getUnknownIntProperty() { ConfigurationUtil.getInt(config, "a.b.c"); }
@Test public void getIntProperty() { config.set("a.b.c", "1024"); assertThat(ConfigurationUtil.getInt(config, "a.b.c"), is(1024)); } |
### Question:
ConfigurationUtil { public static long getLong(Configuration configuration, String label) { long value = configuration.getLong(label, -1); assert value >= 0 : "Couldn't find " + label; return value; } private ConfigurationUtil(); static void publish(Configuration configuration, String label, T value); static int getInt(Configuration configuration, String label); static long getLong(Configuration configuration, String label); static Class<? extends InputFormat> getStrategy(Configuration conf, S3MapReduceCpOptions options); }### Answer:
@Test(expected = AssertionError.class) public void getUnknownLongProperty() { ConfigurationUtil.getLong(config, "a.b.c"); }
@Test public void getLongProperty() { config.set("a.b.c", "1234567890"); assertThat(ConfigurationUtil.getLong(config, "a.b.c"), is(1234567890L)); } |
### Question:
ConfigurationUtil { public static Class<? extends InputFormat> getStrategy(Configuration conf, S3MapReduceCpOptions options) { String confLabel = "com.hotels.bdp.circustrain.s3mapreducecp." + options.getCopyStrategy().toLowerCase(Locale.getDefault()) + ".strategy.impl"; return conf.getClass(confLabel, UniformSizeInputFormat.class, InputFormat.class); } private ConfigurationUtil(); static void publish(Configuration configuration, String label, T value); static int getInt(Configuration configuration, String label); static long getLong(Configuration configuration, String label); static Class<? extends InputFormat> getStrategy(Configuration conf, S3MapReduceCpOptions options); }### Answer:
@Test public void getDefaultStrategy() { S3MapReduceCpOptions options = new S3MapReduceCpOptions(); assertThat(ConfigurationUtil.getStrategy(config, options), is(CoreMatchers.<Class<?>> equalTo(UniformSizeInputFormat.class))); }
@Test public void getCustomStrategy() { class CustomInputFormat extends InputFormat<Text, CopyListingFileStatus> { @Override public List<InputSplit> getSplits(JobContext context) throws IOException, InterruptedException { return null; } @Override public RecordReader<Text, CopyListingFileStatus> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException { return null; } } config.set("com.hotels.bdp.circustrain.s3mapreducecp.my-strategy.strategy.impl", CustomInputFormat.class.getName()); S3MapReduceCpOptions options = S3MapReduceCpOptions.builder(null, null).copyStrategy("my-strategy").build(); assertThat(ConfigurationUtil.getStrategy(config, options), is(CoreMatchers.<Class<?>> equalTo(CustomInputFormat.class))); } |
### Question:
PathUtil { public static String getRelativePath(Path sourceRootPath, Path childPath) { String childPathString = childPath.toUri().getPath(); String sourceRootPathString = sourceRootPath.toUri().getPath(); return "/".equals(sourceRootPathString) ? childPathString : childPathString.substring(sourceRootPathString.length()); } private PathUtil(); static String getRelativePath(Path sourceRootPath, Path childPath); static String toString(Path path); static String toBucketName(URI uri); static String toBucketKey(URI uri); static boolean isFile(URI path); }### Answer:
@Test public void getRelativePathRoot() { Path root = new Path("/"); Path child = new Path("/a"); assertThat(PathUtil.getRelativePath(root, child), is("/a")); }
@Test public void relativePath() { Path root = new Path("/tmp/abc"); Path child = new Path("/tmp/abc/xyz/file"); assertThat(PathUtil.getRelativePath(root, child), is("/xyz/file")); } |
### Question:
PathUtil { public static String toString(Path path) { return path == null ? null : path.toUri().toString(); } private PathUtil(); static String getRelativePath(Path sourceRootPath, Path childPath); static String toString(Path path); static String toBucketName(URI uri); static String toBucketKey(URI uri); static boolean isFile(URI path); }### Answer:
@Test public void nullPathToString() { assertThat(PathUtil.toString(null), is(nullValue())); }
@Test public void pathToString() { assertThat(PathUtil.toString(new Path("/foo/bar/")), is("/foo/bar")); assertThat(PathUtil.toString(new Path("s3: } |
### Question:
PathUtil { public static String toBucketName(URI uri) { String authority = uri.getAuthority(); if (authority == null) { throw new IllegalArgumentException("URI " + uri + " has no authority part"); } return authority; } private PathUtil(); static String getRelativePath(Path sourceRootPath, Path childPath); static String toString(Path path); static String toBucketName(URI uri); static String toBucketKey(URI uri); static boolean isFile(URI path); }### Answer:
@Test(expected = NullPointerException.class) public void nullUriToBucketName() { PathUtil.toBucketName(null); }
@Test(expected = IllegalArgumentException.class) public void protocolOnlyUriToBucketName() { PathUtil.toBucketName(URI.create("s3: }
@Test(expected = IllegalArgumentException.class) public void uriWithNoAuthorityToBucketName() { PathUtil.toBucketName(URI.create("/foo/bar")); }
@Test public void uriToBucketName() { assertThat(PathUtil.toBucketName(URI.create("s3: }
@Test(expected = IllegalArgumentException.class) public void protocolOnlyUriToBucketKey() { PathUtil.toBucketName(URI.create("s3: } |
### Question:
PathUtil { public static String toBucketKey(URI uri) { String key = uri.getPath().substring(1); if (key.isEmpty()) { throw new IllegalArgumentException("URI " + uri + " has no path part"); } return key; } private PathUtil(); static String getRelativePath(Path sourceRootPath, Path childPath); static String toString(Path path); static String toBucketName(URI uri); static String toBucketKey(URI uri); static boolean isFile(URI path); }### Answer:
@Test(expected = NullPointerException.class) public void nullUriToBucketKey() { PathUtil.toBucketKey(null); }
@Test public void uriWithNoAuthorityToBucketKey() { assertThat(PathUtil.toBucketKey(URI.create("/foo/bar")), is("foo/bar")); }
@Test public void uriToBucketKey() { assertThat(PathUtil.toBucketKey(URI.create("s3: }
@Test(expected = IllegalArgumentException.class) public void uriWithNoPathToBucketKey() { PathUtil.toBucketKey(URI.create("s3: } |
### Question:
PathUtil { public static boolean isFile(URI path) { return !path.toString().endsWith("/"); } private PathUtil(); static String getRelativePath(Path sourceRootPath, Path childPath); static String toString(Path path); static String toBucketName(URI uri); static String toBucketKey(URI uri); static boolean isFile(URI path); }### Answer:
@Test public void isFile() { assertThat(PathUtil.isFile(URI.create("s3: assertThat(PathUtil.isFile(URI.create("s3: }
@Test public void isDirectory() { assertThat(PathUtil.isFile(URI.create("s3: } |
### Question:
BytesFormatter { public static String getStringDescriptionFor(long nBytes) { char[] units = { 'B', 'K', 'M', 'G', 'T', 'P' }; double current = nBytes; double prev = current; int index = 0; while ((current = current / 1024) >= 1) { prev = current; ++index; } assert index < units.length : "Too large a number."; return getFormatter().format(prev) + units[index]; } private BytesFormatter(); static DecimalFormat getFormatter(); static String getStringDescriptionFor(long nBytes); }### Answer:
@Test public void bytes() { assertThat(BytesFormatter.getStringDescriptionFor(100L), is("100.0B")); }
@Test public void kilobytes() { assertThat(BytesFormatter.getStringDescriptionFor(1024L), is("1.0K")); assertThat(BytesFormatter.getStringDescriptionFor(2256L), is("2.2K")); }
@Test public void megabytes() { assertThat(BytesFormatter.getStringDescriptionFor(1024L * 1024L), is("1.0M")); assertThat(BytesFormatter.getStringDescriptionFor((2024L * 1024L) + (256L * 1024L)), is("2.2M")); }
@Test public void gigabytes() { assertThat(BytesFormatter.getStringDescriptionFor(1024L * 1024L * 1024L), is("1.0G")); }
@Test public void terabytes() { assertThat(BytesFormatter.getStringDescriptionFor(1024L * 1024L * 1024L * 1024L), is("1.0T")); }
@Test public void petabytes() { assertThat(BytesFormatter.getStringDescriptionFor(1024L * 1024L * 1024L * 1024L * 1024L), is("1.0P")); } |
### Question:
ThrottledInputStream extends InputStream { @Override public int read() throws IOException { throttle(); int data = rawStream.read(); if (data != -1) { bytesRead++; } return data; } ThrottledInputStream(InputStream rawStream); ThrottledInputStream(InputStream rawStream, long maxBytesPerSec); @Override void close(); @Override int read(); @Override int read(byte[] b); @Override int read(byte[] b, int off, int len); int read(long position, byte[] buffer, int offset, int length); long getTotalBytesRead(); long getBytesPerSec(); long getTotalSleepTime(); @Override String toString(); }### Answer:
@Test public void read() { File tmpFile; File outFile; try { tmpFile = createFile(1024); outFile = createFile(); tmpFile.deleteOnExit(); outFile.deleteOnExit(); long maxBandwidth = copyAndAssert(tmpFile, outFile, 0, 1, -1, CB.BUFFER); copyAndAssert(tmpFile, outFile, maxBandwidth, 20, 0, CB.BUFFER); copyAndAssert(tmpFile, outFile, maxBandwidth, 20, 0, CB.BUFF_OFFSET); copyAndAssert(tmpFile, outFile, maxBandwidth, 20, 0, CB.ONE_C); } catch (IOException e) { LOG.error("Exception encountered ", e); } }
@Test public void testRead() { File tmpFile; File outFile; try { tmpFile = createFile(1024); outFile = createFile(); tmpFile.deleteOnExit(); outFile.deleteOnExit(); long maxBandwidth = copyAndAssert(tmpFile, outFile, 0, 1, -1, CB.BUFFER); copyAndAssert(tmpFile, outFile, maxBandwidth, 20, 0, CB.BUFFER); copyAndAssert(tmpFile, outFile, maxBandwidth, 20, 0, CB.BUFF_OFFSET); copyAndAssert(tmpFile, outFile, maxBandwidth, 20, 0, CB.ONE_C); } catch (IOException e) { LOG.error("Exception encountered ", e); } } |
### Question:
RetriableCommand { public T execute(Object... arguments) throws Exception { Exception latestException; int counter = 0; while (true) { try { return doExecute(arguments); } catch (Exception exception) { LOG.error("Failure in Retriable command: {}", description, exception); latestException = exception; } counter++; RetryAction action = retryPolicy.shouldRetry(latestException, counter, 0, true); if (action.action == RetryPolicy.RetryAction.RetryDecision.RETRY) { ThreadUtil.sleepAtLeastIgnoreInterrupts(action.delayMillis); } else { break; } } throw new IOException("Couldn't run retriable-command: " + description, latestException); } RetriableCommand(String description); RetriableCommand(String description, RetryPolicy retryPolicy); T execute(Object... arguments); RetriableCommand<T> setRetryPolicy(RetryPolicy retryPolicy); }### Answer:
@Test public void typical() { try { new MyRetriableCommand(5).execute(0); Assert.assertTrue(false); } catch (Exception e) { Assert.assertTrue(true); } try { new MyRetriableCommand(3).execute(0); Assert.assertTrue(true); } catch (Exception e) { Assert.assertTrue(false); } try { new MyRetriableCommand(5, RetryPolicies.retryUpToMaximumCountWithFixedSleep(5, 0, TimeUnit.MILLISECONDS)) .execute(0); Assert.assertTrue(true); } catch (Exception e) { Assert.assertTrue(false); } } |
### Question:
DynamicInputFormat extends InputFormat<K, V> { @Override public List<InputSplit> getSplits(JobContext jobContext) throws IOException, InterruptedException { LOG.info("DynamicInputFormat: Getting splits for job: {}", jobContext.getJobID()); return createSplits(jobContext, splitCopyListingIntoChunksWithShuffle(jobContext)); } @Override List<InputSplit> getSplits(JobContext jobContext); @Override RecordReader<K, V> createRecordReader(InputSplit inputSplit, TaskAttemptContext taskAttemptContext); }### Answer:
@Test public void getSplits() throws Exception { S3MapReduceCpOptions options = getOptions(); Configuration configuration = new Configuration(); configuration.set("mapred.map.tasks", String.valueOf(options.getMaxMaps())); CopyListing.getCopyListing(configuration, CREDENTIALS, options).buildListing(new Path( cluster.getFileSystem().getUri().toString() + temporaryFolder.getRoot() + "/testDynInputFormat/fileList.seq"), options); JobContext jobContext = new JobContextImpl(configuration, new JobID()); DynamicInputFormat<Text, CopyListingFileStatus> inputFormat = new DynamicInputFormat<>(); List<InputSplit> splits = inputFormat.getSplits(jobContext); int nFiles = 0; int taskId = 0; for (InputSplit split : splits) { RecordReader<Text, CopyListingFileStatus> recordReader = inputFormat.createRecordReader(split, null); StubContext stubContext = new StubContext(jobContext.getConfiguration(), recordReader, taskId); final TaskAttemptContext taskAttemptContext = stubContext.getContext(); recordReader.initialize(splits.get(0), taskAttemptContext); float previousProgressValue = 0f; while (recordReader.nextKeyValue()) { CopyListingFileStatus fileStatus = recordReader.getCurrentValue(); String source = fileStatus.getPath().toString(); assertTrue(expectedFilePaths.contains(source)); final float progress = recordReader.getProgress(); assertTrue(progress >= previousProgressValue); assertTrue(progress >= 0.0f); assertTrue(progress <= 1.0f); previousProgressValue = progress; ++nFiles; } assertTrue(recordReader.getProgress() == 1.0f); ++taskId; } Assert.assertEquals(expectedFilePaths.size(), nFiles); } |
### Question:
DynamicInputFormat extends InputFormat<K, V> { private static int getSplitRatio(Configuration conf) { int splitRatio = conf .getInt(S3MapReduceCpConstants.CONF_LABEL_SPLIT_RATIO, S3MapReduceCpConstants.SPLIT_RATIO_DEFAULT); if (splitRatio <= 0) { LOG .warn("{} should be positive. Fall back to default value: {}", S3MapReduceCpConstants.CONF_LABEL_SPLIT_RATIO, S3MapReduceCpConstants.SPLIT_RATIO_DEFAULT); splitRatio = S3MapReduceCpConstants.SPLIT_RATIO_DEFAULT; } return splitRatio; } @Override List<InputSplit> getSplits(JobContext jobContext); @Override RecordReader<K, V> createRecordReader(InputSplit inputSplit, TaskAttemptContext taskAttemptContext); }### Answer:
@Test public void getSplitRatio() throws Exception { assertEquals(1, DynamicInputFormat.getSplitRatio(1, 1000000000)); assertEquals(2, DynamicInputFormat.getSplitRatio(11000000, 10)); assertEquals(4, DynamicInputFormat.getSplitRatio(30, 700)); assertEquals(2, DynamicInputFormat.getSplitRatio(30, 200)); Configuration conf = new Configuration(); conf.setInt(S3MapReduceCpConstants.CONF_LABEL_MAX_CHUNKS_TOLERABLE, -1); conf.setInt(S3MapReduceCpConstants.CONF_LABEL_MAX_CHUNKS_IDEAL, -1); conf.setInt(S3MapReduceCpConstants.CONF_LABEL_MIN_RECORDS_PER_CHUNK, -1); conf.setInt(S3MapReduceCpConstants.CONF_LABEL_SPLIT_RATIO, -1); assertEquals(1, DynamicInputFormat.getSplitRatio(1, 1000000000, conf)); assertEquals(2, DynamicInputFormat.getSplitRatio(11000000, 10, conf)); assertEquals(4, DynamicInputFormat.getSplitRatio(30, 700, conf)); assertEquals(2, DynamicInputFormat.getSplitRatio(30, 200, conf)); conf.setInt(S3MapReduceCpConstants.CONF_LABEL_MAX_CHUNKS_TOLERABLE, 100); conf.setInt(S3MapReduceCpConstants.CONF_LABEL_MAX_CHUNKS_IDEAL, 30); conf.setInt(S3MapReduceCpConstants.CONF_LABEL_MIN_RECORDS_PER_CHUNK, 10); conf.setInt(S3MapReduceCpConstants.CONF_LABEL_SPLIT_RATIO, 53); assertEquals(53, DynamicInputFormat.getSplitRatio(3, 200, conf)); } |
### Question:
RegionValidator implements IParameterValidator { @Override public void validate(String name, String value) throws ParameterException { try { Region.fromValue(value); } catch (IllegalArgumentException e) { throw new ParameterException("Parameter " + name + " is not a valid AWS region (found " + value + ")", e); } } @Override void validate(String name, String value); }### Answer:
@Test public void typical() { validator.validate("region", "us-west-2"); }
@Test public void nullValue() { validator.validate("region", null); }
@Test(expected = ParameterException.class) public void caseSensitive() { validator.validate("region", "US-WEST-2"); }
@Test(expected = ParameterException.class) public void invalid() { validator.validate("region", "DEFAULT_REGION"); } |
### Question:
PositiveNonZeroLong implements IParameterValidator { @Override public void validate(String name, String value) throws ParameterException { long n = Long.parseLong(value); if (n <= 0) { throw new ParameterException("Parameter " + name + " should be greater than zero (found " + value + ")"); } } @Override void validate(String name, String value); }### Answer:
@Test public void typical() { validator.validate("var", "123"); }
@Test(expected = ParameterException.class) public void zero() { validator.validate("var", "0"); }
@Test(expected = ParameterException.class) public void negative() { validator.validate("var", "-1"); }
@Test(expected = NumberFormatException.class) public void invalidFormat() { validator.validate("var", "abc"); } |
### Question:
PositiveNonZeroInteger implements IParameterValidator { @Override public void validate(String name, String value) throws ParameterException { int n = Integer.parseInt(value); if (n <= 0) { throw new ParameterException("Parameter " + name + " should be greater than zero (found " + value + ")"); } } @Override void validate(String name, String value); }### Answer:
@Test public void typical() { validator.validate("var", "123"); }
@Test(expected = ParameterException.class) public void zero() { validator.validate("var", "0"); }
@Test(expected = ParameterException.class) public void negative() { validator.validate("var", "-1"); }
@Test(expected = NumberFormatException.class) public void invalidFormat() { validator.validate("var", "abc"); } |
### Question:
StorageClassValidator implements IParameterValidator { @Override public void validate(String name, String value) throws ParameterException { try { AwsUtil.toStorageClass(value); } catch (IllegalArgumentException e) { throw new ParameterException("Parameter " + name + " is not a valid AWS S3 storage class (found " + value + ")", e); } } @Override void validate(String name, String value); }### Answer:
@Test public void typical() { validator.validate("storageClass", "standard"); validator.validate("storageClass", "STANDARD"); validator.validate("storageClass", "STaNDaRD"); }
@Test public void nullValue() { validator.validate("storageClass", null); }
@Test(expected = ParameterException.class) public void invalid() { validator.validate("storageClass", "REDUCEDREDUNDANCY"); } |
### Question:
PositiveLong implements IParameterValidator { @Override public void validate(String name, String value) throws ParameterException { long n = Long.parseLong(value); if (n < 0) { throw new ParameterException("Parameter " + name + " should be positive (found " + value + ")"); } } @Override void validate(String name, String value); }### Answer:
@Test public void typical() { validator.validate("var", "123"); }
@Test public void zero() { validator.validate("var", "0"); }
@Test(expected = ParameterException.class) public void negative() { validator.validate("var", "-1"); }
@Test(expected = NumberFormatException.class) public void invalidFormat() { validator.validate("var", "abc"); } |
### Question:
S3S3CopierOptions { public Long getMultipartCopyThreshold() { return MapUtils.getLong(copierOptions, Keys.MULTIPART_COPY_THRESHOLD.keyName(), null); } S3S3CopierOptions(); S3S3CopierOptions(Map<String, Object> copierOptions); void setMaxThreadPoolSize(int maxThreadPoolSize); int getMaxThreadPoolSize(); Long getMultipartCopyThreshold(); Long getMultipartCopyPartSize(); URI getS3Endpoint(); URI getS3Endpoint(String region); Boolean isS3ServerSideEncryption(); CannedAccessControlList getCannedAcl(); String getAssumedRole(); int getAssumedRoleCredentialDuration(); int getMaxCopyAttempts(); }### Answer:
@Test public void getMultipartCopyThreshold() throws Exception { copierOptions.put(S3S3CopierOptions.Keys.MULTIPART_COPY_THRESHOLD.keyName(), 128L); S3S3CopierOptions options = new S3S3CopierOptions(copierOptions); assertThat(options.getMultipartCopyThreshold(), is(128L)); }
@Test public void getMultipartCopyThresholdDefaultIsNull() throws Exception { S3S3CopierOptions options = new S3S3CopierOptions(copierOptions); assertNull(options.getMultipartCopyThreshold()); } |
### Question:
PathConverter implements IStringConverter<Path> { @Override public Path convert(String pathName) { return new Path(pathName); } @Override Path convert(String pathName); }### Answer:
@Test public void typical() { assertThat(converter.convert("s3: }
@Test(expected = IllegalArgumentException.class) public void invalid() { converter.convert("s3:"); } |
### Question:
CopyListing extends Configured { public static CopyListing getCopyListing( Configuration configuration, Credentials credentials, S3MapReduceCpOptions options) throws IOException { String copyListingClassName = configuration.get(S3MapReduceCpConstants.CONF_LABEL_COPY_LISTING_CLASS, ""); Class<? extends CopyListing> copyListingClass; try { if (!copyListingClassName.isEmpty()) { copyListingClass = configuration .getClass(S3MapReduceCpConstants.CONF_LABEL_COPY_LISTING_CLASS, SimpleCopyListing.class, CopyListing.class); } else { copyListingClass = SimpleCopyListing.class; } copyListingClassName = copyListingClass.getName(); Constructor<? extends CopyListing> constructor = copyListingClass .getDeclaredConstructor(Configuration.class, Credentials.class); return constructor.newInstance(configuration, credentials); } catch (NoSuchMethodException | SecurityException | InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException e) { throw new IOException("Unable to instantiate " + copyListingClassName, e); } } protected CopyListing(Configuration configuration, Credentials credentials); final void buildListing(Path pathToListFile, S3MapReduceCpOptions options); static CopyListing getCopyListing(
Configuration configuration,
Credentials credentials,
S3MapReduceCpOptions options); }### Answer:
@Test(timeout = 10000) public void defaultCopyListing() throws Exception { S3MapReduceCpOptions options = S3MapReduceCpOptions .builder(Arrays.asList(new Path("/tmp/in4")), new URI("/tmp/out4")) .build(); CopyListing listing = CopyListing.getCopyListing(CONFIG, CREDENTIALS, options); assertThat(listing, is(instanceOf(SimpleCopyListing.class))); } |
### Question:
ExponentialBackoffStrategy implements BackoffStrategy { @Override public long delayBeforeNextRetry( AmazonWebServiceRequest originalRequest, AmazonClientException exception, int retriesAttempted) { long backoffDelay = retriesAttempted * errorRetryDelay; LOG.debug("Exception caught during upload, retries attempted = {}, will retry in {} ms", retriesAttempted, backoffDelay, exception); return backoffDelay; } ExponentialBackoffStrategy(long errorRetryDelay); @Override long delayBeforeNextRetry(
AmazonWebServiceRequest originalRequest,
AmazonClientException exception,
int retriesAttempted); }### Answer:
@Test public void firstBackoff() { assertThat(strategy.delayBeforeNextRetry(originalRequest, exception, 1), is(1L * ERROR_RETRY_DELAY)); }
@Test public void secondBackoff() { assertThat(strategy.delayBeforeNextRetry(originalRequest, exception, 2), is(2L * ERROR_RETRY_DELAY)); }
@Test public void thirdBackoff() { assertThat(strategy.delayBeforeNextRetry(originalRequest, exception, 3), is(3L * ERROR_RETRY_DELAY)); } |
### Question:
S3MapreduceDataManipulatorFactory implements DataManipulatorFactory { @Override public boolean supportsSchemes(String sourceScheme, String replicaScheme) { return !S3Schemes.isS3Scheme(sourceScheme) && S3Schemes.isS3Scheme(replicaScheme); } @Autowired S3MapreduceDataManipulatorFactory(@Value("#{replicaHiveConf}") Configuration conf); @Override DataManipulator newInstance(Path path, Map<String, Object> copierOptions); @Override boolean supportsSchemes(String sourceScheme, String replicaScheme); }### Answer:
@Test public void checkSupportsHdfsToS3() { sourceLocation = hdfsPath; replicaLocation = s3Path; sourceScheme = sourceLocation.toUri().getScheme(); replicaScheme = replicaLocation.toUri().getScheme(); assertTrue(dataManipulatorFactory.supportsSchemes(sourceScheme, replicaScheme)); }
@Test public void checkDoesntSupportS3() { sourceLocation = s3Path; replicaLocation = s3Path; sourceScheme = sourceLocation.toUri().getScheme(); replicaScheme = replicaLocation.toUri().getScheme(); assertFalse(dataManipulatorFactory.supportsSchemes(sourceScheme, replicaScheme)); }
@Test public void checkDoesntSupportHdfsToHdfs() { sourceLocation = hdfsPath; replicaLocation = hdfsPath; sourceScheme = sourceLocation.toUri().getScheme(); replicaScheme = replicaLocation.toUri().getScheme(); assertFalse(dataManipulatorFactory.supportsSchemes(sourceScheme, replicaScheme)); }
@Test public void checkDoesntSupportRandomPaths() { sourceLocation = new Path("<path>"); replicaLocation = new Path("<path>"); sourceScheme = sourceLocation.toUri().getScheme(); replicaScheme = replicaLocation.toUri().getScheme(); assertFalse(dataManipulatorFactory.supportsSchemes(sourceScheme, replicaScheme)); } |
### Question:
CounterBasedRetryCondition implements RetryCondition { @Override public boolean shouldRetry( AmazonWebServiceRequest originalRequest, AmazonClientException exception, int retriesAttempted) { LOG.debug("Exception caught during upload, retries attempted = {} out of {}", retriesAttempted, maxErrorRetry, exception); return retriesAttempted <= maxErrorRetry; } CounterBasedRetryCondition(int maxErrorRetry); @Override boolean shouldRetry(
AmazonWebServiceRequest originalRequest,
AmazonClientException exception,
int retriesAttempted); }### Answer:
@Test public void doRetry() { assertThat(condition.shouldRetry(originalRequest, exception, MAX_ERROR_RETRY - 1), is(true)); }
@Test public void doRetryWhenLimitIsReached() { assertThat(condition.shouldRetry(originalRequest, exception, MAX_ERROR_RETRY), is(true)); }
@Test public void doNotRetry() { assertThat(condition.shouldRetry(originalRequest, exception, MAX_ERROR_RETRY + 1), is(false)); } |
### Question:
AwsUtil { public static StorageClass toStorageClass(String storageClass) { if (storageClass == null) { return null; } return StorageClass.fromValue(storageClass.toUpperCase(Locale.ROOT)); } private AwsUtil(); static StorageClass toStorageClass(String storageClass); }### Answer:
@Test public void toStorageClass() { assertThat(AwsUtil.toStorageClass("REDUCED_REDUNDANCY"), is(StorageClass.ReducedRedundancy)); }
@Test public void toNullStorageClass() { assertThat(AwsUtil.toStorageClass(null), is(nullValue())); }
@Test public void toStorageClassCaseSensitiveness() { assertThat(AwsUtil.toStorageClass("glacier"), is(StorageClass.Glacier)); assertThat(AwsUtil.toStorageClass("gLaCiEr"), is(StorageClass.Glacier)); }
@Test(expected = IllegalArgumentException.class) public void toInvalidStorageClass() { AwsUtil.toStorageClass("DEFAULT_STORAGE_CLASS"); } |
### Question:
SimpleCopyListing extends CopyListing { private Path makeQualified(Path path) throws IOException { final FileSystem fs = path.getFileSystem(getConf()); return path.makeQualified(fs.getUri(), fs.getWorkingDirectory()); } protected SimpleCopyListing(Configuration configuration, Credentials credentials); @Override void doBuildListing(Path pathToListingFile, S3MapReduceCpOptions options); @VisibleForTesting void doBuildListing(SequenceFile.Writer fileListWriter, S3MapReduceCpOptions options); @VisibleForTesting void doBuildListing(SequenceFile.Writer fileListWriter, S3MapReduceCpOptions options, List<Path> globbedPaths); static final String CONF_LABEL_ROOT_PATH; }### Answer:
@Test public void typical() throws Exception { Map<String, String> expectedValues = new HashMap<>(); FileSystem fs = FileSystem.get(config); Path source = new Path(temporaryRoot + "/source"); Path p1 = new Path(source, "1"); Path p2 = new Path(source, "2"); Path p3 = new Path(source, "2/3"); Path p4 = new Path(source, "2/3/4"); Path p5 = new Path(source, "5"); Path p6 = new Path(source, "5/6"); Path p7 = new Path(source, "7"); Path p8 = new Path(source, "7/8"); Path p9 = new Path(source, "7/8/9"); fs.mkdirs(p1); fs.mkdirs(p2); fs.mkdirs(p3); fs.mkdirs(p4); fs.mkdirs(p5); createFile(fs, p6); expectedValues.put(fs.makeQualified(p6).toString(), PathUtil.getRelativePath(source, p6)); fs.mkdirs(p7); fs.mkdirs(p8); createFile(fs, p9); expectedValues.put(fs.makeQualified(p9).toString(), PathUtil.getRelativePath(source, p9)); final URI uri = fs.getUri(); Path fileSystemPath = new Path(uri.toString()); source = new Path(fileSystemPath.toString(), source); URI target = URI.create("s3: Path listingPath = new Path(fileSystemPath.toString() + "/" + temporaryRoot + "/META/fileList.seq"); listing.buildListing(listingPath, options(source, target)); try (SequenceFile.Reader reader = new SequenceFile.Reader(config, SequenceFile.Reader.file(listingPath))) { Text key = new Text(); CopyListingFileStatus value = new CopyListingFileStatus(); Map<String, String> actualValues = new HashMap<>(); while (reader.next(key, value)) { if (key.toString().equals("")) { continue; } actualValues.put(value.getPath().toString(), key.toString()); } assertThat(actualValues.size(), is(expectedValues.size())); for (Map.Entry<String, String> entry : actualValues.entrySet()) { assertThat(entry.getValue(), is(expectedValues.get(entry.getKey()))); } } } |
### Question:
SimpleCopyListing extends CopyListing { @Override protected long getNumberOfPaths() { return totalPaths; } protected SimpleCopyListing(Configuration configuration, Credentials credentials); @Override void doBuildListing(Path pathToListingFile, S3MapReduceCpOptions options); @VisibleForTesting void doBuildListing(SequenceFile.Writer fileListWriter, S3MapReduceCpOptions options); @VisibleForTesting void doBuildListing(SequenceFile.Writer fileListWriter, S3MapReduceCpOptions options, List<Path> globbedPaths); static final String CONF_LABEL_ROOT_PATH; }### Answer:
@Test(timeout = 10000) public void skipFlagFiles() throws Exception { FileSystem fs = FileSystem.get(config); Path source = new Path(temporaryRoot + "/in4"); URI target = URI.create("s3: createFile(fs, new Path(source, "1/_SUCCESS")); createFile(fs, new Path(source, "1/file")); createFile(fs, new Path(source, "2")); Path listingPath = new Path(temporaryRoot + "/list4"); listing.buildListing(listingPath, options(source, target)); assertThat(listing.getNumberOfPaths(), is(2L)); Set<String> expectedRelativePaths = Sets.newHashSet("/1/file", "/2"); try (SequenceFile.Reader reader = new SequenceFile.Reader(config, SequenceFile.Reader.file(listingPath))) { CopyListingFileStatus fileStatus = new CopyListingFileStatus(); Text relativePath = new Text(); int relativePathCount = expectedRelativePaths.size(); for (int i = 0; i < relativePathCount; i++) { assertThat(reader.next(relativePath, fileStatus), is(true)); assertThat("Expected path not found " + relativePath.toString(), expectedRelativePaths.remove(relativePath.toString()), is(true)); } } assertThat("Expected relativePaths to be empty but was: " + expectedRelativePaths, expectedRelativePaths.isEmpty(), is(true)); } |
### Question:
SimpleCopyListing extends CopyListing { @Override public void doBuildListing(Path pathToListingFile, S3MapReduceCpOptions options) throws IOException { doBuildListing(getWriter(pathToListingFile), options); } protected SimpleCopyListing(Configuration configuration, Credentials credentials); @Override void doBuildListing(Path pathToListingFile, S3MapReduceCpOptions options); @VisibleForTesting void doBuildListing(SequenceFile.Writer fileListWriter, S3MapReduceCpOptions options); @VisibleForTesting void doBuildListing(SequenceFile.Writer fileListWriter, S3MapReduceCpOptions options, List<Path> globbedPaths); static final String CONF_LABEL_ROOT_PATH; }### Answer:
@Test public void failOnCloseError() throws IOException { File inFile = File.createTempFile("TestCopyListingIn", null); inFile.deleteOnExit(); File outFile = File.createTempFile("TestCopyListingOut", null); outFile.deleteOnExit(); Path source = new Path(inFile.toURI()); Exception expectedEx = new IOException("boom"); SequenceFile.Writer writer = mock(SequenceFile.Writer.class); doThrow(expectedEx).when(writer).close(); Exception actualEx = null; try { listing.doBuildListing(writer, options(source, outFile.toURI())); } catch (Exception e) { actualEx = e; } Assert.assertNotNull("close writer didn't fail", actualEx); Assert.assertEquals(expectedEx, actualEx); } |
### Question:
HiveDifferences { @VisibleForTesting static TableAndMetadata sourceTableToTableAndMetadata(Table sourceTable) { return new TableAndMetadata(Warehouse.getQualifiedName(sourceTable), normaliseLocation(sourceTable.getSd().getLocation()), sourceTable); } private HiveDifferences(
ComparatorRegistry comparatorRegistry,
DiffListener diffListener,
Table sourceTable,
Iterator<Partition> sourcePartitionIterator,
Optional<Table> replicaTable,
Optional<? extends PartitionFetcher> replicaPartitionFetcher,
Function<Path, String> checksumFunction,
int partitionLimit); static Builder builder(DiffListener diffListener); void run(); }### Answer:
@Test public void sourceTableToTableAndMetadata() { Table sourceTable = TestUtils.newTable("sourceDB", "sourceTable"); TableAndMetadata sourceTableAndMetadata = HiveDifferences.sourceTableToTableAndMetadata(sourceTable); assertThat(sourceTableAndMetadata.getSourceTable(), is("sourceDB.sourceTable")); assertThat(sourceTableAndMetadata.getSourceLocation(), is(sourceTable.getSd().getLocation())); assertThat(sourceTableAndMetadata.getTable(), is(sourceTable)); } |
### Question:
HiveDifferences { @VisibleForTesting static PartitionAndMetadata sourcePartitionToPartitionAndMetadata(Partition sourcePartition) { return new PartitionAndMetadata(sourcePartition.getDbName() + "." + sourcePartition.getTableName(), normaliseLocation(sourcePartition.getSd().getLocation()), sourcePartition); } private HiveDifferences(
ComparatorRegistry comparatorRegistry,
DiffListener diffListener,
Table sourceTable,
Iterator<Partition> sourcePartitionIterator,
Optional<Table> replicaTable,
Optional<? extends PartitionFetcher> replicaPartitionFetcher,
Function<Path, String> checksumFunction,
int partitionLimit); static Builder builder(DiffListener diffListener); void run(); }### Answer:
@Test public void sourcePartitionToPartitionAndMetadata() { Partition sourcePartition = TestUtils.newPartition("sourceDB", "sourceTable", "val"); PartitionAndMetadata sourcePartitionAndMetadata = HiveDifferences .sourcePartitionToPartitionAndMetadata(sourcePartition); assertThat(sourcePartitionAndMetadata.getSourceTable(), is("sourceDB.sourceTable")); assertThat(sourcePartitionAndMetadata.getSourceLocation(), is(sourcePartition.getSd().getLocation())); assertThat(sourcePartitionAndMetadata.getPartition(), is(sourcePartition)); } |
### Question:
HiveDifferences { @VisibleForTesting static TableAndMetadata replicaTableToTableAndMetadata(Table replicaTable) { return new TableAndMetadata(replicaTable.getParameters().get(SOURCE_TABLE.parameterName()), normaliseLocation(replicaTable.getParameters().get(SOURCE_LOCATION.parameterName())), replicaTable); } private HiveDifferences(
ComparatorRegistry comparatorRegistry,
DiffListener diffListener,
Table sourceTable,
Iterator<Partition> sourcePartitionIterator,
Optional<Table> replicaTable,
Optional<? extends PartitionFetcher> replicaPartitionFetcher,
Function<Path, String> checksumFunction,
int partitionLimit); static Builder builder(DiffListener diffListener); void run(); }### Answer:
@Test public void replicaTableToTableAndMetadata() { Table sourceTable = TestUtils.newTable("sourceDB", "sourceTable"); Table replicaTable = TestUtils.newTable("replicaDB", "replicaTable"); TestUtils.setCircusTrainSourceParameters(sourceTable, replicaTable); TableAndMetadata replicaTableAndMetadata = HiveDifferences.replicaTableToTableAndMetadata(replicaTable); assertThat(replicaTableAndMetadata.getSourceTable(), is("sourceDB.sourceTable")); assertThat(replicaTableAndMetadata.getSourceLocation(), is(sourceTable.getSd().getLocation())); assertThat(replicaTableAndMetadata.getTable(), is(replicaTable)); assertThat(replicaTableAndMetadata.getTable().getDbName(), is("replicaDB")); assertThat(replicaTableAndMetadata.getTable().getTableName(), is("replicaTable")); } |
### Question:
HiveDifferences { @VisibleForTesting static PartitionAndMetadata replicaPartitionToPartitionAndMetadata(Partition replicaPartition) { return new PartitionAndMetadata(replicaPartition.getParameters().get(SOURCE_TABLE.parameterName()), normaliseLocation(replicaPartition.getParameters().get(SOURCE_LOCATION.parameterName())), replicaPartition); } private HiveDifferences(
ComparatorRegistry comparatorRegistry,
DiffListener diffListener,
Table sourceTable,
Iterator<Partition> sourcePartitionIterator,
Optional<Table> replicaTable,
Optional<? extends PartitionFetcher> replicaPartitionFetcher,
Function<Path, String> checksumFunction,
int partitionLimit); static Builder builder(DiffListener diffListener); void run(); }### Answer:
@Test public void replicaPartitionToPartitionAndMetadata() { Partition sourcePartition = TestUtils.newPartition("sourceDB", "sourceTable", "val"); Partition replicaPartition = TestUtils.newPartition("replicaDB", "replicaTable", "val"); TestUtils.setCircusTrainSourceParameters(sourcePartition, replicaPartition); PartitionAndMetadata replicaPartitionAndMetadata = HiveDifferences .replicaPartitionToPartitionAndMetadata(replicaPartition); assertThat(replicaPartitionAndMetadata.getSourceTable(), is("sourceDB.sourceTable")); assertThat(replicaPartitionAndMetadata.getSourceLocation(), is(sourcePartition.getSd().getLocation())); assertThat(replicaPartitionAndMetadata.getPartition(), is(replicaPartition)); assertThat(replicaPartitionAndMetadata.getPartition().getDbName(), is("replicaDB")); assertThat(replicaPartitionAndMetadata.getPartition().getTableName(), is("replicaTable")); } |
### Question:
HiveDifferences { public static Builder builder(DiffListener diffListener) { return new Builder(diffListener); } private HiveDifferences(
ComparatorRegistry comparatorRegistry,
DiffListener diffListener,
Table sourceTable,
Iterator<Partition> sourcePartitionIterator,
Optional<Table> replicaTable,
Optional<? extends PartitionFetcher> replicaPartitionFetcher,
Function<Path, String> checksumFunction,
int partitionLimit); static Builder builder(DiffListener diffListener); void run(); }### Answer:
@Test(expected = IllegalStateException.class) public void requiredPartitionFetcher() { HiveDifferences .builder(diffListener) .comparatorRegistry(comparatorRegistry) .source(sourceConfiguration, sourceTable, sourcePartitionIterable) .replica(Optional.of(replicaTable), Optional.<PartitionFetcher> absent()) .checksumFunction(checksumFunction) .build(); } |
### Question:
CleanTableFunction implements Function<TableAndMetadata, TableAndMetadata> { @Override public TableAndMetadata apply(TableAndMetadata tableAndMetadata) { if (tableAndMetadata == null) { return null; } Table tableCopy = tableAndMetadata.getTable().deepCopy(); for (CircusTrainTableParameter p : CircusTrainTableParameter.values()) { tableCopy.getParameters().remove(p.parameterName()); } for (ExcludedHiveParameter p : ExcludedHiveParameter.values()) { tableCopy.getParameters().remove(p.parameterName()); } return new TableAndMetadata(tableAndMetadata.getSourceTable(), tableAndMetadata.getSourceLocation(), tableCopy); } @Override TableAndMetadata apply(TableAndMetadata tableAndMetadata); }### Answer:
@Test public void verbatimCopy() { TableAndMetadata table = newTableAndMetadata(DATABASE, TABLE); TableAndMetadata tableCopy = function.apply(table); assertFalse(tableCopy == table); assertThat(tableCopy, is(table)); }
@Test public void cleansedCopy() { TableAndMetadata table = newTableAndMetadata(DATABASE, TABLE); for (CircusTrainTableParameter p : CircusTrainTableParameter.values()) { table.getTable().getParameters().put(p.parameterName(), "sourceTable"); } for (ExcludedHiveParameter p : ExcludedHiveParameter.values()) { table.getTable().getParameters().put(p.parameterName(), "1234567890"); } TableAndMetadata tableCopy = function.apply(table); assertFalse(tableCopy == table); assertThat(tableCopy, is(not(table))); assertThat(tableCopy.getTable().getDbName(), is(DATABASE)); assertThat(tableCopy.getTable().getTableName(), is(TABLE)); assertThat(tableCopy.getTable().getCreateTime(), is(CREATE_TIME)); assertThat(tableCopy.getTable().getOwner(), is(OWNER)); assertThat(tableCopy.getTable().getPrivileges().getUserPrivileges().size(), is(1)); assertThat(tableCopy.getTable().getPrivileges().getUserPrivileges().get("read"), is(notNullValue())); assertThat(tableCopy.getTable().getSd().getCols(), is(COLS)); assertThat(tableCopy.getTable().getSd().getInputFormat(), is(INPUT_FORMAT)); assertThat(tableCopy.getTable().getSd().getOutputFormat(), is(OUTPUT_FORMAT)); assertThat(tableCopy.getTable().getParameters().size(), is(1)); assertThat(tableCopy.getTable().getParameters().get("com.company.parameter"), is("abc")); } |
### Question:
CleanPartitionFunction implements Function<PartitionAndMetadata, PartitionAndMetadata> { @Override public PartitionAndMetadata apply(PartitionAndMetadata partitionAndMetadata) { if (partitionAndMetadata == null) { return null; } Partition partitionCopy = partitionAndMetadata.getPartition().deepCopy(); for (CircusTrainTableParameter p : CircusTrainTableParameter.values()) { partitionCopy.getParameters().remove(p.parameterName()); } for (ExcludedHiveParameter p : ExcludedHiveParameter.values()) { partitionCopy.getParameters().remove(p.parameterName()); } return new PartitionAndMetadata(partitionAndMetadata.getSourceTable(), partitionAndMetadata.getSourceLocation(), partitionCopy); } @Override PartitionAndMetadata apply(PartitionAndMetadata partitionAndMetadata); }### Answer:
@Test public void verbatimCopy() { PartitionAndMetadata partition = newPartitionAndMetadata(DATABASE, TABLE, "val"); PartitionAndMetadata partitionCopy = function.apply(partition); assertFalse(partitionCopy == partition); assertThat(partitionCopy, is(partition)); }
@Test public void cleansedCopy() { PartitionAndMetadata partition = newPartitionAndMetadata(DATABASE, TABLE, "val"); for (CircusTrainTableParameter p : CircusTrainTableParameter.values()) { partition.getPartition().getParameters().put(p.parameterName(), "sourceTable"); } for (ExcludedHiveParameter p : ExcludedHiveParameter.values()) { partition.getPartition().getParameters().put(p.parameterName(), "1234567890"); } PartitionAndMetadata partitionCopy = function.apply(partition); assertFalse(partitionCopy == partition); assertThat(partitionCopy, is(not(partition))); assertThat(partitionCopy.getPartition().getDbName(), is(DATABASE)); assertThat(partitionCopy.getPartition().getTableName(), is(TABLE)); assertThat(partitionCopy.getPartition().getCreateTime(), is(CREATE_TIME)); List<String> partitionValues = ImmutableList.of("val"); assertThat(partitionCopy.getPartition().getValues(), is(partitionValues)); assertThat(partitionCopy.getPartition().getPrivileges().getUserPrivileges().size(), is(1)); assertThat(partitionCopy.getPartition().getPrivileges().getUserPrivileges().get("read"), is(notNullValue())); assertThat(partitionCopy.getPartition().getSd().getCols(), is(COLS)); assertThat(partitionCopy.getPartition().getSd().getInputFormat(), is(INPUT_FORMAT)); assertThat(partitionCopy.getPartition().getSd().getOutputFormat(), is(OUTPUT_FORMAT)); assertThat(partitionCopy.getPartition().getParameters().size(), is(1)); assertThat(partitionCopy.getPartition().getParameters().get("com.company.parameter"), is("abc")); } |
### Question:
PathDigest implements Function<PathMetadata, String> { @Override public String apply(PathMetadata pathDescriptor) { byte[] checksum = messageDigest.digest(serialize(pathDescriptor)); return Base64.encodeBase64String(checksum); } PathDigest(); PathDigest(String algorithm); @Override String apply(PathMetadata pathDescriptor); static byte[] serialize(PathMetadata pathDescriptor); static final String DEFAULT_MESSAGE_DIGEST_ALGORITHM; }### Answer:
@Test public void typical() throws Exception { when(path.toUri()).thenReturn(new URI(FILE_LOCATION)); when(checksum.getAlgorithmName()).thenReturn(MD5); when(checksum.getLength()).thenReturn(1); when(checksum.getBytes()).thenReturn(new byte[] {}); PathMetadata pathDescriptor = new PathMetadata(path, LAST_MODIFIED, checksum, ImmutableList.<PathMetadata> of()); String base64Digest = function.apply(pathDescriptor); assertThat(base64Digest.matches(BASE_64_REGEX), is(true)); }
@Test public void noChecksum() throws Exception { when(path.toUri()).thenReturn(new URI(FILE_LOCATION)); PathMetadata pathDescriptor = new PathMetadata(path, LAST_MODIFIED, checksum, ImmutableList.<PathMetadata> of()); String base64Digest = function.apply(pathDescriptor); assertThat(base64Digest.matches(BASE_64_REGEX), is(true)); } |
### Question:
PathToPathMetadata implements Function<Path, PathMetadata> { @Override public PathMetadata apply(@Nonnull Path location) { try { FileSystem fs = location.getFileSystem(conf); FileStatus fileStatus = fs.getFileStatus(location); FileChecksum checksum = null; if (fileStatus.isFile()) { checksum = fs.getFileChecksum(location); } long modificationTime = 0; List<PathMetadata> childPathDescriptors = new ArrayList<>(); if (fileStatus.isDirectory()) { FileStatus[] childStatuses = fs.listStatus(location); for (FileStatus childStatus : childStatuses) { childPathDescriptors.add(apply(childStatus.getPath())); } } else { modificationTime = fileStatus.getModificationTime(); } return new PathMetadata(location, modificationTime, checksum, childPathDescriptors); } catch (IOException e) { throw new CircusTrainException("Unable to compute digest for location " + location.toString(), e); } } PathToPathMetadata(Configuration conf); @Override PathMetadata apply(@Nonnull Path location); }### Answer:
@Test public void file() throws Exception { when(fileStatus.isFile()).thenReturn(true); when(fileStatus.isDirectory()).thenReturn(false); when(fs.getFileChecksum(path)).thenReturn(fileChecksum); PathMetadata metadata = function.apply(path); assertThat(metadata.getLocation(), is(DIR_PATH)); assertThat(metadata.getLastModifiedTimestamp(), is(LAST_MODIFIED)); assertThat(metadata.getChecksumAlgorithmName(), is(CHECKSUM_ALGORITHM)); assertThat(metadata.getChecksumLength(), is(CHECKSUM_LENGTH)); assertThat(metadata.getChecksum(), is(CHECKSUM_BYTES)); assertThat(metadata.getChildrenMetadata().isEmpty(), is(true)); verify(fs, never()).listStatus(any(Path.class)); }
@Test public void directory() throws Exception { when(fileStatus.isFile()).thenReturn(false); when(fileStatus.isDirectory()).thenReturn(true); Path childPath = mock(Path.class); when(childPath.toUri()).thenReturn(new URI(FILE_PATH)); when(childPath.getFileSystem(any(Configuration.class))).thenReturn(fs); FileStatus childStatus = mock(FileStatus.class); when(childStatus.getPath()).thenReturn(childPath); when(childStatus.getModificationTime()).thenReturn(LAST_MODIFIED_CHILD); when(childStatus.isFile()).thenReturn(true); when(childStatus.isDirectory()).thenReturn(false); when(fs.getFileStatus(childPath)).thenReturn(childStatus); when(fs.getFileChecksum(childPath)).thenReturn(fileChecksum); FileStatus[] childStatuses = new FileStatus[] { childStatus }; when(fs.listStatus(path)).thenReturn(childStatuses); PathMetadata metadata = function.apply(path); assertThat(metadata.getLocation(), is(DIR_PATH)); assertThat(metadata.getLastModifiedTimestamp(), is(0L)); assertThat(metadata.getChecksumAlgorithmName(), is(nullValue())); assertThat(metadata.getChecksumLength(), is(0)); assertThat(metadata.getChecksum(), is(nullValue())); assertThat(metadata.getChildrenMetadata().size(), is(1)); verify(fs, times(1)).listStatus(path); verify(fs, never()).getFileChecksum(path); PathMetadata childMetadata = metadata.getChildrenMetadata().get(0); assertThat(childMetadata.getLocation(), is(FILE_PATH)); assertThat(childMetadata.getLastModifiedTimestamp(), is(LAST_MODIFIED_CHILD)); assertThat(childMetadata.getChecksumAlgorithmName(), is(CHECKSUM_ALGORITHM)); assertThat(childMetadata.getChecksumLength(), is(CHECKSUM_LENGTH)); assertThat(childMetadata.getChecksum(), is(CHECKSUM_BYTES)); assertThat(childMetadata.getChildrenMetadata().size(), is(0)); verify(fs, never()).listStatus(childPath); verify(fs, times(1)).getFileChecksum(childPath); } |
### Question:
ComparatorRegistry { public Comparator<?, ?> comparatorFor(Class<?> type) { return registry.get(type); } ComparatorRegistry(ComparatorType comparatorType); ComparatorType getComparatorType(); Comparator<?, ?> comparatorFor(Class<?> type); }### Answer:
@Test public void typical() { Comparator<?, ?> objectComparator = comparatorRegistry.comparatorFor(FieldSchema.class); assertThat(objectComparator, is(instanceOf(FieldSchemaComparator.class))); Comparator<?, ?> integerComparator = comparatorRegistry.comparatorFor(PartitionAndMetadata.class); assertThat(integerComparator, is(instanceOf(PartitionAndMetadataComparator.class))); Comparator<?, ?> stringComparator = comparatorRegistry.comparatorFor(TableAndMetadata.class); assertThat(stringComparator, is(instanceOf(TableAndMetadataComparator.class))); assertThat(comparatorRegistry.comparatorFor(Table.class), is(nullValue())); assertThat(comparatorRegistry.comparatorFor(Partition.class), is(nullValue())); } |
### Question:
S3S3CopierFactory implements CopierFactory { @Override public boolean supportsSchemes(String sourceScheme, String replicaScheme) { return S3Schemes.isS3Scheme(sourceScheme) && S3Schemes.isS3Scheme(replicaScheme); } @Autowired S3S3CopierFactory(
AmazonS3ClientFactory clientFactory,
ListObjectsRequestFactory listObjectsRequestFactory,
TransferManagerFactory transferManagerFactory,
MetricRegistry runningMetricsRegistry); @Override boolean supportsSchemes(String sourceScheme, String replicaScheme); @Override Copier newInstance(CopierContext copierContext); @Override Copier newInstance(
String eventId,
Path sourceBaseLocation,
Path replicaLocation,
Map<String, Object> copierOptions); @Override Copier newInstance(
String eventId,
Path sourceBaseLocation,
List<Path> sourceSubLocations,
Path replicaLocation,
Map<String, Object> copierOptions); }### Answer:
@Test public void supportsSchemes() throws Exception { assertTrue(factory.supportsSchemes("s3", "s3")); assertTrue(factory.supportsSchemes("s3a", "s3a")); assertTrue(factory.supportsSchemes("s3n", "s3n")); assertTrue(factory.supportsSchemes("s3", "s3n")); }
@Test public void unsupportedSchemes() throws Exception { assertFalse(factory.supportsSchemes("s3", "s345")); assertFalse(factory.supportsSchemes("s345", "s3")); assertFalse(factory.supportsSchemes("hdfs", "s3")); assertFalse(factory.supportsSchemes("s3", "hdfs")); assertFalse(factory.supportsSchemes(null, null)); assertFalse(factory.supportsSchemes("", "")); } |
### Question:
AbstractComparator implements Comparator<T, D> { protected boolean checkForInequality(Object left, Object right) { if (left == right) { return false; } if (left == null) { return true; } return !left.equals(right); } AbstractComparator(ComparatorRegistry comparatorRegistry, ComparatorType comparatorType); }### Answer:
@Test public void checkForInequalityOfSameObject() { EqualObject object = new EqualObject(); assertThat(comparator.checkForInequality(object, object), is(false)); }
@Test public void checkForInequalityLeftNull() { assertThat(comparator.checkForInequality(null, new EqualObject()), is(true)); }
@Test public void checkForInequalityRightNull() { assertThat(comparator.checkForInequality(new EqualObject(), null), is(true)); }
@Test public void checkForInequalityDifferentObject() { Object objectA = new Object(); Object objectB = new Object(); assertThat(comparator.checkForInequality(objectA, objectB), is(true)); }
@Test public void checkForInequalityDifferentObjectEquals() { EqualObject objectA = new EqualObject(); EqualObject objectB = new EqualObject(); assertThat(comparator.checkForInequality(objectA, objectB), is(false)); } |
### Question:
PartitionSpecCreatingDiffListener implements DiffListener { @Override public void onNewPartition(String partitionName, Partition partition) { addPartition(partition); } PartitionSpecCreatingDiffListener(Configuration conf); @Override void onDiffStart(TableAndMetadata source, Optional<TableAndMetadata> replica); @Override void onChangedTable(List<Diff<Object, Object>> differences); @Override void onNewPartition(String partitionName, Partition partition); @Override void onChangedPartition(String partitionName, Partition partition, List<Diff<Object, Object>> differences); @Override void onDataChanged(String partitionName, Partition partition); @Override void onDiffEnd(); String getPartitionSpecFilter(); }### Answer:
@Test public void onNewPartition() throws Exception { Partition partition1 = new Partition(Lists.newArrayList("val1", "val2"), DB, TABLE, 1, 1, null, null); Partition partition2 = new Partition(Lists.newArrayList("val11", "val22"), DB, TABLE, 1, 1, null, null); listener.onDiffStart(source, replica); listener.onNewPartition("p1", partition1); listener.onNewPartition("p2", partition2); assertThat(listener.getPartitionSpecFilter(), is("(p1='val1' AND p2=val2) OR (p1='val11' AND p2=val22)")); } |
### Question:
PartitionSpecCreatingDiffListener implements DiffListener { @Override public void onChangedPartition(String partitionName, Partition partition, List<Diff<Object, Object>> differences) { addPartition(partition); } PartitionSpecCreatingDiffListener(Configuration conf); @Override void onDiffStart(TableAndMetadata source, Optional<TableAndMetadata> replica); @Override void onChangedTable(List<Diff<Object, Object>> differences); @Override void onNewPartition(String partitionName, Partition partition); @Override void onChangedPartition(String partitionName, Partition partition, List<Diff<Object, Object>> differences); @Override void onDataChanged(String partitionName, Partition partition); @Override void onDiffEnd(); String getPartitionSpecFilter(); }### Answer:
@Test public void onChangedPartition() throws Exception { Partition partition1 = new Partition(Lists.newArrayList("val1", "val2"), DB, TABLE, 1, 1, null, null); Partition partition2 = new Partition(Lists.newArrayList("val11", "val22"), DB, TABLE, 1, 1, null, null); listener.onDiffStart(source, replica); listener.onChangedPartition("p1", partition1, differences); listener.onChangedPartition("p2", partition2, differences); assertThat(listener.getPartitionSpecFilter(), is("(p1='val1' AND p2=val2) OR (p1='val11' AND p2=val22)")); } |
### Question:
PartitionSpecCreatingDiffListener implements DiffListener { @Override public void onDataChanged(String partitionName, Partition partition) { addPartition(partition); } PartitionSpecCreatingDiffListener(Configuration conf); @Override void onDiffStart(TableAndMetadata source, Optional<TableAndMetadata> replica); @Override void onChangedTable(List<Diff<Object, Object>> differences); @Override void onNewPartition(String partitionName, Partition partition); @Override void onChangedPartition(String partitionName, Partition partition, List<Diff<Object, Object>> differences); @Override void onDataChanged(String partitionName, Partition partition); @Override void onDiffEnd(); String getPartitionSpecFilter(); }### Answer:
@Test public void onDataChanged() throws Exception { Partition partition1 = new Partition(Lists.newArrayList("val1", "val2"), DB, TABLE, 1, 1, null, null); Partition partition2 = new Partition(Lists.newArrayList("val11", "val22"), DB, TABLE, 1, 1, null, null); listener.onDiffStart(source, replica); listener.onDataChanged("p1", partition1); listener.onDataChanged("p2", partition2); assertThat(listener.getPartitionSpecFilter(), is("(p1='val1' AND p2=val2) OR (p1='val11' AND p2=val22)")); } |
### Question:
CircusTrainHelp { @Override public String toString() { Iterable<String> errorMessages = FluentIterable.from(errors).transform(OBJECT_ERROR_TO_TABBED_MESSAGE); StringBuilder help = new StringBuilder(500) .append("Usage: circus-train.sh --config=<config_file>[,<config_file>,...]") .append(System.lineSeparator()) .append("Errors found in the provided configuration file:") .append(System.lineSeparator()) .append(Joiner.on(System.lineSeparator()).join(errorMessages)) .append(System.lineSeparator()) .append("Configuration file help:") .append(System.lineSeparator()) .append(TAB) .append("For more information and help please refer to ") .append("https: return help.toString(); } CircusTrainHelp(List<ObjectError> errors); @Override String toString(); }### Answer:
@Test public void typical() { String expectedHelpMessage = "Usage: circus-train.sh --config=<config_file>[,<config_file>,...]\n" + "Errors found in the provided configuration file:\n" + "\tError message 1\n" + "\tError message 2\n" + "Configuration file help:\n" + "\tFor more information and help please refer to https: List<ObjectError> errors = Arrays.asList(new ObjectError("object.1", "Error message 1"), new ObjectError("object.2", "Error message 2")); String help = new CircusTrainHelp(errors).toString(); assertThat(help, is(expectedHelpMessage)); } |
### Question:
ConfigFileValidationApplicationListener implements ApplicationListener<ApplicationEnvironmentPreparedEvent> { @Override public void onApplicationEvent(ApplicationEnvironmentPreparedEvent event) { String configFilesString = event.getEnvironment().getProperty("spring.config.location"); List<String> errors = new ArrayList<>(); if (StringUtils.isBlank(configFilesString)) { errors.add("No config file was specified."); } else { for (String configFileString : Splitter.on(',').split(configFilesString)) { File configFile = new File(configFileString); if (!configFile.exists()) { errors.add("Config file " + configFileString + " does not exist."); } else if (!configFile.isFile()) { errors.add("Config file " + configFileString + " is a directory."); } else if (!configFile.canRead()) { errors.add("Config file " + configFileString + " cannot be read."); } } } if (!errors.isEmpty()) { throw new ConfigFileValidationException(errors); } } @Override void onApplicationEvent(ApplicationEnvironmentPreparedEvent event); }### Answer:
@Test public void emptyProperty() { when(env.getProperty(anyString())).thenReturn(""); try { listener.onApplicationEvent(event); fail(); } catch (ConfigFileValidationException e) { String error = e.getErrors().get(0).getDefaultMessage(); assertThat(error, is("No config file was specified.")); } }
@Test public void configFileDoesNotExist() { File file = new File(temp.getRoot(), "application.yml"); when(env.getProperty(anyString())).thenReturn(file.getAbsolutePath()); try { listener.onApplicationEvent(event); fail(); } catch (ConfigFileValidationException e) { String error = e.getErrors().get(0).getDefaultMessage(); assertThat(error, containsString("Config file " + file.getAbsolutePath() + " does not exist.")); } }
@Test public void configFileIsDirectory() throws IOException { File folder = temp.newFolder("application.yml"); when(env.getProperty(anyString())).thenReturn(folder.getAbsolutePath()); try { listener.onApplicationEvent(event); fail(); } catch (ConfigFileValidationException e) { String error = e.getErrors().get(0).getDefaultMessage(); assertThat(error, containsString("Config file " + folder.getAbsolutePath() + " is a directory.")); } }
@Test public void configFileIsNotReadable() throws IOException { File file = temp.newFile("application.yml"); file.setReadable(false); when(env.getProperty(anyString())).thenReturn(file.getAbsolutePath()); try { listener.onApplicationEvent(event); fail(); } catch (ConfigFileValidationException e) { String error = e.getErrors().get(0).getDefaultMessage(); assertThat(error, containsString("Config file " + file.getAbsolutePath() + " cannot be read.")); } }
@Test public void configFileValid() throws IOException { File file = temp.newFile("application.yml"); when(env.getProperty(anyString())).thenReturn(file.getAbsolutePath()); listener.onApplicationEvent(event); } |
### Question:
PropertyExtensionPackageProvider implements ExtensionPackageProvider { @Override public Set<String> getPackageNames(ConfigurableEnvironment env) { @SuppressWarnings("unchecked") Set<String> packageNames = env.getProperty("extension-packages", Set.class); return packageNames == null ? Collections.<String>emptySet() : packageNames; } @Override Set<String> getPackageNames(ConfigurableEnvironment env); }### Answer:
@Test public void noPackagesDeclared() { when(env.getProperty("extension-packages", Set.class)).thenReturn(ImmutableSet.of()); Set<String> packages = underTest.getPackageNames(env); assertThat(packages.size(), is(0)); }
@Test public void twoPackagesDeclared() { when(env.getProperty("extension-packages", Set.class)).thenReturn(ImmutableSet.of("com.foo", "com.bar")); Set<String> packages = underTest.getPackageNames(env); assertThat(packages.size(), is(2)); Iterator<String> iterator = packages.iterator(); assertThat(iterator.next(), is("com.foo")); assertThat(iterator.next(), is("com.bar")); } |
### Question:
ExtensionInitializer implements ApplicationContextInitializer<AnnotationConfigApplicationContext> { @Override public void initialize(AnnotationConfigApplicationContext context) { Set<String> packageNames = provider.getPackageNames(context.getEnvironment()); if (packageNames.size() > 0) { LOG.info("Adding packageNames '{}' to component scan.", packageNames); context.scan(packageNames.toArray(new String[packageNames.size()])); } } ExtensionInitializer(); @VisibleForTesting ExtensionInitializer(ExtensionPackageProvider provider); @Override void initialize(AnnotationConfigApplicationContext context); }### Answer:
@Test public void noPackage() { when(provider.getPackageNames(env)).thenReturn(Collections.<String> emptySet()); underTest.initialize(context); verify(context, never()).scan(any(String[].class)); }
@Test public void singlePackages() { when(provider.getPackageNames(env)).thenReturn(Collections.singleton("com.foo.bar")); underTest.initialize(context); verify(context).scan(new String[] { "com.foo.bar" }); } |
### Question:
HiveEndpoint { public String getName() { return name; } @Autowired HiveEndpoint(String name, HiveConf hiveConf, Supplier<CloseableMetaStoreClient> metaStoreClientSupplier); String getName(); HiveConf getHiveConf(); String getMetaStoreUris(); Supplier<CloseableMetaStoreClient> getMetaStoreClientSupplier(); Database getDatabase(String database); Optional<Table> getTable(CloseableMetaStoreClient client, String database, String table); TableAndStatistics getTableAndStatistics(String database, String tableName); abstract TableAndStatistics getTableAndStatistics(TableReplication tableReplication); PartitionsAndStatistics getPartitions(Table table, String partitionPredicate, int maxPartitions); }### Answer:
@Test public void getName() throws Exception { assertThat(hiveEndpoint.getName(), is(NAME)); } |
### Question:
HiveEndpoint { public HiveConf getHiveConf() { return hiveConf; } @Autowired HiveEndpoint(String name, HiveConf hiveConf, Supplier<CloseableMetaStoreClient> metaStoreClientSupplier); String getName(); HiveConf getHiveConf(); String getMetaStoreUris(); Supplier<CloseableMetaStoreClient> getMetaStoreClientSupplier(); Database getDatabase(String database); Optional<Table> getTable(CloseableMetaStoreClient client, String database, String table); TableAndStatistics getTableAndStatistics(String database, String tableName); abstract TableAndStatistics getTableAndStatistics(TableReplication tableReplication); PartitionsAndStatistics getPartitions(Table table, String partitionPredicate, int maxPartitions); }### Answer:
@Test public void getHiveConf() throws Exception { assertThat(hiveEndpoint.getHiveConf(), is(hiveConf)); } |
### Question:
S3S3CopierFactory implements CopierFactory { @Override public Copier newInstance(CopierContext copierContext) { return new S3S3Copier(copierContext.getSourceBaseLocation(), copierContext.getSourceSubLocations(), copierContext.getReplicaLocation(), clientFactory, transferManagerFactory, listObjectsRequestFactory, runningMetricsRegistry, new S3S3CopierOptions(copierContext.getCopierOptions())); } @Autowired S3S3CopierFactory(
AmazonS3ClientFactory clientFactory,
ListObjectsRequestFactory listObjectsRequestFactory,
TransferManagerFactory transferManagerFactory,
MetricRegistry runningMetricsRegistry); @Override boolean supportsSchemes(String sourceScheme, String replicaScheme); @Override Copier newInstance(CopierContext copierContext); @Override Copier newInstance(
String eventId,
Path sourceBaseLocation,
Path replicaLocation,
Map<String, Object> copierOptions); @Override Copier newInstance(
String eventId,
Path sourceBaseLocation,
List<Path> sourceSubLocations,
Path replicaLocation,
Map<String, Object> copierOptions); }### Answer:
@Test public void newInstance() throws Exception { String eventId = "eventID"; Path sourceBaseLocation = new Path("source"); Path replicaLocation = new Path("replica"); Map<String, Object> copierOptions = new HashMap<>(); Copier copier = factory.newInstance(eventId, sourceBaseLocation, replicaLocation, copierOptions); assertNotNull(copier); }
@Test public void newInstancePartitions() throws Exception { String eventId = "eventID"; Path sourceBaseLocation = new Path("source"); Path replicaLocation = new Path("replica"); Map<String, Object> copierOptions = new HashMap<>(); List<Path> subLocations = Lists.newArrayList(new Path(sourceBaseLocation, "sub")); Copier copier = factory.newInstance(eventId, sourceBaseLocation, subLocations, replicaLocation, copierOptions); assertNotNull(copier); } |
### Question:
HiveEndpoint { public Optional<Table> getTable(CloseableMetaStoreClient client, String database, String table) { Table oldReplicaTable = null; try { log.debug("Checking for existing table {}.{}", database, table); oldReplicaTable = client.getTable(database, table); log.debug("Existing table found."); } catch (NoSuchObjectException e) { log.debug("Table '{}.{}' not found.", database, table); } catch (TException e) { String message = String.format("Cannot fetch table metadata for '%s.%s'", database, table); log.error(message, e); throw new MetaStoreClientException(message, e); } return Optional.fromNullable(oldReplicaTable); } @Autowired HiveEndpoint(String name, HiveConf hiveConf, Supplier<CloseableMetaStoreClient> metaStoreClientSupplier); String getName(); HiveConf getHiveConf(); String getMetaStoreUris(); Supplier<CloseableMetaStoreClient> getMetaStoreClientSupplier(); Database getDatabase(String database); Optional<Table> getTable(CloseableMetaStoreClient client, String database, String table); TableAndStatistics getTableAndStatistics(String database, String tableName); abstract TableAndStatistics getTableAndStatistics(TableReplication tableReplication); PartitionsAndStatistics getPartitions(Table table, String partitionPredicate, int maxPartitions); }### Answer:
@Test public void getTable() throws Exception { when(metaStoreClient.getTable(DATABASE, TABLE)).thenReturn(table); when(metaStoreClient.getTableColumnStatistics(DATABASE, TABLE, COLUMN_NAMES)).thenReturn(columnStatisticsObjs); TableAndStatistics sourceTable = hiveEndpoint.getTableAndStatistics(DATABASE, TABLE); assertThat(sourceTable.getTable(), is(table)); assertThat(sourceTable.getStatistics(), is(columnStatistics)); } |
### Question:
HiveEndpoint { public PartitionsAndStatistics getPartitions(Table table, String partitionPredicate, int maxPartitions) throws TException { try (CloseableMetaStoreClient client = metaStoreClientSupplier.get()) { List<Partition> partitions = null; if (Strings.isNullOrEmpty(partitionPredicate)) { partitions = client.listPartitions(table.getDbName(), table.getTableName(), (short) maxPartitions); } else { partitions = client.listPartitionsByFilter(table.getDbName(), table.getTableName(), partitionPredicate, (short) maxPartitions); } List<String> partitionNames = getPartitionNames(table.getPartitionKeys(), partitions); List<String> columnNames = getColumnNames(table); Map<String, List<ColumnStatisticsObj>> statisticsByPartitionName = client .getPartitionColumnStatistics(table.getDbName(), table.getTableName(), partitionNames, columnNames); if (statisticsByPartitionName != null && !statisticsByPartitionName.isEmpty()) { log.debug("Retrieved column stats entries for {} partitions of table {}.{}", statisticsByPartitionName.size(), table.getDbName(), table.getTableName()); } else { log.debug("No partition column stats retrieved for table {}.{}", table.getDbName(), table.getTableName()); } return new PartitionsAndStatistics(table.getPartitionKeys(), partitions, statisticsByPartitionName); } } @Autowired HiveEndpoint(String name, HiveConf hiveConf, Supplier<CloseableMetaStoreClient> metaStoreClientSupplier); String getName(); HiveConf getHiveConf(); String getMetaStoreUris(); Supplier<CloseableMetaStoreClient> getMetaStoreClientSupplier(); Database getDatabase(String database); Optional<Table> getTable(CloseableMetaStoreClient client, String database, String table); TableAndStatistics getTableAndStatistics(String database, String tableName); abstract TableAndStatistics getTableAndStatistics(TableReplication tableReplication); PartitionsAndStatistics getPartitions(Table table, String partitionPredicate, int maxPartitions); }### Answer:
@Test public void getPartitions() throws Exception { List<Partition> filteredPartitions = Arrays.asList(partitionOneTwo); when(metaStoreClient.listPartitionsByFilter(DATABASE, TABLE, PARTITION_PREDICATE, (short) MAX_PARTITIONS)) .thenReturn(filteredPartitions); when(metaStoreClient.getPartitionColumnStatistics(DATABASE, TABLE, PARTITION_NAMES, COLUMN_NAMES)) .thenReturn(partitionStatsMap); PartitionsAndStatistics partitionsAndStatistics = hiveEndpoint.getPartitions(table, PARTITION_PREDICATE, MAX_PARTITIONS); assertThat(partitionsAndStatistics.getPartitions(), is(filteredPartitions)); assertThat(partitionsAndStatistics.getStatisticsForPartition(partitionOneTwo), is(partitionColumnStatistics)); }
@Test public void getPartitionsWithoutFilter() throws Exception { when(metaStoreClient.listPartitions(DATABASE, TABLE, (short) MAX_PARTITIONS)).thenReturn(partitions); when(metaStoreClient.getPartitionColumnStatistics(DATABASE, TABLE, PARTITION_NAMES, COLUMN_NAMES)) .thenReturn(partitionStatsMap); PartitionsAndStatistics partitionsAndStatistics = hiveEndpoint.getPartitions(table, null, MAX_PARTITIONS); assertThat(partitionsAndStatistics.getPartitions(), is(partitions)); }
@Test public void getPartitionsNoStats() throws Exception { List<Partition> filteredPartitions = Arrays.asList(partitionOneTwo); when(metaStoreClient.listPartitionsByFilter(DATABASE, TABLE, PARTITION_PREDICATE, (short) MAX_PARTITIONS)) .thenReturn(filteredPartitions); when(metaStoreClient.getPartitionColumnStatistics(DATABASE, TABLE, PARTITION_NAMES, COLUMN_NAMES)) .thenReturn(Collections.<String, List<ColumnStatisticsObj>> emptyMap()); PartitionsAndStatistics partitionsAndStatistics = hiveEndpoint.getPartitions(table, PARTITION_PREDICATE, MAX_PARTITIONS); assertThat(partitionsAndStatistics.getPartitions(), is(filteredPartitions)); assertThat(partitionsAndStatistics.getStatisticsForPartition(partitionOneTwo), is(nullValue())); } |
### Question:
StrategyBasedReplicationFactory implements ReplicationFactory { @Override public Replication newInstance(TableReplication tableReplication) { if (tableReplication.getReplicationStrategy() == ReplicationStrategy.PROPAGATE_DELETES) { String eventId = eventIdFactory.newEventId(EventIdPrefix.CIRCUS_TRAIN_DESTRUCTIVE.getPrefix()); CleanupLocationManager cleanupLocationManager = CleanupLocationManagerFactory.newInstance(eventId, housekeepingListener, replicaCatalogListener, tableReplication); return new DestructiveReplication(upsertReplicationFactory, tableReplication, eventId, new DestructiveSource(sourceMetaStoreClientSupplier, tableReplication), new DestructiveReplica(replicaMetaStoreClientSupplier, cleanupLocationManager, tableReplication)); } return upsertReplicationFactory.newInstance(tableReplication); } StrategyBasedReplicationFactory(
ReplicationFactoryImpl upsertReplicationFactory,
Supplier<CloseableMetaStoreClient> sourceMetaStoreClientSupplier,
Supplier<CloseableMetaStoreClient> replicaMetaStoreClientSupplier,
HousekeepingListener housekeepingListener,
ReplicaCatalogListener replicaCatalogListener); @Override Replication newInstance(TableReplication tableReplication); }### Answer:
@Test public void newInstance() throws Exception { StrategyBasedReplicationFactory factory = new StrategyBasedReplicationFactory(upsertReplicationFactory, sourceMetaStoreClientSupplier, replicaMetaStoreClientSupplier, housekeepingListener, replicaCatalogListener); tableReplication.setReplicationStrategy(ReplicationStrategy.PROPAGATE_DELETES); Replication replication = factory.newInstance(tableReplication); assertThat(replication, instanceOf(DestructiveReplication.class)); }
@Test public void newInstanceUpsert() throws Exception { StrategyBasedReplicationFactory factory = new StrategyBasedReplicationFactory(upsertReplicationFactory, sourceMetaStoreClientSupplier, replicaMetaStoreClientSupplier, housekeepingListener, replicaCatalogListener); tableReplication.setReplicationStrategy(ReplicationStrategy.UPSERT); factory.newInstance(tableReplication); verify(upsertReplicationFactory).newInstance(tableReplication); } |
### Question:
DefaultDataManipulatorFactoryManager implements DataManipulatorFactoryManager { @Override public DataManipulatorFactory getFactory( Path sourceTableLocation, Path replicaTableLocation, Map<String, Object> copierOptions) { String replicaLocation = replicaTableLocation.toUri().getScheme(); String sourceLocation = sourceTableLocation.toUri().getScheme(); if (copierOptions.containsKey(DATA_MANIPULATOR_FACTORY_CLASS)) { for (DataManipulatorFactory factory : dataManipulatorFactories) { final String factoryClassName = factory.getClass().getName(); if (factoryClassName.equals(copierOptions.get(DATA_MANIPULATOR_FACTORY_CLASS).toString())) { log.debug("Found DataManipulatorFactory '{}' using config", factoryClassName); return factory; } } } else { for (DataManipulatorFactory factory : dataManipulatorFactories) { if (factory.supportsSchemes(sourceLocation, replicaLocation)) { log .debug("Found DataManipulatorFactory {} for cleanup at location {}.", factory.getClass().getName(), replicaLocation); return factory; } } } throw new UnsupportedOperationException( "No DataManipulator found which can delete the data at location: " + replicaLocation); } @Autowired DefaultDataManipulatorFactoryManager(List<DataManipulatorFactory> factories); @Override DataManipulatorFactory getFactory(
Path sourceTableLocation,
Path replicaTableLocation,
Map<String, Object> copierOptions); }### Answer:
@Test public void s3ManipulatorReturnedForS3S3Copy() { replicaLocation = new Path(s3Path); dataManipulatorFactory = manager.getFactory(sourceLocation, replicaLocation, copierOptions); assertEquals(S3_S3, ((TestDataManipulatorFactory) dataManipulatorFactory).getType()); }
@Test public void awsMapReduceManipulatorReturnedForHdfsS3Copy() { sourceLocation = new Path(hdfsPath); replicaLocation = new Path(s3Path); dataManipulatorFactory = manager.getFactory(sourceLocation, replicaLocation, copierOptions); assertEquals(S3_MAPREDUCE, ((TestDataManipulatorFactory) dataManipulatorFactory).getType()); }
@Test public void hdfsManipulatorReturnedForHdfsCopy() { sourceLocation = new Path(hdfsPath); replicaLocation = new Path(hdfsPath); dataManipulatorFactory = manager.getFactory(sourceLocation, replicaLocation, copierOptions); assertEquals(HDFS, ((TestDataManipulatorFactory) dataManipulatorFactory).getType()); }
@Test public void dataManipulatorReturnedFromCopierOption() { replicaLocation = new Path(hdfsPath); TestDataManipulatorFactory testFactory = new TestDataManipulatorFactory(HDFS); manager = new DefaultDataManipulatorFactoryManager(Arrays.asList(testFactory)); copierOptions.put(DATA_MANIPULATOR_FACTORY_CLASS, testFactory.getClass().getName()); dataManipulatorFactory = manager.getFactory(sourceLocation, replicaLocation, copierOptions); assertEquals(dataManipulatorFactory, testFactory); }
@Test(expected = UnsupportedOperationException.class) public void noSupportingFactory() { replicaLocation = new Path("<path>"); dataManipulatorFactory = manager.getFactory(sourceLocation, replicaLocation, copierOptions); assertTrue(((TestDataManipulatorFactory) dataManipulatorFactory).getType() == HDFS); } |
### Question:
MoreMapUtils { public static <T extends Enum<T>> List<T> getListOfEnum( Map<?, ?> map, Object key, List<T> defaultValue, final Class<T> enumClass) { return getList(map, key, defaultValue, new Function<Object, T>() { @Override public T apply(Object input) { if (input == null) { return null; } if (enumClass.isAssignableFrom(input.getClass())) { return enumClass.cast(input); } return Enum.valueOf(enumClass, input.toString().trim().toUpperCase(Locale.ROOT)); } }); } private MoreMapUtils(); static List<T> getListOfEnum(
Map<?, ?> map,
Object key,
List<T> defaultValue,
final Class<T> enumClass); static Path getHadoopPath(Map<?, ?> map, Object key, Path defaultValue); static URI getUri(Map<?, ?> map, Object key, URI defaultValue); }### Answer:
@Test public void listOfEnumSingleStringValue() { Map<?, ?> map = newMap(KEY, ONE.name()); List<MyEnum> actualEnums = MoreMapUtils.getListOfEnum(map, KEY, null, MyEnum.class); List<MyEnum> expectedEnums = Lists.newArrayList(ONE); assertThat(actualEnums, is(expectedEnums)); }
@Test public void listOfEnumSingleValue() { Map<?, ?> map = newMap(KEY, ONE); List<MyEnum> actualEnums = MoreMapUtils.getListOfEnum(map, KEY, null, MyEnum.class); List<MyEnum> expectedEnums = Lists.newArrayList(ONE); assertThat(actualEnums, is(expectedEnums)); }
@Test public void listOfEnumMultiStringValue() { Map<?, ?> map = newMap(KEY, Lists.newArrayList(ONE.name(), TWO_AND_A_HALF.name())); List<MyEnum> actualEnums = MoreMapUtils.getListOfEnum(map, KEY, null, MyEnum.class); List<MyEnum> expectedEnums = Lists.newArrayList(ONE, TWO_AND_A_HALF); assertThat(actualEnums, is(expectedEnums)); }
@Test public void listOfEnumStringMultiValue() { Map<?, ?> map = newMap(KEY, ONE.name() + ", " + TWO_AND_A_HALF.name()); List<MyEnum> actualEnums = MoreMapUtils.getListOfEnum(map, KEY, null, MyEnum.class); List<MyEnum> expectedEnums = Lists.newArrayList(ONE, TWO_AND_A_HALF); assertThat(actualEnums, is(expectedEnums)); } |
### Question:
MoreMapUtils { public static Path getHadoopPath(Map<?, ?> map, Object key, Path defaultValue) { Object path = map.get(key); if (path == null) { return defaultValue; } if (path instanceof Path) { return (Path) path; } if (path instanceof String) { return new Path(((String) path).trim()); } throw new IllegalArgumentException( "Object '" + key + "' must be a String or a Path. Got " + path.getClass().getName()); } private MoreMapUtils(); static List<T> getListOfEnum(
Map<?, ?> map,
Object key,
List<T> defaultValue,
final Class<T> enumClass); static Path getHadoopPath(Map<?, ?> map, Object key, Path defaultValue); static URI getUri(Map<?, ?> map, Object key, URI defaultValue); }### Answer:
@Test(expected = IllegalArgumentException.class) public void invalidHadoopPath() { Map<?, ?> map = newMap(KEY, Integer.valueOf(100)); MoreMapUtils.getHadoopPath(map, KEY, null); }
@Test public void noKeyReturnsDefaultHadoopPath() { Map<?, ?> map = newMap(KEY + "_+", "abc"); Path defaultPath = new Path("myPath"); assertThat(MoreMapUtils.getHadoopPath(map, KEY, defaultPath), is(defaultPath)); }
@Test public void nullHadoopPath() { Map<?, ?> map = newMap(KEY, null); assertThat(MoreMapUtils.getHadoopPath(map, KEY, null), is(nullValue())); }
@Test public void hadoopPath() { Path path = new Path("myPath"); Map<?, ?> map = newMap(KEY, path); assertThat(MoreMapUtils.getHadoopPath(map, KEY, null), is(path)); }
@Test public void hadoopPathAsString() { Map<?, ?> map = newMap(KEY, "myStringPath"); assertThat(MoreMapUtils.getHadoopPath(map, KEY, null), is(new Path("myStringPath"))); }
@Test(expected = IllegalArgumentException.class) public void invalidPathType() { Map<?, ?> map = newMap(KEY, new Object()); MoreMapUtils.getHadoopPath(map, KEY, null); } |
### Question:
S3DataManipulatorFactory implements DataManipulatorFactory { @Override public boolean supportsSchemes(String sourceScheme, String replicaScheme) { return S3Schemes.isS3Scheme(sourceScheme) && S3Schemes.isS3Scheme(replicaScheme); } @Autowired S3DataManipulatorFactory(AmazonS3ClientFactory s3ClientFactory); @Override S3DataManipulator newInstance(Path replicaLocation, Map<String, Object> copierOptions); @Override boolean supportsSchemes(String sourceScheme, String replicaScheme); }### Answer:
@Test public void checkSupportsS3ToS3() { sourceLocation = new Path(s3Path); replicaLocation = new Path(s3Path); sourceScheme = sourceLocation.toUri().getScheme(); replicaScheme = replicaLocation.toUri().getScheme(); assertTrue(dataManipulatorFactory.supportsSchemes(sourceScheme, replicaScheme)); }
@Test public void checkSupportsHdfsToS3UpperCase() { sourceLocation = new Path(s3Path.toUpperCase()); replicaLocation = new Path(s3Path.toUpperCase()); sourceScheme = sourceLocation.toUri().getScheme(); replicaScheme = replicaLocation.toUri().getScheme(); assertTrue(dataManipulatorFactory.supportsSchemes(sourceScheme, replicaScheme)); }
@Test public void checkDoesntSupportHdfs() { sourceLocation = new Path(hdfsPath); replicaLocation = new Path(hdfsPath); sourceScheme = sourceLocation.toUri().getScheme(); replicaScheme = replicaLocation.toUri().getScheme(); assertFalse(dataManipulatorFactory.supportsSchemes(sourceScheme, replicaScheme)); }
@Test public void checkDoesntSupportHdfsToS3() { sourceLocation = new Path(hdfsPath); replicaLocation = new Path(s3Path); sourceScheme = sourceLocation.toUri().getScheme(); replicaScheme = replicaLocation.toUri().getScheme(); assertFalse(dataManipulatorFactory.supportsSchemes(sourceScheme, replicaScheme)); }
@Test public void checkDoesntSupportS3ToHdfs() { sourceLocation = new Path(s3Path); replicaLocation = new Path(hdfsPath); sourceScheme = sourceLocation.toUri().getScheme(); replicaScheme = replicaLocation.toUri().getScheme(); assertFalse(dataManipulatorFactory.supportsSchemes(sourceScheme, replicaScheme)); }
@Test public void checkDoesntSupportRandomPaths() { sourceLocation = new Path("<path>"); replicaLocation = new Path("<path>"); sourceScheme = sourceLocation.toUri().getScheme(); replicaScheme = replicaLocation.toUri().getScheme(); assertFalse(dataManipulatorFactory.supportsSchemes(sourceScheme, replicaScheme)); } |
### Question:
MoreMapUtils { public static URI getUri(Map<?, ?> map, Object key, URI defaultValue) { Object uri = map.get(key); if (uri == null) { return defaultValue; } if (uri instanceof URI) { return (URI) uri; } if (uri instanceof String) { try { return URI.create(((String) uri).trim()); } catch (IllegalArgumentException e) { throw new IllegalArgumentException("Object '" + key + "' is not a valid URI: " + uri, e); } } throw new IllegalArgumentException( "Object '" + key + "' must be a String or a URI. Got " + uri.getClass().getName()); } private MoreMapUtils(); static List<T> getListOfEnum(
Map<?, ?> map,
Object key,
List<T> defaultValue,
final Class<T> enumClass); static Path getHadoopPath(Map<?, ?> map, Object key, Path defaultValue); static URI getUri(Map<?, ?> map, Object key, URI defaultValue); }### Answer:
@Test(expected = IllegalArgumentException.class) public void invalidUri() { Map<?, ?> map = newMap(KEY, "s3:"); MoreMapUtils.getUri(map, KEY, null); }
@Test public void noKeyReturnsDefaultUri() { Map<?, ?> map = newMap(KEY + "_+", "abc"); URI defaultUri = URI.create("my-uri"); assertThat(MoreMapUtils.getUri(map, KEY, defaultUri), is(defaultUri)); }
@Test public void nullUri() { Map<?, ?> map = newMap(KEY, null); assertThat(MoreMapUtils.getUri(map, KEY, null), is(nullValue())); }
@Test public void uri() { URI uri = URI.create("http: Map<?, ?> map = newMap(KEY, uri); assertThat(MoreMapUtils.getUri(map, KEY, null), is(uri)); }
@Test public void uriAsString() { Map<?, ?> map = newMap(KEY, "ftp: assertThat(MoreMapUtils.getUri(map, KEY, null), is(URI.create("ftp: }
@Test(expected = IllegalArgumentException.class) public void invalidUriType() { Map<?, ?> map = newMap(KEY, new Object()); MoreMapUtils.getUri(map, KEY, null); } |
### Question:
UnpartitionedTableMetadataMirrorReplication implements Replication { @Override public void replicate() throws CircusTrainException { try { replica.validateReplicaTable(replicaDatabaseName, replicaTableName); TableAndStatistics sourceTableAndStatistics = source.getTableAndStatistics(database, table); Table sourceTable = sourceTableAndStatistics.getTable(); SourceLocationManager sourceLocationManager = source.getLocationManager(sourceTable, eventId); ReplicaLocationManager replicaLocationManager = new MetadataMirrorReplicaLocationManager(sourceLocationManager, TableType.UNPARTITIONED); sourceLocationManager.cleanUpLocations(); replica .updateMetadata(eventId, sourceTableAndStatistics, replicaDatabaseName, replicaTableName, replicaLocationManager); LOG.info("Metadata mirrored for table {}.{} (no data copied).", database, table); } catch (Throwable t) { throw new CircusTrainException("Unable to replicate", t); } } UnpartitionedTableMetadataMirrorReplication(
String database,
String table,
Source source,
Replica replica,
EventIdFactory eventIdFactory,
String replicaDatabaseName,
String replicaTableName); @Override void replicate(); @Override String name(); @Override String getEventId(); }### Answer:
@Test public void typical() throws Exception { UnpartitionedTableMetadataMirrorReplication replication = new UnpartitionedTableMetadataMirrorReplication(DATABASE, TABLE, source, replica, eventIdFactory, DATABASE, TABLE); replication.replicate(); InOrder replicationOrder = inOrder(sourceLocationManager, replica); replicationOrder.verify(replica).validateReplicaTable(DATABASE, TABLE); replicationOrder.verify(sourceLocationManager).cleanUpLocations(); replicationOrder.verify(replica).updateMetadata(eq(EVENT_ID), eq(sourceTableAndStatistics), eq(DATABASE), eq(TABLE), any(ReplicaLocationManager.class)); } |
### Question:
DefaultCopierFactoryManager implements com.hotels.bdp.circustrain.api.copier.CopierFactoryManager { @Override public CopierFactory getCopierFactory(Path sourceLocation, Path replicaLocation, Map<String, Object> copierOptions) { String sourceScheme = sourceLocation.toUri().getScheme(); String replicaScheme = replicaLocation.toUri().getScheme(); if (copierOptions.containsKey(COPIER_FACTORY_CLASS)) { for (CopierFactory copierFactory : copierFactories) { final String copierFactoryClassName = copierFactory.getClass().getName(); if (copierFactoryClassName.equals(copierOptions.get(COPIER_FACTORY_CLASS).toString())) { LOG.debug("Found CopierFactory '{}' using config", copierFactoryClassName); return copierFactory; } } } else { for (CopierFactory copierFactory : copierFactories) { final String copierFactoryClassName = copierFactory.getClass().getName(); if (copierFactory.supportsSchemes(sourceScheme, replicaScheme)) { LOG .debug("Found CopierFactory '{}' for sourceScheme '{}' and replicaScheme '{}'", copierFactoryClassName, sourceScheme, replicaScheme); return copierFactory; } } } throw new UnsupportedOperationException("No CopierFactory that suppports sourceScheme '" + sourceScheme + "' and replicaScheme '" + replicaScheme + "'"); } @Autowired DefaultCopierFactoryManager(List<CopierFactory> copierFactories); @Override CopierFactory getCopierFactory(Path sourceLocation, Path replicaLocation, Map<String, Object> copierOptions); }### Answer:
@Test public void supportsScheme() { defaultCopierFactoryManager = new DefaultCopierFactoryManager(Arrays.asList(copierFactory)); when(copierFactory.supportsSchemes(SCHEME, SCHEME)).thenReturn(true); CopierFactory copierFactoryResult = defaultCopierFactoryManager.getCopierFactory(path, path, ImmutableMap.<String, Object> of()); assertEquals(copierFactory, copierFactoryResult); }
@Test(expected = UnsupportedOperationException.class) public void doesNotSupportScheme() { defaultCopierFactoryManager = new DefaultCopierFactoryManager(Arrays.asList(copierFactory)); when(copierFactory.supportsSchemes(SCHEME, SCHEME)).thenReturn(false); defaultCopierFactoryManager.getCopierFactory(path, path, ImmutableMap.<String, Object> of()); }
@Test public void supportsSchemeWithCopierFactoryClass() { CopierFactory testCopierFactory = new TestCopierFactory(); defaultCopierFactoryManager = new DefaultCopierFactoryManager(Arrays.asList(testCopierFactory)); CopierFactory copierFactoryResult = defaultCopierFactoryManager.getCopierFactory(path, path, ImmutableMap.<String, Object> of("copier-factory-class", testCopierFactory.getClass().getName())); assertEquals(copierFactoryResult, testCopierFactory); }
@Test(expected = UnsupportedOperationException.class) public void supportsSchemeWithCopierFactoryClassNotFound() { CopierFactory testCopierFactory = new TestCopierFactory(); defaultCopierFactoryManager = new DefaultCopierFactoryManager(Arrays.asList(testCopierFactory)); defaultCopierFactoryManager.getCopierFactory(path, path, ImmutableMap.<String, Object> of("copier-factory-class", "test")); } |
### Question:
SpelParsedPartitionPredicate implements PartitionPredicate { @Override public String getPartitionPredicate() { String partitionFilter = tableReplication.getSourceTable().getPartitionFilter(); String parsedPartitionFilter = expressionParser.parse(partitionFilter); if (!Objects.equals(partitionFilter, parsedPartitionFilter)) { LOG.info("Parsed partitionFilter: " + parsedPartitionFilter); } return parsedPartitionFilter; } SpelParsedPartitionPredicate(SpringExpressionParser expressionParser, TableReplication tableReplication); @Override String getPartitionPredicate(); @Override short getPartitionPredicateLimit(); }### Answer:
@Test public void simpleFilter() throws Exception { when(sourceTable.getPartitionFilter()).thenReturn("filter"); when(expressionParser.parse("filter")).thenReturn("filter"); assertThat(predicate.getPartitionPredicate(), is("filter")); }
@Test public void expressionParserChangedFilter() throws Exception { when(expressionParser.parse("filter")).thenReturn("filter2"); when(sourceTable.getPartitionFilter()).thenReturn("filter"); assertThat(predicate.getPartitionPredicate(), is("filter2")); } |
### Question:
SpelParsedPartitionPredicate implements PartitionPredicate { @Override public short getPartitionPredicateLimit() { Short partitionLimit = tableReplication.getSourceTable().getPartitionLimit(); return partitionLimit == null ? -1 : partitionLimit; } SpelParsedPartitionPredicate(SpringExpressionParser expressionParser, TableReplication tableReplication); @Override String getPartitionPredicate(); @Override short getPartitionPredicateLimit(); }### Answer:
@Test public void partitionPredicateLimit() throws Exception { when(sourceTable.getPartitionLimit()).thenReturn((short) 10); assertThat(predicate.getPartitionPredicateLimit(), is((short) 10)); }
@Test public void noPartitionPredicateLimitSetDefaultsToMinus1() throws Exception { when(sourceTable.getPartitionLimit()).thenReturn(null); assertThat(predicate.getPartitionPredicateLimit(), is((short) -1)); } |
### Question:
CompositePartitionTransformation implements PartitionTransformation { @Override public Partition transform(Partition partition) { Partition transformedPartition = partition; for (PartitionTransformation partitionTransformation : partitionTransformations) { transformedPartition = partitionTransformation.transform(transformedPartition); } return transformedPartition; } CompositePartitionTransformation(List<PartitionTransformation> partitionTransformations); @Override Partition transform(Partition partition); }### Answer:
@Test public void transform() throws Exception { transformations.add(transformationOne); transformations.add(transformationTwo); partitionTransformations = new CompositePartitionTransformation(transformations); partitionTransformations.transform(null); verify(transformationOne).transform(null); verify(transformationTwo).transform(null); } |
### Question:
CompositeTableTransformation implements TableTransformation { @Override public Table transform(Table table) { Table transformedTable = table; for (TableTransformation tableTransformation : tableTransformations) { transformedTable = tableTransformation.transform(transformedTable); } return transformedTable; } CompositeTableTransformation(List<TableTransformation> tableTransformations); @Override Table transform(Table table); }### Answer:
@Test public void transform() throws Exception { transformations.add(transformationOne); transformations.add(transformationTwo); tableTransformations = new CompositeTableTransformation(transformations); tableTransformations.transform(null); verify(transformationOne).transform(null); verify(transformationTwo).transform(null); } |
### Question:
PartitionedTableMetadataMirrorReplication implements Replication { @Override public void replicate() throws CircusTrainException { try { TableAndStatistics sourceTableAndStatistics = source.getTableAndStatistics(database, table); Table sourceTable = sourceTableAndStatistics.getTable(); PartitionsAndStatistics sourcePartitionsAndStatistics = source .getPartitions(sourceTable, partitionPredicate.getPartitionPredicate(), partitionPredicate.getPartitionPredicateLimit()); List<Partition> sourcePartitions = sourcePartitionsAndStatistics.getPartitions(); replica.validateReplicaTable(replicaDatabaseName, replicaTableName); SourceLocationManager sourceLocationManager = source .getLocationManager(sourceTable, sourcePartitions, eventId, Collections.<String, Object>emptyMap()); ReplicaLocationManager replicaLocationManager = new MetadataMirrorReplicaLocationManager(sourceLocationManager, TableType.PARTITIONED); sourceLocationManager.cleanUpLocations(); if (sourcePartitions.isEmpty()) { LOG.debug("Update table {}.{} metadata only", database, table); replica .updateMetadata(eventId, sourceTableAndStatistics, replicaDatabaseName, replicaTableName, replicaLocationManager); LOG .info( "No matching partitions found on table {}.{} with predicate {}. Table metadata updated, no partitions were updated.", database, table, partitionPredicate); } else { replica .updateMetadata(eventId, sourceTableAndStatistics, sourcePartitionsAndStatistics, replicaDatabaseName, replicaTableName, replicaLocationManager); int partitionsCopied = sourcePartitions.size(); LOG .info("Metadata mirrored for {} partitions of table {}.{} (no data copied).", partitionsCopied, database, table); } } catch (Throwable t) { throw new CircusTrainException("Unable to replicate", t); } } PartitionedTableMetadataMirrorReplication(
String database,
String table,
PartitionPredicate partitionPredicate,
Source source,
Replica replica,
EventIdFactory eventIdFactory,
String replicaDatabaseName,
String replicaTableName); @Override void replicate(); @Override String name(); @Override String getEventId(); }### Answer:
@Test public void typical() throws Exception { when(source.getPartitions(sourceTable, PARTITION_PREDICATE, MAX_PARTITIONS)).thenReturn(partitionsAndStatistics); PartitionedTableMetadataMirrorReplication replication = new PartitionedTableMetadataMirrorReplication(DATABASE, TABLE, partitionPredicate, source, replica, eventIdFactory, DATABASE, TABLE); replication.replicate(); InOrder replicationOrder = inOrder(sourceLocationManager, replica); replicationOrder.verify(replica).validateReplicaTable(DATABASE, TABLE); replicationOrder.verify(sourceLocationManager).cleanUpLocations(); replicationOrder .verify(replica) .updateMetadata(eq(EVENT_ID), eq(sourceTableAndStatistics), eq(partitionsAndStatistics), eq(DATABASE), eq(TABLE), any(ReplicaLocationManager.class)); }
@Test public void noMatchingPartitions() throws Exception { PartitionsAndStatistics emptyPartitionsAndStats = new PartitionsAndStatistics(sourceTable.getPartitionKeys(), Collections.<Partition>emptyList(), Collections.<String, List<ColumnStatisticsObj>>emptyMap()); when(source.getPartitions(sourceTable, PARTITION_PREDICATE, MAX_PARTITIONS)).thenReturn(emptyPartitionsAndStats); when(source.getLocationManager(sourceTable, Collections.<Partition>emptyList(), EVENT_ID, copierOptions)) .thenReturn(sourceLocationManager); PartitionedTableMetadataMirrorReplication replication = new PartitionedTableMetadataMirrorReplication(DATABASE, TABLE, partitionPredicate, source, replica, eventIdFactory, DATABASE, TABLE); replication.replicate(); verify(replica).validateReplicaTable(DATABASE, TABLE); verify(replica) .updateMetadata(eq(EVENT_ID), eq(sourceTableAndStatistics), eq(DATABASE), eq(TABLE), any(ReplicaLocationManager.class)); } |
### Question:
DestructiveReplication implements Replication { @Override public void replicate() throws CircusTrainException { try { if (!destructiveReplica.tableIsUnderCircusTrainControl()) { throw new CircusTrainException("Replica table '" + tableReplication.getQualifiedReplicaName() + "' is not controlled by circus train aborting replication, check configuration for correct replica name"); } if (destructiveSource.tableExists()) { destructiveReplica.dropDeletedPartitions(destructiveSource.getPartitionNames()); Replication replication = upsertReplicationFactory.newInstance(tableReplication); replication.replicate(); } else { destructiveReplica.dropTable(); } } catch (TException e) { throw new CircusTrainException(e); } } DestructiveReplication(
ReplicationFactoryImpl upsertReplicationFactory,
TableReplication tableReplication,
String eventId,
DestructiveSource destructiveSource,
DestructiveReplica destructiveReplica); @Override void replicate(); @Override String name(); @Override String getEventId(); }### Answer:
@Test public void replicateTableExists() throws Exception { when(destructiveReplica.tableIsUnderCircusTrainControl()).thenReturn(true); when(destructiveSource.tableExists()).thenReturn(true); when(upsertReplicationFactory.newInstance(tableReplication)).thenReturn(normalReplication); when(destructiveSource.getPartitionNames()).thenReturn(sourcePartitionNames); replication.replicate(); verify(destructiveReplica).dropDeletedPartitions(sourcePartitionNames); verify(normalReplication).replicate(); }
@Test public void replicateTableDoesNotExists() throws Exception { when(destructiveReplica.tableIsUnderCircusTrainControl()).thenReturn(true); when(destructiveSource.tableExists()).thenReturn(false); replication.replicate(); verifyZeroInteractions(upsertReplicationFactory); verify(destructiveReplica).dropTable(); }
@Test(expected = CircusTrainException.class) public void replicateNotCircusTrainTable() throws Exception { when(destructiveReplica.tableIsUnderCircusTrainControl()).thenReturn(false); replication.replicate(); }
@Test(expected = CircusTrainException.class) public void replicateExceptionsWrapped() throws Exception { when(destructiveReplica.tableIsUnderCircusTrainControl()).thenThrow(new TException()); replication.replicate(); } |
### Question:
DestructiveReplication implements Replication { @Override public String name() { return "destructive-" + tableReplication.getQualifiedReplicaName(); } DestructiveReplication(
ReplicationFactoryImpl upsertReplicationFactory,
TableReplication tableReplication,
String eventId,
DestructiveSource destructiveSource,
DestructiveReplica destructiveReplica); @Override void replicate(); @Override String name(); @Override String getEventId(); }### Answer:
@Test public void name() throws Exception { DestructiveReplication replication = new DestructiveReplication(upsertReplicationFactory, tableReplication, EVENT_ID, destructiveSource, destructiveReplica); assertThat(replication.name(), is("destructive-db.table2")); } |
### Question:
DestructiveReplication implements Replication { @Override public String getEventId() { return eventId; } DestructiveReplication(
ReplicationFactoryImpl upsertReplicationFactory,
TableReplication tableReplication,
String eventId,
DestructiveSource destructiveSource,
DestructiveReplica destructiveReplica); @Override void replicate(); @Override String name(); @Override String getEventId(); }### Answer:
@Test public void getEventId() throws Exception { DestructiveReplication replication = new DestructiveReplication(upsertReplicationFactory, tableReplication, EVENT_ID, destructiveSource, destructiveReplica); assertThat(replication.getEventId(), is(EVENT_ID)); } |
### Question:
UnpartitionedTableMetadataUpdateReplication implements Replication { @Override public void replicate() throws CircusTrainException { try { replica.validateReplicaTable(replicaDatabaseName, replicaTableName); try (CloseableMetaStoreClient client = replica.getMetaStoreClientSupplier().get()) { String previousLocation = getPreviousLocation(client); ReplicaLocationManager replicaLocationManager = new MetadataUpdateReplicaLocationManager(client, TableType.UNPARTITIONED, previousLocation, replicaDatabaseName, replicaTableName); TableAndStatistics sourceTableAndStatistics = source.getTableAndStatistics(database, table); replica .updateMetadata(eventId, sourceTableAndStatistics, replicaDatabaseName, replicaTableName, replicaLocationManager); } LOG.info("Metadata updated for table {}.{} (no data copied).", database, table); } catch (Throwable t) { throw new CircusTrainException("Unable to replicate", t); } } UnpartitionedTableMetadataUpdateReplication(
String database,
String table,
Source source,
Replica replica,
EventIdFactory eventIdFactory,
String replicaDatabaseName,
String replicaTableName); @Override void replicate(); @Override String name(); @Override String getEventId(); }### Answer:
@Test public void typical() throws Exception { when(replica.getTable(client, DATABASE, TABLE)).thenReturn(Optional.of(previousReplicaTable)); when(previousReplicaTable.getSd()).thenReturn(sd); when(sd.getLocation()).thenReturn(tableLocation); UnpartitionedTableMetadataUpdateReplication replication = new UnpartitionedTableMetadataUpdateReplication(DATABASE, TABLE, source, replica, eventIdFactory, DATABASE, TABLE); replication.replicate(); InOrder replicationOrder = inOrder(replica); replicationOrder.verify(replica).validateReplicaTable(DATABASE, TABLE); replicationOrder.verify(replica).updateMetadata(eq(EVENT_ID), eq(sourceTableAndStatistics), eq(DATABASE), eq(TABLE), any(ReplicaLocationManager.class)); }
@Test public void throwExceptionWhenReplicaTableDoesNotExist() throws Exception { expectedException.expect(CircusTrainException.class); expectedException.expectCause(isA(InvalidReplicationModeException.class)); when(replica.getTable(client, DATABASE, TABLE)).thenReturn(Optional.<Table> absent()); UnpartitionedTableMetadataUpdateReplication replication = new UnpartitionedTableMetadataUpdateReplication(DATABASE, TABLE, source, replica, eventIdFactory, DATABASE, TABLE); replication.replicate(); } |
### Question:
TransferManagerFactory { public TransferManager newInstance(AmazonS3 targetS3Client, S3S3CopierOptions s3s3CopierOptions) { LOG .debug("Initializing transfer manager with {} threads.", s3s3CopierOptions.getMaxThreadPoolSize()); return TransferManagerBuilder.standard() .withMultipartCopyThreshold(s3s3CopierOptions.getMultipartCopyThreshold()) .withMultipartCopyPartSize(s3s3CopierOptions.getMultipartCopyPartSize()) .withExecutorFactory(() -> Executors.newFixedThreadPool(s3s3CopierOptions.getMaxThreadPoolSize())) .withS3Client(targetS3Client) .build(); } TransferManager newInstance(AmazonS3 targetS3Client, S3S3CopierOptions s3s3CopierOptions); }### Answer:
@Test public void shouldCreateDefaultTransferManagerClient() { S3S3CopierOptions s3Options = new S3S3CopierOptions(new HashMap<String, Object>() {{ put(S3S3CopierOptions.Keys.MULTIPART_COPY_THRESHOLD.keyName(), MULTIPART_COPY_THRESHOLD_VALUE); put(S3S3CopierOptions.Keys.MULTIPART_COPY_PART_SIZE.keyName(), MULTIPART_COPY_PART_SIZE); }}); TransferManagerFactory factory = new TransferManagerFactory(); TransferManager transferManager = factory.newInstance(mockClient, s3Options); assertThat(transferManager.getAmazonS3Client(), is(mockClient)); TransferManagerConfiguration managerConfig = transferManager.getConfiguration(); assertThat(managerConfig.getMultipartCopyPartSize(), is(MULTIPART_COPY_PART_SIZE)); assertThat(managerConfig.getMultipartCopyThreshold(), is(MULTIPART_COPY_THRESHOLD_VALUE)); } |
### Question:
PartitionPredicateFactory { public PartitionPredicate newInstance(TableReplication tableReplication) { if (tableReplication.getSourceTable().isGeneratePartitionFilter()) { return new DiffGeneratedPartitionPredicate(sourceFactory.newInstance(tableReplication), replicaFactory.newInstance(tableReplication), tableReplication, checksumFunction); } else { return new SpelParsedPartitionPredicate(expressionParser, tableReplication); } } PartitionPredicateFactory(
HiveEndpointFactory<? extends HiveEndpoint> sourceFactory,
HiveEndpointFactory<? extends HiveEndpoint> replicaFactory,
SpringExpressionParser expressionParser,
Function<Path, String> checksumFunction); PartitionPredicate newInstance(TableReplication tableReplication); }### Answer:
@Test public void newInstanceSpelParsedPartitionPredicate() throws Exception { when(sourceTable.isGeneratePartitionFilter()).thenReturn(false); PartitionPredicate predicate = partitionPredicateFactory.newInstance(tableReplication); assertThat(predicate, instanceOf(SpelParsedPartitionPredicate.class)); }
@Test public void newInstanceDiffGeneratedPartitionPredicate() throws Exception { when(sourceTable.isGeneratePartitionFilter()).thenReturn(true); when(sourceFactory.newInstance(tableReplication)).thenReturn(source); when(replicaFactory.newInstance(tableReplication)).thenReturn(replica); PartitionPredicate predicate = partitionPredicateFactory.newInstance(tableReplication); assertThat(predicate, instanceOf(DiffGeneratedPartitionPredicate.class)); } |
### Question:
PartitionsAndStatistics { public ColumnStatistics getStatisticsForPartition(Partition partition) { if (partition == null) { throw new IllegalArgumentException("partition == null"); } return statisticsByPartition.get(partition); } PartitionsAndStatistics(
List<FieldSchema> partitionKeys,
List<Partition> partitions,
Map<String, List<ColumnStatisticsObj>> statisticsByPartitionName); PartitionsAndStatistics(
List<FieldSchema> partitionKeys,
Map<Partition, ColumnStatistics> statisticsByPartition); List<Partition> getPartitions(); List<FieldSchema> getPartitionKeys(); ColumnStatistics getStatisticsForPartition(Partition partition); List<String> getPartitionNames(); }### Answer:
@Test(expected = IllegalArgumentException.class) public void getStatisticsForPartitionNullArgument() { List<FieldSchema> partitionKeys = Lists.newArrayList(newFieldSchema("a")); List<Partition> partitions = Lists.newArrayList(); PartitionsAndStatistics partitionsAndStatistics = new PartitionsAndStatistics(partitionKeys, partitions, statisticsPerPartitionName); partitionsAndStatistics.getStatisticsForPartition(null); }
@Test public void getStatisticsForPartitionReturnsNullIfNotPresent() throws Exception { List<FieldSchema> partitionKeys = Lists.newArrayList(newFieldSchema("a")); Table table = newTable("t1", "db1", partitionKeys, newStorageDescriptor(new File("bla"), "col1")); List<Partition> partitions = Lists.newArrayList(newPartition(table, "b")); PartitionsAndStatistics partitionsAndStatistics = new PartitionsAndStatistics(partitionKeys, partitions, statisticsPerPartitionName); assertNull(partitionsAndStatistics.getStatisticsForPartition(partitions.get(0))); }
@Test public void getStatisticsForPartitionReturnsNullIfEmptyStats() throws Exception { List<FieldSchema> partitionKeys = Lists.newArrayList(newFieldSchema("a")); Table table = newTable("t1", "db1", partitionKeys, newStorageDescriptor(new File("bla"), "col1")); List<Partition> partitions = Lists.newArrayList(newPartition(table, "b")); statisticsPerPartitionName.put("a=b", Collections.<ColumnStatisticsObj> emptyList()); PartitionsAndStatistics partitionsAndStatistics = new PartitionsAndStatistics(partitionKeys, partitions, statisticsPerPartitionName); assertNull(partitionsAndStatistics.getStatisticsForPartition(partitions.get(0))); } |
### Question:
ExpressionParserFunctions { private static DateTime nowInZone(DateTimeZone zone) { return DateTime.now(zone); } private ExpressionParserFunctions(); static DateTime nowInZone(String zone); static DateTime nowUtc(); static DateTime nowEuropeLondon(); static DateTime nowAmericaLosAngeles(); static String zeroPadLeft(int value, int width); static String zeroPadLeft(String value, int width); }### Answer:
@Test public void nowInZone() { DateTime now = ExpressionParserFunctions.nowInZone("UTC"); assertThat(now.getZone(), is(DateTimeZone.UTC)); } |
### Question:
ExpressionParserFunctions { public static DateTime nowUtc() { return nowInZone(DateTimeZone.UTC); } private ExpressionParserFunctions(); static DateTime nowInZone(String zone); static DateTime nowUtc(); static DateTime nowEuropeLondon(); static DateTime nowAmericaLosAngeles(); static String zeroPadLeft(int value, int width); static String zeroPadLeft(String value, int width); }### Answer:
@Test public void nowUtc() { DateTime now = ExpressionParserFunctions.nowUtc(); assertThat(now.getZone(), is(DateTimeZone.UTC)); } |
### Question:
ExpressionParserFunctions { public static DateTime nowEuropeLondon() { return nowInZone("Europe/London"); } private ExpressionParserFunctions(); static DateTime nowInZone(String zone); static DateTime nowUtc(); static DateTime nowEuropeLondon(); static DateTime nowAmericaLosAngeles(); static String zeroPadLeft(int value, int width); static String zeroPadLeft(String value, int width); }### Answer:
@Test public void nowEuropeLondon() { DateTime now = ExpressionParserFunctions.nowEuropeLondon(); assertThat(now.getZone(), is(DateTimeZone.forID("Europe/London"))); } |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.