src_fm_fc_ms_ff | target |
---|---|
StochasticLoadBalancer extends BaseLoadBalancer { @Override public synchronized void setClusterStatus(ClusterStatus st) { super.setClusterStatus(st); updateRegionLoad(); for(CostFromRegionLoadFunction cost : regionLoadFunctions) { cost.setClusterStatus(st); } } @Override void onConfigurationChange(Configuration conf); @Override synchronized void setConf(Configuration conf); @Override synchronized void setClusterStatus(ClusterStatus st); @Override synchronized void setMasterServices(MasterServices masterServices); @Override synchronized List<RegionPlan> balanceCluster(Map<ServerName,
List<HRegionInfo>> clusterState); } | @Test public void testKeepRegionLoad() throws Exception { ServerName sn = ServerName.valueOf("test:8080", 100); int numClusterStatusToAdd = 20000; for (int i = 0; i < numClusterStatusToAdd; i++) { ServerLoad sl = mock(ServerLoad.class); RegionLoad rl = mock(RegionLoad.class); when(rl.getStores()).thenReturn(i); Map<byte[], RegionLoad> regionLoadMap = new TreeMap<byte[], RegionLoad>(Bytes.BYTES_COMPARATOR); regionLoadMap.put(Bytes.toBytes(REGION_KEY), rl); when(sl.getRegionsLoad()).thenReturn(regionLoadMap); ClusterStatus clusterStatus = mock(ClusterStatus.class); when(clusterStatus.getServers()).thenReturn(Arrays.asList(sn)); when(clusterStatus.getLoad(sn)).thenReturn(sl); loadBalancer.setClusterStatus(clusterStatus); } assertTrue(loadBalancer.loads.get(REGION_KEY) != null); assertTrue(loadBalancer.loads.get(REGION_KEY).size() == 15); Queue<RegionLoad> loads = loadBalancer.loads.get(REGION_KEY); int i = 0; while(loads.size() > 0) { RegionLoad rl = loads.remove(); assertEquals(i + (numClusterStatusToAdd - 15), rl.getStores()); i ++; } } |
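The test above relies on the balancer retaining only the most recent region-load samples per region (the test pins the retained history at 15 entries, governed by `numRegionLoadsToRemember`). A minimal sketch of that bounded-history idiom, with hypothetical names and no claims about the real `updateRegionLoad()` internals:

```java
import java.util.ArrayDeque;
import java.util.Deque;

// Keeps only the newest 'capacity' samples; older samples fall off the front.
class BoundedLoadHistory<T> {
    private final Deque<T> samples = new ArrayDeque<>();
    private final int capacity;

    BoundedLoadHistory(int capacity) {
        this.capacity = capacity;
    }

    synchronized void record(T sample) {
        samples.addLast(sample);
        while (samples.size() > capacity) {
            samples.removeFirst(); // drop the oldest reading
        }
    }

    synchronized int size() {
        return samples.size();
    }

    public static void main(String[] args) {
        BoundedLoadHistory<Integer> history = new BoundedLoadHistory<>(15);
        for (int i = 0; i < 20000; i++) {
            history.record(i);
        }
        // Mirrors the assertion in testKeepRegionLoad: only the last 15 survive.
        System.out.println(history.size()); // 15
    }
}
```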
StochasticLoadBalancer extends BaseLoadBalancer { @Override public synchronized List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState) { List<RegionPlan> plans = balanceMasterRegions(clusterState); if (plans != null || clusterState == null || clusterState.size() <= 1) { return plans; } if (masterServerName != null && clusterState.containsKey(masterServerName)) { if (clusterState.size() <= 2) { return null; } clusterState = new HashMap<ServerName, List<HRegionInfo>>(clusterState); clusterState.remove(masterServerName); } RegionLocationFinder finder = null; if (this.localityCost != null && this.localityCost.getMultiplier() > 0) { finder = this.regionFinder; } Cluster cluster = new Cluster(clusterState, loads, finder, rackManager); if (!needsBalance(cluster)) { return null; } long startTime = EnvironmentEdgeManager.currentTime(); initCosts(cluster); double currentCost = computeCost(cluster, Double.MAX_VALUE); double initCost = currentCost; double newCost = currentCost; long computedMaxSteps = Math.min(this.maxSteps, ((long)cluster.numRegions * (long)this.stepsPerRegion * (long)cluster.numServers)); long step; for (step = 0; step < computedMaxSteps; step++) { int generatorIdx = RANDOM.nextInt(candidateGenerators.length); CandidateGenerator p = candidateGenerators[generatorIdx]; Cluster.Action action = p.generate(cluster); if (action.type == Type.NULL) { continue; } cluster.doAction(action); updateCostsWithAction(cluster, action); newCost = computeCost(cluster, currentCost); if (newCost < currentCost) { currentCost = newCost; } else { Action undoAction = action.undoAction(); cluster.doAction(undoAction); updateCostsWithAction(cluster, undoAction); } if (EnvironmentEdgeManager.currentTime() - startTime > maxRunningTime) { break; } } long endTime = EnvironmentEdgeManager.currentTime(); metricsBalancer.balanceCluster(endTime - startTime); if (initCost > currentCost) { plans = createRegionPlans(cluster); if (LOG.isDebugEnabled()) { LOG.debug("Finished computing new load balance plan. Computation took " + (endTime - startTime) + "ms to try " + step + " different iterations. Found a solution that moves " + plans.size() + " regions; Going from a computed cost of " + initCost + " to a new cost of " + currentCost); } return plans; } if (LOG.isDebugEnabled()) { LOG.debug("Could not find a better load balance plan. Tried " + step + " different configurations in " + (endTime - startTime) + "ms, and did not find anything with a computed cost less than " + initCost); } return null; } @Override void onConfigurationChange(Configuration conf); @Override synchronized void setConf(Configuration conf); @Override synchronized void setClusterStatus(ClusterStatus st); @Override synchronized void setMasterServices(MasterServices masterServices); @Override synchronized List<RegionPlan> balanceCluster(Map<ServerName,
List<HRegionInfo>> clusterState); } | @Test public void testBalanceCluster() throws Exception { for (int[] mockCluster : clusterStateMocks) { Map<ServerName, List<HRegionInfo>> servers = mockClusterServers(mockCluster); List<ServerAndLoad> list = convertToList(servers); LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list)); List<RegionPlan> plans = loadBalancer.balanceCluster(servers); List<ServerAndLoad> balancedCluster = reconcile(list, plans, servers); LOG.info("Mock Balance : " + printMock(balancedCluster)); assertClusterAsBalanced(balancedCluster); List<RegionPlan> secondPlans = loadBalancer.balanceCluster(servers); assertNull(secondPlans); for (Map.Entry<ServerName, List<HRegionInfo>> entry : servers.entrySet()) { returnRegions(entry.getValue()); returnServer(entry.getKey()); } } }
@Test(timeout = 60000) public void testLosingRs() throws Exception { int numNodes = 3; int numRegions = 20; int numRegionsPerServer = 3; int replication = 1; int numTables = 2; Map<ServerName, List<HRegionInfo>> serverMap = createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); List<ServerAndLoad> list = convertToList(serverMap); List<RegionPlan> plans = loadBalancer.balanceCluster(serverMap); assertNotNull(plans); List<ServerAndLoad> balancedCluster = reconcile(list, plans, serverMap); assertClusterAsBalanced(balancedCluster); ServerName sn = serverMap.keySet().toArray(new ServerName[serverMap.size()])[0]; ServerName deadSn = ServerName.valueOf(sn.getHostname(), sn.getPort(), sn.getStartcode() - 100); serverMap.put(deadSn, new ArrayList<HRegionInfo>(0)); plans = loadBalancer.balanceCluster(serverMap); assertNull(plans); } |
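`balanceCluster` above is a randomized local search: pick a candidate action, apply it, recompute the cost, keep the action only if the cost dropped, otherwise undo it, bounded by both a step budget (`computedMaxSteps`) and a wall-clock budget (`maxRunningTime`). A stripped-down sketch of that loop over an arbitrary mutable state; all types here are hypothetical stand-ins, not HBase classes:

```java
import java.util.Random;
import java.util.function.ToDoubleFunction;

// Generic accept-if-better local search with step and time budgets.
class HillClimber<S> {
    interface Move<S> {
        void apply(S state);
        void undo(S state);
    }
    interface MoveGenerator<S> {
        Move<S> next(S state, Random rng);
    }

    static <S> double optimize(S state, ToDoubleFunction<S> cost, MoveGenerator<S> gen,
                               long maxSteps, long maxMillis) {
        Random rng = new Random();
        double current = cost.applyAsDouble(state);
        long deadline = System.currentTimeMillis() + maxMillis;
        for (long step = 0; step < maxSteps; step++) {
            Move<S> move = gen.next(state, rng);
            move.apply(state);
            double candidate = cost.applyAsDouble(state);
            if (candidate < current) {
                current = candidate;       // keep the improving move
            } else {
                move.undo(state);          // revert anything that made things worse
            }
            if (System.currentTimeMillis() > deadline) {
                break;                     // same wall-clock cutoff as maxRunningTime
            }
        }
        return current;
    }
}
```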
StochasticLoadBalancer extends BaseLoadBalancer { @Override public synchronized void setConf(Configuration conf) { super.setConf(conf); LOG.info("loading config"); maxSteps = conf.getInt(MAX_STEPS_KEY, maxSteps); stepsPerRegion = conf.getInt(STEPS_PER_REGION_KEY, stepsPerRegion); maxRunningTime = conf.getLong(MAX_RUNNING_TIME_KEY, maxRunningTime); numRegionLoadsToRemember = conf.getInt(KEEP_REGION_LOADS, numRegionLoadsToRemember); if (localityCandidateGenerator == null) { localityCandidateGenerator = new LocalityBasedCandidateGenerator(services); } localityCost = new LocalityCostFunction(conf, services); if (candidateGenerators == null) { candidateGenerators = new CandidateGenerator[] { new RandomCandidateGenerator(), new LoadCandidateGenerator(), localityCandidateGenerator, new RegionReplicaRackCandidateGenerator(), }; } regionLoadFunctions = new CostFromRegionLoadFunction[] { new ReadRequestCostFunction(conf), new WriteRequestCostFunction(conf), new MemstoreSizeCostFunction(conf), new StoreFileCostFunction(conf) }; regionReplicaHostCostFunction = new RegionReplicaHostCostFunction(conf); regionReplicaRackCostFunction = new RegionReplicaRackCostFunction(conf); costFunctions = new CostFunction[]{ new RegionCountSkewCostFunction(conf), new PrimaryRegionCountSkewCostFunction(conf), new MoveCostFunction(conf), localityCost, new TableSkewCostFunction(conf), regionReplicaHostCostFunction, regionReplicaRackCostFunction, regionLoadFunctions[0], regionLoadFunctions[1], regionLoadFunctions[2], regionLoadFunctions[3], }; } @Override void onConfigurationChange(Configuration conf); @Override synchronized void setConf(Configuration conf); @Override synchronized void setClusterStatus(ClusterStatus st); @Override synchronized void setMasterServices(MasterServices masterServices); @Override synchronized List<RegionPlan> balanceCluster(Map<ServerName,
List<HRegionInfo>> clusterState); } | @Ignore @Test (timeout = 800000) public void testRegionReplicationOnMidClusterSameHosts() { conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L); conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 90 * 1000); conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f); loadBalancer.setConf(conf); int numHosts = 100; int numRegions = 100 * 100; int replication = 3; int numRegionsPerServer = 5; int numTables = 10; Map<ServerName, List<HRegionInfo>> serverMap = createServerMap(numHosts, numRegions, numRegionsPerServer, replication, numTables); int numNodesPerHost = 4; Map<ServerName, List<HRegionInfo>> newServerMap = new TreeMap<ServerName, List<HRegionInfo>>(serverMap); for (Map.Entry<ServerName, List<HRegionInfo>> entry : serverMap.entrySet()) { for (int i=1; i < numNodesPerHost; i++) { ServerName s1 = entry.getKey(); ServerName s2 = ServerName.valueOf(s1.getHostname(), s1.getPort() + i, 1); newServerMap.put(s2, new ArrayList<HRegionInfo>()); } } testWithCluster(newServerMap, null, true, true); }
@Test (timeout = 800000) public void testRegionReplicationOnMidClusterWithRacks() { conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 10000000L); conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f); conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 120 * 1000); loadBalancer.setConf(conf); int numNodes = 30; int numRegions = numNodes * 30; int replication = 3; int numRegionsPerServer = 28; int numTables = 10; int numRacks = 4; Map<ServerName, List<HRegionInfo>> serverMap = createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); RackManager rm = new ForTestRackManager(numRacks); testWithCluster(serverMap, rm, false, true); } |
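`setConf` wires up an array of cost functions; the scalar the search minimizes is, in effect, a multiplier-weighted sum of the individual functions, and a function with a zero multiplier contributes nothing (which is why `localityCost` is only consulted when its multiplier is positive). A hedged sketch of that composition with hypothetical types, including the early exit against the incumbent cost suggested by `computeCost(cluster, currentCost)`:

```java
// Combines per-aspect costs into the single scalar the balancer minimizes.
class WeightedCost {
    interface CostFunction {
        double multiplier(); // 0 disables the function entirely
        double cost();       // assumed normalized per function
    }

    static double computeCost(CostFunction[] functions, double previousCost) {
        double total = 0;
        for (CostFunction f : functions) {
            if (f.multiplier() <= 0) {
                continue; // disabled functions contribute nothing
            }
            total += f.multiplier() * f.cost();
            if (total > previousCost) {
                return total; // early exit: already worse than the incumbent
            }
        }
        return total;
    }
}
```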
FSTableDescriptors implements TableDescriptors { @Override public HTableDescriptor get(final TableName tablename) throws IOException { invocations++; if (TableName.META_TABLE_NAME.equals(tablename)) { cachehits++; return metaTableDescriptor; } if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) { throw new IOException("No descriptor found for non table = " + tablename); } if (usecache) { HTableDescriptor cachedtdm = this.cache.get(tablename); if (cachedtdm != null) { cachehits++; return cachedtdm; } } HTableDescriptor tdmt = null; try { tdmt = getTableDescriptorFromFs(fs, rootdir, tablename, !fsreadonly); } catch (NullPointerException e) { LOG.debug("Exception during readTableDecriptor. Current table name = " + tablename, e); } catch (IOException ioe) { LOG.debug("Exception during readTableDecriptor. Current table name = " + tablename, ioe); } if (usecache && tdmt != null) { this.cache.put(tablename, tdmt); } return tdmt; } FSTableDescriptors(final Configuration conf); FSTableDescriptors(final Configuration conf, final FileSystem fs, final Path rootdir); FSTableDescriptors(final Configuration conf, final FileSystem fs,
final Path rootdir, final boolean fsreadonly, final boolean usecache); void setCacheOn(); void setCacheOff(); @VisibleForTesting boolean isUsecache(); @Override HTableDescriptor get(final TableName tablename); @Override Map<String, HTableDescriptor> getAll(); @Override Map<String, HTableDescriptor> getByNamespace(String name); @Override void add(HTableDescriptor htd); @Override HTableDescriptor remove(final TableName tablename); boolean isTableInfoExists(TableName tableName); static FileStatus getTableInfoPath(FileSystem fs, Path tableDir); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
Path hbaseRootDir, TableName tableName); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
Path hbaseRootDir, TableName tableName, boolean rewritePb); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir,
boolean rewritePb); void deleteTableDescriptorIfExists(TableName tableName); boolean createTableDescriptor(HTableDescriptor htd); boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation); boolean createTableDescriptorForTableDirectory(Path tableDir,
HTableDescriptor htd, boolean forceCreation); } | @Test public void testNoSuchTable() throws IOException { final String name = "testNoSuchTable"; FileSystem fs = FileSystem.get(UTIL.getConfiguration()); Path rootdir = new Path(UTIL.getDataTestDir(), name); TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); assertNull("There shouldn't be any HTD for this table", htds.get(TableName.valueOf("NoSuchTable"))); }
@Test public void testReadingInvalidDirectoryFromFS() throws IOException { FileSystem fs = FileSystem.get(UTIL.getConfiguration()); try { new FSTableDescriptors(UTIL.getConfiguration(), fs, FSUtils.getRootDir(UTIL.getConfiguration())) .get(TableName.valueOf(HConstants.HBASE_TEMP_DIRECTORY)); fail("Shouldn't be able to read a table descriptor for the archive directory."); } catch (Exception e) { LOG.debug("Correctly got error when reading a table descriptor from the archive directory: " + e.getMessage()); } } |
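`FSTableDescriptors.get` is a read-through cache: answer `hbase:meta` from a canned descriptor, refuse reserved directory names, consult the in-memory cache, and only then fall back to reading the descriptor from the filesystem, populating the cache on success. A minimal generic sketch of that lookup order; the loader below is a hypothetical stand-in for `getTableDescriptorFromFs`:

```java
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Read-through cache: serve from memory when possible, else load and remember.
class ReadThroughCache<K, V> {
    interface Loader<K, V> {
        V load(K key) throws IOException; // may return null if nothing on disk
    }

    private final Map<K, V> cache = new ConcurrentHashMap<>();
    private final Loader<K, V> loader;
    private final boolean useCache;

    ReadThroughCache(Loader<K, V> loader, boolean useCache) {
        this.loader = loader;
        this.useCache = useCache;
    }

    V get(K key) throws IOException {
        if (useCache) {
            V cached = cache.get(key);
            if (cached != null) {
                return cached; // cache hit
            }
        }
        V loaded = loader.load(key);
        if (useCache && loaded != null) {
            cache.put(key, loaded); // remember for next time
        }
        return loaded;
    }
}
```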
DeadServer { public synchronized boolean areDeadServersInProgress() { return processing; } synchronized boolean cleanPreviousInstance(final ServerName newServerName); synchronized boolean isDeadServer(final ServerName serverName); synchronized boolean areDeadServersInProgress(); synchronized Set<ServerName> copyServerNames(); synchronized void add(ServerName sn); synchronized void notifyServer(ServerName sn); synchronized void finish(ServerName sn); synchronized int size(); synchronized boolean isEmpty(); synchronized void cleanAllPreviousInstances(final ServerName newServerName); synchronized String toString(); synchronized List<Pair<ServerName, Long>> copyDeadServersSince(long ts); synchronized Date getTimeOfDeath(final ServerName deadServerName); } | @Test(timeout = 15000) public void testCrashProcedureReplay() { HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); ProcedureExecutor pExecutor = master.getMasterProcedureExecutor(); ServerCrashProcedure proc = new ServerCrashProcedure(hostname123, false, false); ProcedureTestingUtility.submitAndWait(pExecutor, proc); assertFalse(master.getServerManager().getDeadServers().areDeadServersInProgress()); } |
ClusterStatusPublisher extends ScheduledChore { protected List<ServerName> generateDeadServersListToSend() { long since = EnvironmentEdgeManager.currentTime() - messagePeriod * 2; for (Pair<ServerName, Long> dead : getDeadServers(since)) { lastSent.putIfAbsent(dead.getFirst(), 0); } List<Map.Entry<ServerName, Integer>> entries = new ArrayList<Map.Entry<ServerName, Integer>>(); entries.addAll(lastSent.entrySet()); Collections.sort(entries, new Comparator<Map.Entry<ServerName, Integer>>() { @Override public int compare(Map.Entry<ServerName, Integer> o1, Map.Entry<ServerName, Integer> o2) { return o1.getValue().compareTo(o2.getValue()); } }); int max = entries.size() > MAX_SERVER_PER_MESSAGE ? MAX_SERVER_PER_MESSAGE : entries.size(); List<ServerName> res = new ArrayList<ServerName>(max); for (int i = 0; i < max; i++) { Map.Entry<ServerName, Integer> toSend = entries.get(i); if (toSend.getValue() >= (NB_SEND - 1)) { lastSent.remove(toSend.getKey()); } else { lastSent.replace(toSend.getKey(), toSend.getValue(), toSend.getValue() + 1); } res.add(toSend.getKey()); } return res; } ClusterStatusPublisher(HMaster master, Configuration conf,
Class<? extends Publisher> publisherClass); protected ClusterStatusPublisher(); static final String STATUS_PUBLISHER_CLASS; static final Class<? extends ClusterStatusPublisher.Publisher> DEFAULT_STATUS_PUBLISHER_CLASS; static final String STATUS_PUBLISH_PERIOD; static final int DEFAULT_STATUS_PUBLISH_PERIOD; final static int MAX_SERVER_PER_MESSAGE; final static int NB_SEND; } | @Test public void testEmpty() { ClusterStatusPublisher csp = new ClusterStatusPublisher() { @Override protected List<Pair<ServerName, Long>> getDeadServers(long since) { return new ArrayList<Pair<ServerName, Long>>(); } }; Assert.assertTrue(csp.generateDeadServersListToSend().isEmpty()); }
@Test public void testMaxSend() { ClusterStatusPublisher csp = new ClusterStatusPublisher() { @Override protected List<Pair<ServerName, Long>> getDeadServers(long since) { List<Pair<ServerName, Long>> res = new ArrayList<Pair<ServerName, Long>>(); switch ((int) EnvironmentEdgeManager.currentTime()) { case 2: res.add(new Pair<ServerName, Long>(ServerName.valueOf("hn", 10, 10), 1L)); break; case 1000: break; } return res; } }; mee.setValue(2); for (int i = 0; i < ClusterStatusPublisher.NB_SEND; i++) { Assert.assertEquals("i=" + i, 1, csp.generateDeadServersListToSend().size()); } mee.setValue(1000); Assert.assertTrue(csp.generateDeadServersListToSend().isEmpty()); }
@Test public void testOrder() { ClusterStatusPublisher csp = new ClusterStatusPublisher() { @Override protected List<Pair<ServerName, Long>> getDeadServers(long since) { List<Pair<ServerName, Long>> res = new ArrayList<Pair<ServerName, Long>>(); for (int i = 0; i < 25; i++) { res.add(new Pair<ServerName, Long>(ServerName.valueOf("hn" + i, 10, 10), 20L)); } return res; } }; mee.setValue(3); List<ServerName> allSNS = csp.generateDeadServersListToSend(); Assert.assertEquals(10, ClusterStatusPublisher.MAX_SERVER_PER_MESSAGE); Assert.assertEquals(10, allSNS.size()); List<ServerName> nextMes = csp.generateDeadServersListToSend(); Assert.assertEquals(10, nextMes.size()); for (ServerName sn : nextMes) { if (!allSNS.contains(sn)) { allSNS.add(sn); } } Assert.assertEquals(20, allSNS.size()); nextMes = csp.generateDeadServersListToSend(); Assert.assertEquals(10, nextMes.size()); for (ServerName sn : nextMes) { if (!allSNS.contains(sn)) { allSNS.add(sn); } } Assert.assertEquals(25, allSNS.size()); nextMes = csp.generateDeadServersListToSend(); Assert.assertEquals(10, nextMes.size()); for (ServerName sn : nextMes) { if (!allSNS.contains(sn)) { allSNS.add(sn); } } Assert.assertEquals(25, allSNS.size()); } |
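`generateDeadServersListToSend` implements a bounded retransmit: each dead server is announced `NB_SEND` times in total, the least-announced servers go first, and at most `MAX_SERVER_PER_MESSAGE` fit in one message — exactly the rotation testOrder walks through (10 + 10 + 5 new servers across three messages). A simplified sketch of that policy over a plain map from server to send count; `MAX_PER_MESSAGE` mirrors the 10 asserted in the test, while the `NB_SEND` value here is a placeholder:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Announce every entry NB_SEND times, least-announced first, MAX per message.
class BoundedRetransmit {
    static final int MAX_PER_MESSAGE = 10;
    static final int NB_SEND = 5; // placeholder, not necessarily HBase's value

    private final Map<String, Integer> sent = new HashMap<>();

    void markDead(String server) {
        sent.putIfAbsent(server, 0);
    }

    List<String> nextMessage() {
        List<Map.Entry<String, Integer>> entries = new ArrayList<>(sent.entrySet());
        entries.sort(Map.Entry.comparingByValue()); // least-announced first
        int max = Math.min(entries.size(), MAX_PER_MESSAGE);
        List<String> message = new ArrayList<>(max);
        for (int i = 0; i < max; i++) {
            Map.Entry<String, Integer> e = entries.get(i);
            if (e.getValue() >= NB_SEND - 1) {
                sent.remove(e.getKey());    // final announcement for this server
            } else {
                sent.put(e.getKey(), e.getValue() + 1);
            }
            message.add(e.getKey());
        }
        return message;
    }
}
```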
CatalogJanitor extends ScheduledChore { boolean cleanParent(final HRegionInfo parent, Result rowContent) throws IOException { boolean result = false; if (rowContent.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER) != null) { return result; } PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(rowContent); Pair<Boolean, Boolean> a = checkDaughterInFs(parent, daughters.getFirst()); Pair<Boolean, Boolean> b = checkDaughterInFs(parent, daughters.getSecond()); if (hasNoReferences(a) && hasNoReferences(b)) { LOG.debug("Deleting region " + parent.getRegionNameAsString() + " because daughter splits no longer hold references"); FileSystem fs = this.services.getMasterFileSystem().getFileSystem(); if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + parent); HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent); MetaTableAccessor.deleteRegion(this.connection, parent); result = true; } return result; } CatalogJanitor(final Server server, final MasterServices services); boolean setEnabled(final boolean enabled); boolean cleanMergeQualifier(final HRegionInfo region); } | @Test public void testCleanParent() throws IOException, InterruptedException { HBaseTestingUtility htu = new HBaseTestingUtility(); setRootDirAndCleanIt(htu, "testCleanParent"); Server server = new MockServer(htu); try { MasterServices services = new MockMasterServices(server); CatalogJanitor janitor = new CatalogJanitor(server, services); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("table")); htd.addFamily(new HColumnDescriptor("f")); HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee")); HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc")); HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee")); Result r = createResult(parent, splita, splitb); Path rootdir = services.getMasterFileSystem().getRootDir(); Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName()); Path storedir = HStore.getStoreHomedir(tabledir, splita, htd.getColumnFamilies()[0].getName()); Reference ref = Reference.createTopReference(Bytes.toBytes("ccc")); long now = System.currentTimeMillis(); Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName()); FileSystem fs = services.getMasterFileSystem().getFileSystem(); Path path = ref.write(fs, p); assertTrue(fs.exists(path)); assertFalse(janitor.cleanParent(parent, r)); assertTrue(fs.delete(p, true)); assertTrue(janitor.cleanParent(parent, r)); } finally { server.stop("shutdown"); } }
@Test public void testArchiveOldRegion() throws Exception { String table = "table"; HBaseTestingUtility htu = new HBaseTestingUtility(); setRootDirAndCleanIt(htu, "testCleanParent"); Server server = new MockServer(htu); MasterServices services = new MockMasterServices(server); CatalogJanitor janitor = new CatalogJanitor(server, services); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table)); htd.addFamily(new HColumnDescriptor("f")); HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee")); HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc")); HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee")); Result parentMetaRow = createResult(parent, splita, splitb); FileSystem fs = FileSystem.get(htu.getConfiguration()); Path rootdir = services.getMasterFileSystem().getRootDir(); FSUtils.setRootDir(fs.getConf(), rootdir); Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName()); Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName()); Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent, tabledir, htd.getColumnFamilies()[0].getName()); LOG.debug("Table dir:" + tabledir); LOG.debug("Store dir:" + storedir); LOG.debug("Store archive dir:" + storeArchive); FileStatus[] mockFiles = addMockStoreFiles(2, services, storedir); FileStatus[] storeFiles = fs.listStatus(storedir); int index = 0; for (FileStatus file : storeFiles) { LOG.debug("Have store file:" + file.getPath()); assertEquals("Got unexpected store file", mockFiles[index].getPath(), storeFiles[index].getPath()); index++; } assertTrue(janitor.cleanParent(parent, parentMetaRow)); LOG.debug("Finished cleanup of parent region"); FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive); logFiles("archived files", storeFiles); logFiles("archived files", archivedStoreFiles); assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs); FSUtils.delete(fs, rootdir, true); services.stop("Test finished"); server.stop("Test finished"); janitor.cancel(true); }
@Test public void testDuplicateHFileResolution() throws Exception { String table = "table"; HBaseTestingUtility htu = new HBaseTestingUtility(); setRootDirAndCleanIt(htu, "testCleanParent"); Server server = new MockServer(htu); MasterServices services = new MockMasterServices(server); CatalogJanitor janitor = new CatalogJanitor(server, services); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table)); htd.addFamily(new HColumnDescriptor("f")); HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee")); HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc")); HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee")); Result r = createResult(parent, splita, splitb); FileSystem fs = FileSystem.get(htu.getConfiguration()); Path rootdir = services.getMasterFileSystem().getRootDir(); FSUtils.setRootDir(fs.getConf(), rootdir); Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable()); Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName()); System.out.println("Old root:" + rootdir); System.out.println("Old table:" + tabledir); System.out.println("Old store:" + storedir); Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent, tabledir, htd.getColumnFamilies()[0].getName()); System.out.println("Old archive:" + storeArchive); addMockStoreFiles(2, services, storedir); FileStatus[] storeFiles = fs.listStatus(storedir); assertTrue(janitor.cleanParent(parent, r)); FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive); assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs); addMockStoreFiles(2, services, storedir); assertTrue(janitor.cleanParent(parent, r)); archivedStoreFiles = fs.listStatus(storeArchive); assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs, true); services.stop("Test finished"); server.stop("shutdown"); janitor.cancel(true); } |
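`cleanParent` only garbage-collects a split parent once neither daughter region still holds a reference back into it, and it archives the parent's files before deleting its metadata row, so a crash between the two steps loses nothing. A tiny sketch of that two-gate check, with all names hypothetical:

```java
// A split parent may be reclaimed only when neither daughter still holds
// a reference file pointing back into it; archive before deleting metadata.
class ParentJanitorSketch {
    interface Daughter {
        boolean existsInFs();
        boolean hasReferencesToParent();
    }
    interface Archiver {
        void archiveParentFiles();   // move files to the archive directory
        void deleteParentMetadata(); // only after the files are safe
    }

    static boolean cleanParent(Daughter a, Daughter b, Archiver archiver) {
        // A daughter is "clear" if it is gone from the fs or holds no references.
        boolean aClear = !a.existsInFs() || !a.hasReferencesToParent();
        boolean bClear = !b.existsInFs() || !b.hasReferencesToParent();
        if (aClear && bClear) {
            archiver.archiveParentFiles();
            archiver.deleteParentMetadata();
            return true;
        }
        return false; // a daughter still reads through the parent's files
    }
}
```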
SplitLogManager { Task findOrCreateOrphanTask(String path) { Task orphanTask = new Task(); Task task; task = tasks.putIfAbsent(path, orphanTask); if (task == null) { LOG.info("creating orphan task " + path); SplitLogCounters.tot_mgr_orphan_task_acquired.incrementAndGet(); task = orphanTask; } return task; } SplitLogManager(Server server, Configuration conf, Stoppable stopper,
MasterServices master, ServerName serverName); @VisibleForTesting static FileStatus[] getFileList(final Configuration conf, final List<Path> logDirs,
final PathFilter filter); long splitLogDistributed(final Path logDir); long splitLogDistributed(final List<Path> logDirs); long splitLogDistributed(final Set<ServerName> serverNames, final List<Path> logDirs,
PathFilter filter); void stop(); void setRecoveryMode(boolean isForInitialization); void markRegionsRecovering(ServerName server, Set<HRegionInfo> userRegions); boolean isLogReplaying(); boolean isLogSplitting(); RecoveryMode getRecoveryMode(); static final int DEFAULT_UNASSIGNED_TIMEOUT; } | @Test (timeout=180000) public void testOrphanTaskAcquisition() throws Exception { LOG.info("TestOrphanTaskAcquisition"); String tasknode = ZKSplitLog.getEncodedNodeName(zkw, "orphan/test/slash"); SplitLogTask slt = new SplitLogTask.Owned(DUMMY_MASTER, this.mode); zkw.getRecoverableZooKeeper().create(tasknode, slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); slm = new SplitLogManager(ds, conf, stopper, master, DUMMY_MASTER); waitForCounter(tot_mgr_orphan_task_acquired, 0, 1, to/2); Task task = slm.findOrCreateOrphanTask(tasknode); assertTrue(task.isOrphan()); waitForCounter(tot_mgr_heartbeat, 0, 1, to/2); assertFalse(task.isUnassigned()); long curt = System.currentTimeMillis(); assertTrue((task.last_update <= curt) && (task.last_update > (curt - 1000))); LOG.info("waiting for manager to resubmit the orphan task"); waitForCounter(tot_mgr_resubmit, 0, 1, to + to/2); assertTrue(task.isUnassigned()); waitForCounter(tot_mgr_rescan, 0, 1, to + to/2); }
@Test (timeout=180000) public void testUnassignedOrphan() throws Exception { LOG.info("TestUnassignedOrphan - an unassigned task is resubmitted at" + " startup"); String tasknode = ZKSplitLog.getEncodedNodeName(zkw, "orphan/test/slash"); SplitLogTask slt = new SplitLogTask.Unassigned(DUMMY_MASTER, this.mode); zkw.getRecoverableZooKeeper().create(tasknode, slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); int version = ZKUtil.checkExists(zkw, tasknode); slm = new SplitLogManager(ds, conf, stopper, master, DUMMY_MASTER); waitForCounter(tot_mgr_orphan_task_acquired, 0, 1, to/2); Task task = slm.findOrCreateOrphanTask(tasknode); assertTrue(task.isOrphan()); assertTrue(task.isUnassigned()); waitForCounter(tot_mgr_rescan, 0, 1, to/2); Task task2 = slm.findOrCreateOrphanTask(tasknode); assertTrue(task == task2); LOG.debug("task = " + task); assertEquals(1L, tot_mgr_resubmit.get()); assertEquals(1, task.incarnation.get()); assertEquals(0, task.unforcedResubmits.get()); assertTrue(task.isOrphan()); assertTrue(task.isUnassigned()); assertTrue(ZKUtil.checkExists(zkw, tasknode) > version); } |
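`findOrCreateOrphanTask` relies on `ConcurrentMap.putIfAbsent` to make task creation race-free: every caller constructs a fresh Task, but exactly one wins the insert and all callers end up holding the same instance, which is what testUnassignedOrphan's `task == task2` identity check verifies. The idiom in isolation:

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Race-free get-or-create: all threads converge on one shared instance per key.
class TaskRegistry {
    static final class Task {
        final String path;
        Task(String path) { this.path = path; }
    }

    private final ConcurrentMap<String, Task> tasks = new ConcurrentHashMap<>();

    Task findOrCreate(String path) {
        Task fresh = new Task(path);
        Task existing = tasks.putIfAbsent(path, fresh);
        // putIfAbsent returns null when our fresh task won the race.
        return existing == null ? fresh : existing;
    }

    public static void main(String[] args) {
        TaskRegistry registry = new TaskRegistry();
        Task a = registry.findOrCreate("orphan/test/slash");
        Task b = registry.findOrCreate("orphan/test/slash");
        System.out.println(a == b); // true: same instance, as in the test
    }
}
```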
SplitLogManager { void handleDeadWorker(ServerName workerName) { synchronized (deadWorkersLock) { if (deadWorkers == null) { deadWorkers = new HashSet<ServerName>(100); } deadWorkers.add(workerName); } LOG.info("dead splitlog worker " + workerName); } SplitLogManager(Server server, Configuration conf, Stoppable stopper,
MasterServices master, ServerName serverName); @VisibleForTesting static FileStatus[] getFileList(final Configuration conf, final List<Path> logDirs,
final PathFilter filter); long splitLogDistributed(final Path logDir); long splitLogDistributed(final List<Path> logDirs); long splitLogDistributed(final Set<ServerName> serverNames, final List<Path> logDirs,
PathFilter filter); void stop(); void setRecoveryMode(boolean isForInitialization); void markRegionsRecovering(ServerName server, Set<HRegionInfo> userRegions); boolean isLogReplaying(); boolean isLogSplitting(); RecoveryMode getRecoveryMode(); static final int DEFAULT_UNASSIGNED_TIMEOUT; } | @Test (timeout=180000) public void testDeadWorker() throws Exception { LOG.info("testDeadWorker"); conf.setLong("hbase.splitlog.max.resubmit", 0); slm = new SplitLogManager(ds, conf, stopper, master, DUMMY_MASTER); TaskBatch batch = new TaskBatch(); String tasknode = submitTaskAndWait(batch, "foo/1"); int version = ZKUtil.checkExists(zkw, tasknode); final ServerName worker1 = ServerName.valueOf("worker1,1,1"); SplitLogTask slt = new SplitLogTask.Owned(worker1, this.mode); ZKUtil.setData(zkw, tasknode, slt.toByteArray()); if (tot_mgr_heartbeat.get() == 0) waitForCounter(tot_mgr_heartbeat, 0, 1, to/2); slm.handleDeadWorker(worker1); if (tot_mgr_resubmit.get() == 0) waitForCounter(tot_mgr_resubmit, 0, 1, to+to/2); if (tot_mgr_resubmit_dead_server_task.get() == 0) { waitForCounter(tot_mgr_resubmit_dead_server_task, 0, 1, to + to/2); } int version1 = ZKUtil.checkExists(zkw, tasknode); assertTrue(version1 > version); byte[] taskstate = ZKUtil.getData(zkw, tasknode); slt = SplitLogTask.parseFrom(taskstate); assertTrue(slt.isUnassigned(DUMMY_MASTER)); return; } |
SplitLogManager { public long splitLogDistributed(final Path logDir) throws IOException { List<Path> logDirs = new ArrayList<Path>(); logDirs.add(logDir); return splitLogDistributed(logDirs); } SplitLogManager(Server server, Configuration conf, Stoppable stopper,
MasterServices master, ServerName serverName); @VisibleForTesting static FileStatus[] getFileList(final Configuration conf, final List<Path> logDirs,
final PathFilter filter); long splitLogDistributed(final Path logDir); long splitLogDistributed(final List<Path> logDirs); long splitLogDistributed(final Set<ServerName> serverNames, final List<Path> logDirs,
PathFilter filter); void stop(); void setRecoveryMode(boolean isForInitialization); void markRegionsRecovering(ServerName server, Set<HRegionInfo> userRegions); boolean isLogReplaying(); boolean isLogSplitting(); RecoveryMode getRecoveryMode(); static final int DEFAULT_UNASSIGNED_TIMEOUT; } | @Test (timeout=180000) public void testEmptyLogDir() throws Exception { LOG.info("testEmptyLogDir"); slm = new SplitLogManager(ds, conf, stopper, master, DUMMY_MASTER); FileSystem fs = TEST_UTIL.getTestFileSystem(); Path emptyLogDirPath = new Path(fs.getWorkingDirectory(), UUID.randomUUID().toString()); fs.mkdirs(emptyLogDirPath); slm.splitLogDistributed(emptyLogDirPath); assertFalse(fs.exists(emptyLogDirPath)); } |
SplitLogManager { void removeStaleRecoveringRegions(final Set<ServerName> failedServers) throws IOException, InterruptedIOException { Set<String> knownFailedServers = new HashSet<String>(); if (failedServers != null) { for (ServerName tmpServerName : failedServers) { knownFailedServers.add(tmpServerName.getServerName()); } } this.recoveringRegionLock.lock(); try { ((BaseCoordinatedStateManager) server.getCoordinatedStateManager()) .getSplitLogManagerCoordination().removeStaleRecoveringRegions(knownFailedServers); } finally { this.recoveringRegionLock.unlock(); } } SplitLogManager(Server server, Configuration conf, Stoppable stopper,
MasterServices master, ServerName serverName); @VisibleForTesting static FileStatus[] getFileList(final Configuration conf, final List<Path> logDirs,
final PathFilter filter); long splitLogDistributed(final Path logDir); long splitLogDistributed(final List<Path> logDirs); long splitLogDistributed(final Set<ServerName> serverNames, final List<Path> logDirs,
PathFilter filter); void stop(); void setRecoveryMode(boolean isForInitialization); void markRegionsRecovering(ServerName server, Set<HRegionInfo> userRegions); boolean isLogReplaying(); boolean isLogSplitting(); RecoveryMode getRecoveryMode(); static final int DEFAULT_UNASSIGNED_TIMEOUT; } | @Test(timeout = 300000) public void testRecoveryRegionRemovedFromZK() throws Exception { LOG.info("testRecoveryRegionRemovedFromZK"); conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false); String nodePath = ZKUtil.joinZNode(zkw.recoveringRegionsZNode, HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()); ZKUtil.createSetData(zkw, nodePath, ZKUtil.positionToByteArray(0L)); slm = new SplitLogManager(ds, conf, stopper, master, DUMMY_MASTER); slm.removeStaleRecoveringRegions(null); List<String> recoveringRegions = zkw.getRecoverableZooKeeper().getChildren(zkw.recoveringRegionsZNode, false); assertTrue("Recovery regions isn't cleaned", recoveringRegions.isEmpty()); } |
SimpleRegionNormalizer implements RegionNormalizer { @Override public List<NormalizationPlan> computePlanForTable(TableName table) throws HBaseIOException { if (table == null || table.isSystemTable()) { LOG.debug("Normalization of system table " + table + " isn't allowed"); return null; } List<NormalizationPlan> plans = new ArrayList<NormalizationPlan>(); List<HRegionInfo> tableRegions = masterServices.getAssignmentManager().getRegionStates(). getRegionsOfTable(table); if (tableRegions == null || tableRegions.size() < MIN_REGION_COUNT) { int nrRegions = tableRegions == null ? 0 : tableRegions.size(); LOG.debug("Table " + table + " has " + nrRegions + " regions, required min number" + " of regions for normalizer to run is " + MIN_REGION_COUNT + ", not running normalizer"); return null; } LOG.debug("Computing normalization plan for table: " + table + ", number of regions: " + tableRegions.size()); long totalSizeMb = 0; for (int i = 0; i < tableRegions.size(); i++) { HRegionInfo hri = tableRegions.get(i); long regionSize = getRegionSize(hri); totalSizeMb += regionSize; } double avgRegionSize = totalSizeMb / (double) tableRegions.size(); LOG.debug("Table " + table + ", total aggregated regions size: " + totalSizeMb); LOG.debug("Table " + table + ", average region size: " + avgRegionSize); int candidateIdx = 0; while (candidateIdx < tableRegions.size()) { HRegionInfo hri = tableRegions.get(candidateIdx); long regionSize = getRegionSize(hri); if (regionSize > 2 * avgRegionSize) { LOG.info("Table " + table + ", large region " + hri.getRegionNameAsString() + " has size " + regionSize + ", more than twice avg size, splitting"); plans.add(new SplitNormalizationPlan(hri, null)); } else { if (candidateIdx == tableRegions.size()-1) { break; } HRegionInfo hri2 = tableRegions.get(candidateIdx+1); long regionSize2 = getRegionSize(hri2); if (regionSize + regionSize2 < avgRegionSize) { LOG.info("Table " + table + ", small region size: " + regionSize + " plus its neighbor size: " + regionSize2 + ", less than the avg size " + avgRegionSize + ", merging them"); plans.add(new MergeNormalizationPlan(hri, hri2)); candidateIdx++; } } candidateIdx++; } if (plans.isEmpty()) { LOG.debug("No normalization needed, regions look good for table: " + table); return null; } Collections.sort(plans, planComparator); return plans; } @Override void setMasterServices(MasterServices masterServices); @Override List<NormalizationPlan> computePlanForTable(TableName table); } | @Test public void testNoNormalizationForMetaTable() throws HBaseIOException { TableName testTable = TableName.META_TABLE_NAME; List<HRegionInfo> hris = new ArrayList<>(); Map<byte[], Integer> regionSizes = new HashMap<>(); setupMocksForNormalizer(regionSizes, hris); List<NormalizationPlan> plans = normalizer.computePlanForTable(testTable); assertTrue(plans == null); }
@Test public void testNoNormalizationIfTooFewRegions() throws HBaseIOException { TableName testTable = TableName.valueOf("testSplitOfSmallRegion"); List<HRegionInfo> hris = new ArrayList<>(); Map<byte[], Integer> regionSizes = new HashMap<>(); HRegionInfo hri1 = new HRegionInfo(testTable, Bytes.toBytes("aaa"), Bytes.toBytes("bbb")); hris.add(hri1); regionSizes.put(hri1.getRegionName(), 10); HRegionInfo hri2 = new HRegionInfo(testTable, Bytes.toBytes("bbb"), Bytes.toBytes("ccc")); hris.add(hri2); regionSizes.put(hri2.getRegionName(), 15); setupMocksForNormalizer(regionSizes, hris); List<NormalizationPlan> plans = normalizer.computePlanForTable(testTable); assertTrue(plans == null); }
@Test public void testNoNormalizationOnNormalizedCluster() throws HBaseIOException { TableName testTable = TableName.valueOf("testSplitOfSmallRegion"); List<HRegionInfo> hris = new ArrayList<>(); Map<byte[], Integer> regionSizes = new HashMap<>(); HRegionInfo hri1 = new HRegionInfo(testTable, Bytes.toBytes("aaa"), Bytes.toBytes("bbb")); hris.add(hri1); regionSizes.put(hri1.getRegionName(), 10); HRegionInfo hri2 = new HRegionInfo(testTable, Bytes.toBytes("bbb"), Bytes.toBytes("ccc")); hris.add(hri2); regionSizes.put(hri2.getRegionName(), 15); HRegionInfo hri3 = new HRegionInfo(testTable, Bytes.toBytes("ccc"), Bytes.toBytes("ddd")); hris.add(hri3); regionSizes.put(hri3.getRegionName(), 8); HRegionInfo hri4 = new HRegionInfo(testTable, Bytes.toBytes("ddd"), Bytes.toBytes("eee")); hris.add(hri4); regionSizes.put(hri4.getRegionName(), 10); setupMocksForNormalizer(regionSizes, hris); List<NormalizationPlan> plans = normalizer.computePlanForTable(testTable); assertTrue(plans == null); }
@Test public void testMergeOfSmallRegions() throws HBaseIOException { TableName testTable = TableName.valueOf("testMergeOfSmallRegions"); List<HRegionInfo> hris = new ArrayList<>(); Map<byte[], Integer> regionSizes = new HashMap<>(); HRegionInfo hri1 = new HRegionInfo(testTable, Bytes.toBytes("aaa"), Bytes.toBytes("bbb")); hris.add(hri1); regionSizes.put(hri1.getRegionName(), 15); HRegionInfo hri2 = new HRegionInfo(testTable, Bytes.toBytes("bbb"), Bytes.toBytes("ccc")); hris.add(hri2); regionSizes.put(hri2.getRegionName(), 5); HRegionInfo hri3 = new HRegionInfo(testTable, Bytes.toBytes("ccc"), Bytes.toBytes("ddd")); hris.add(hri3); regionSizes.put(hri3.getRegionName(), 5); HRegionInfo hri4 = new HRegionInfo(testTable, Bytes.toBytes("ddd"), Bytes.toBytes("eee")); hris.add(hri4); regionSizes.put(hri4.getRegionName(), 15); HRegionInfo hri5 = new HRegionInfo(testTable, Bytes.toBytes("eee"), Bytes.toBytes("fff")); hris.add(hri5); regionSizes.put(hri5.getRegionName(), 16); setupMocksForNormalizer(regionSizes, hris); List<NormalizationPlan> plans = normalizer.computePlanForTable(testTable); NormalizationPlan plan = plans.get(0); assertTrue(plan instanceof MergeNormalizationPlan); assertEquals(hri2, ((MergeNormalizationPlan) plan).getFirstRegion()); assertEquals(hri3, ((MergeNormalizationPlan) plan).getSecondRegion()); }
@Test public void testMergeOfSecondSmallestRegions() throws HBaseIOException { TableName testTable = TableName.valueOf("testMergeOfSmallRegions"); List<HRegionInfo> hris = new ArrayList<>(); Map<byte[], Integer> regionSizes = new HashMap<>(); HRegionInfo hri1 = new HRegionInfo(testTable, Bytes.toBytes("aaa"), Bytes.toBytes("bbb")); hris.add(hri1); regionSizes.put(hri1.getRegionName(), 1); HRegionInfo hri2 = new HRegionInfo(testTable, Bytes.toBytes("bbb"), Bytes.toBytes("ccc")); hris.add(hri2); regionSizes.put(hri2.getRegionName(), 10000); HRegionInfo hri3 = new HRegionInfo(testTable, Bytes.toBytes("ccc"), Bytes.toBytes("ddd")); hris.add(hri3); regionSizes.put(hri3.getRegionName(), 10000); HRegionInfo hri4 = new HRegionInfo(testTable, Bytes.toBytes("ddd"), Bytes.toBytes("eee")); hris.add(hri4); regionSizes.put(hri4.getRegionName(), 10000); HRegionInfo hri5 = new HRegionInfo(testTable, Bytes.toBytes("eee"), Bytes.toBytes("fff")); hris.add(hri5); regionSizes.put(hri5.getRegionName(), 2700); HRegionInfo hri6 = new HRegionInfo(testTable, Bytes.toBytes("fff"), Bytes.toBytes("ggg")); hris.add(hri6); regionSizes.put(hri6.getRegionName(), 2700); setupMocksForNormalizer(regionSizes, hris); List<NormalizationPlan> plans = normalizer.computePlanForTable(testTable); NormalizationPlan plan = plans.get(0); assertTrue(plan instanceof MergeNormalizationPlan); assertEquals(hri5, ((MergeNormalizationPlan) plan).getFirstRegion()); assertEquals(hri6, ((MergeNormalizationPlan) plan).getSecondRegion()); }
@Test public void testMergeOfSmallNonAdjacentRegions() throws HBaseIOException { TableName testTable = TableName.valueOf("testMergeOfSmallRegions"); List<HRegionInfo> hris = new ArrayList<>(); Map<byte[], Integer> regionSizes = new HashMap<>(); HRegionInfo hri1 = new HRegionInfo(testTable, Bytes.toBytes("aaa"), Bytes.toBytes("bbb")); hris.add(hri1); regionSizes.put(hri1.getRegionName(), 15); HRegionInfo hri2 = new HRegionInfo(testTable, Bytes.toBytes("bbb"), Bytes.toBytes("ccc")); hris.add(hri2); regionSizes.put(hri2.getRegionName(), 5); HRegionInfo hri3 = new HRegionInfo(testTable, Bytes.toBytes("ccc"), Bytes.toBytes("ddd")); hris.add(hri3); regionSizes.put(hri3.getRegionName(), 16); HRegionInfo hri4 = new HRegionInfo(testTable, Bytes.toBytes("ddd"), Bytes.toBytes("eee")); hris.add(hri4); regionSizes.put(hri4.getRegionName(), 15); HRegionInfo hri5 = new HRegionInfo(testTable, Bytes.toBytes("ddd"), Bytes.toBytes("eee")); hris.add(hri4); regionSizes.put(hri5.getRegionName(), 5); setupMocksForNormalizer(regionSizes, hris); List<NormalizationPlan> plans = normalizer.computePlanForTable(testTable); assertTrue(plans == null); }
@Test public void testSplitOfLargeRegion() throws HBaseIOException { TableName testTable = TableName.valueOf("testSplitOfLargeRegion"); List<HRegionInfo> hris = new ArrayList<>(); Map<byte[], Integer> regionSizes = new HashMap<>(); HRegionInfo hri1 = new HRegionInfo(testTable, Bytes.toBytes("aaa"), Bytes.toBytes("bbb")); hris.add(hri1); regionSizes.put(hri1.getRegionName(), 8); HRegionInfo hri2 = new HRegionInfo(testTable, Bytes.toBytes("bbb"), Bytes.toBytes("ccc")); hris.add(hri2); regionSizes.put(hri2.getRegionName(), 6); HRegionInfo hri3 = new HRegionInfo(testTable, Bytes.toBytes("ccc"), Bytes.toBytes("ddd")); hris.add(hri3); regionSizes.put(hri3.getRegionName(), 10); HRegionInfo hri4 = new HRegionInfo(testTable, Bytes.toBytes("ddd"), Bytes.toBytes("eee")); hris.add(hri4); regionSizes.put(hri4.getRegionName(), 30); setupMocksForNormalizer(regionSizes, hris); List<NormalizationPlan> plans = normalizer.computePlanForTable(testTable); NormalizationPlan plan = plans.get(0); assertTrue(plan instanceof SplitNormalizationPlan); assertEquals(hri4, ((SplitNormalizationPlan) plan).getRegionInfo()); } |
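`computePlanForTable` applies two thresholds against the average region size: a region more than twice the average gets a split plan, and two adjacent regions whose combined size is below the average get a merge plan (the merge consumes both candidates, hence the extra `candidateIdx++`). A compact sketch of that heuristic over raw sizes, mirroring the tests above:

```java
import java.util.ArrayList;
import java.util.List;

// Split regions > 2x average; merge adjacent pairs whose sum < average.
class NormalizerSketch {
    static List<String> computePlans(long[] sizes) {
        List<String> plans = new ArrayList<>();
        long total = 0;
        for (long s : sizes) {
            total += s;
        }
        double avg = total / (double) sizes.length;
        int i = 0;
        while (i < sizes.length) {
            if (sizes[i] > 2 * avg) {
                plans.add("SPLIT region " + i);
            } else if (i < sizes.length - 1 && sizes[i] + sizes[i + 1] < avg) {
                plans.add("MERGE regions " + i + " and " + (i + 1));
                i++; // the neighbor was consumed by the merge
            }
            i++;
        }
        return plans;
    }

    public static void main(String[] args) {
        // Matches testMergeOfSmallRegions: 5 and 5 merge against an average of 11.2.
        System.out.println(computePlans(new long[] {15, 5, 5, 15, 16}));
        // Matches testSplitOfLargeRegion: 30 > 2 * 13.5, so it splits.
        System.out.println(computePlans(new long[] {8, 6, 10, 30}));
    }
}
```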
StealJobQueue extends PriorityBlockingQueue<T> { @Override public T take() throws InterruptedException { lock.lockInterruptibly(); try { while (true) { T retVal = this.poll(); if (retVal == null) { retVal = stealFromQueue.poll(); } if (retVal == null) { notEmpty.await(); } else { return retVal; } } } finally { lock.unlock(); } } StealJobQueue(); BlockingQueue<T> getStealFromQueue(); @Override boolean offer(T t); @Override T take(); @Override T poll(long timeout, TimeUnit unit); } | @Test public void testTake() throws InterruptedException { stealJobQueue.offer(3); stealFromQueue.offer(10); stealJobQueue.offer(15); stealJobQueue.offer(4); assertEquals(3, stealJobQueue.take().intValue()); assertEquals(4, stealJobQueue.take().intValue()); assertEquals("always take from the main queue before trying to steal", 15, stealJobQueue.take().intValue()); assertEquals(10, stealJobQueue.take().intValue()); assertTrue(stealFromQueue.isEmpty()); assertTrue(stealJobQueue.isEmpty()); } |
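`StealJobQueue.take` drains its own queue first and only steals from the secondary queue when the primary is empty, which is the ordering testTake asserts (3, 4, 15 from the main queue before 10 from the steal-from queue). A simplified poll-then-steal sketch that uses timed polling instead of the original's shared lock and condition — a deliberate simplification, not the real signaling scheme:

```java
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.PriorityBlockingQueue;
import java.util.concurrent.TimeUnit;

// Prefer the primary queue; fall back to stealing only when it is empty.
class StealingConsumer<T extends Comparable<T>> {
    private final BlockingQueue<T> primary = new PriorityBlockingQueue<>();
    private final BlockingQueue<T> stealFrom = new PriorityBlockingQueue<>();

    BlockingQueue<T> getPrimary() { return primary; }
    BlockingQueue<T> getStealFrom() { return stealFrom; }

    T take() throws InterruptedException {
        while (true) {
            T item = primary.poll();
            if (item == null) {
                item = stealFrom.poll(); // nothing local: try to steal
            }
            if (item != null) {
                return item;
            }
            // Timed wait stands in for the condition-based signaling in StealJobQueue.
            item = primary.poll(10, TimeUnit.MILLISECONDS);
            if (item != null) {
                return item;
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        StealingConsumer<Integer> c = new StealingConsumer<>();
        c.getPrimary().offer(3);
        c.getStealFrom().offer(10);
        c.getPrimary().offer(15);
        c.getPrimary().offer(4);
        // Prints 3, 4, 15 (priority order from primary), then 10 (stolen).
        for (int i = 0; i < 4; i++) {
            System.out.println(c.take());
        }
    }
}
```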
CleanerChore extends ScheduledChore { @Override protected void chore() { try { FileStatus[] files = FSUtils.listStatus(this.fs, this.oldFileDir); checkAndDeleteEntries(files); } catch (IOException e) { e = RemoteExceptionHandler.checkIOException(e); LOG.warn("Error while cleaning the logs", e); } } CleanerChore(String name, final int sleepPeriod, final Stoppable s, Configuration conf,
FileSystem fs, Path oldFileDir, String confKey); @Override void cleanup(); } | @Test public void testSavesFilesOnRequest() throws Exception { Stoppable stop = new StoppableImplementation(); Configuration conf = UTIL.getConfiguration(); Path testDir = UTIL.getDataTestDir(); FileSystem fs = UTIL.getTestFileSystem(); String confKey = "hbase.test.cleaner.delegates"; conf.set(confKey, NeverDelete.class.getName()); AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey); Path parent = new Path(testDir, "parent"); Path file = new Path(parent, "someFile"); fs.mkdirs(parent); fs.create(file).close(); assertTrue("Test file didn't get created.", fs.exists(file)); chore.chore(); assertTrue("File didn't get deleted", fs.exists(file)); assertTrue("Empty directory didn't get deleted", fs.exists(parent)); }
@Test public void testDeletesEmptyDirectories() throws Exception { Stoppable stop = new StoppableImplementation(); Configuration conf = UTIL.getConfiguration(); Path testDir = UTIL.getDataTestDir(); FileSystem fs = UTIL.getTestFileSystem(); String confKey = "hbase.test.cleaner.delegates"; conf.set(confKey, AlwaysDelete.class.getName()); AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey); Path parent = new Path(testDir, "parent"); Path child = new Path(parent, "child"); Path emptyChild = new Path(parent, "emptyChild"); Path file = new Path(child, "someFile"); fs.mkdirs(child); fs.mkdirs(emptyChild); fs.create(file).close(); Path topFile = new Path(testDir, "topFile"); fs.create(topFile).close(); assertTrue("Test file didn't get created.", fs.exists(file)); assertTrue("Test file didn't get created.", fs.exists(topFile)); chore.chore(); assertFalse("File didn't get deleted", fs.exists(topFile)); assertFalse("File didn't get deleted", fs.exists(file)); assertFalse("Empty directory didn't get deleted", fs.exists(child)); assertFalse("Empty directory didn't get deleted", fs.exists(parent)); }
@Test public void testDoesNotCheckDirectories() throws Exception { Stoppable stop = new StoppableImplementation(); Configuration conf = UTIL.getConfiguration(); Path testDir = UTIL.getDataTestDir(); FileSystem fs = UTIL.getTestFileSystem(); String confKey = "hbase.test.cleaner.delegates"; conf.set(confKey, AlwaysDelete.class.getName()); AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey); AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0); AlwaysDelete spy = Mockito.spy(delegate); chore.cleanersChain.set(0, spy); Path parent = new Path(testDir, "parent"); Path file = new Path(parent, "someFile"); fs.mkdirs(parent); assertTrue("Test parent didn't get created.", fs.exists(parent)); fs.create(file).close(); assertTrue("Test file didn't get created.", fs.exists(file)); FileStatus fStat = fs.getFileStatus(parent); chore.chore(); Mockito.verify(spy, Mockito.never()).isFileDeletable(fStat); Mockito.reset(spy); }
@Test public void testStoppedCleanerDoesNotDeleteFiles() throws Exception { Stoppable stop = new StoppableImplementation(); Configuration conf = UTIL.getConfiguration(); Path testDir = UTIL.getDataTestDir(); FileSystem fs = UTIL.getTestFileSystem(); String confKey = "hbase.test.cleaner.delegates"; conf.set(confKey, AlwaysDelete.class.getName()); AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey); Path topFile = new Path(testDir, "topFile"); fs.create(topFile).close(); assertTrue("Test file didn't get created.", fs.exists(topFile)); stop.stop("testing stop"); chore.chore(); assertTrue("File got deleted while chore was stopped", fs.exists(topFile)); }
@Test public void testCleanerDoesNotDeleteDirectoryWithLateAddedFiles() throws IOException { Stoppable stop = new StoppableImplementation(); Configuration conf = UTIL.getConfiguration(); final Path testDir = UTIL.getDataTestDir(); final FileSystem fs = UTIL.getTestFileSystem(); String confKey = "hbase.test.cleaner.delegates"; conf.set(confKey, AlwaysDelete.class.getName()); AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey); AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0); AlwaysDelete spy = Mockito.spy(delegate); chore.cleanersChain.set(0, spy); final Path parent = new Path(testDir, "parent"); Path file = new Path(parent, "someFile"); fs.mkdirs(parent); fs.create(file).close(); assertTrue("Test file didn't get created.", fs.exists(file)); final Path addedFile = new Path(parent, "addedFile"); Mockito.doAnswer(new Answer<Boolean>() { @Override public Boolean answer(InvocationOnMock invocation) throws Throwable { fs.create(addedFile).close(); FSUtils.logFileSystemState(fs, testDir, LOG); return (Boolean) invocation.callRealMethod(); } }).when(spy).isFileDeletable(Mockito.any(FileStatus.class)); chore.chore(); assertTrue("Added file unexpectedly deleted", fs.exists(addedFile)); assertTrue("Parent directory deleted unexpectedly", fs.exists(parent)); assertFalse("Original file unexpectedly retained", fs.exists(file)); Mockito.verify(spy, Mockito.times(1)).isFileDeletable(Mockito.any(FileStatus.class)); Mockito.reset(spy); } |
CleanerChore extends ScheduledChore { @VisibleForTesting boolean checkAndDeleteDirectory(Path dir) { if (LOG.isTraceEnabled()) { LOG.trace("Checking directory: " + dir); } try { FileStatus[] children = FSUtils.listStatus(fs, dir); boolean allChildrenDeleted = checkAndDeleteEntries(children); if (!allChildrenDeleted) return false; } catch (IOException e) { e = RemoteExceptionHandler.checkIOException(e); LOG.warn("Error while listing directory: " + dir, e); return false; } try { return fs.delete(dir, false); } catch (IOException e) { if (LOG.isTraceEnabled()) { LOG.trace("Couldn't delete directory: " + dir, e); } return false; } } CleanerChore(String name, final int sleepPeriod, final Stoppable s, Configuration conf,
FileSystem fs, Path oldFileDir, String confKey); @Override void cleanup(); } | @Test public void testNoExceptionFromDirectoryWithRacyChildren() throws Exception { Stoppable stop = new StoppableImplementation(); HBaseTestingUtility localUtil = new HBaseTestingUtility(); Configuration conf = localUtil.getConfiguration(); final Path testDir = UTIL.getDataTestDir(); final FileSystem fs = UTIL.getTestFileSystem(); LOG.debug("Writing test data to: " + testDir); String confKey = "hbase.test.cleaner.delegates"; conf.set(confKey, AlwaysDelete.class.getName()); AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey); AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0); AlwaysDelete spy = Mockito.spy(delegate); chore.cleanersChain.set(0, spy); final Path parent = new Path(testDir, "parent"); Path file = new Path(parent, "someFile"); fs.mkdirs(parent); fs.create(file).close(); assertTrue("Test file didn't get created.", fs.exists(file)); final Path racyFile = new Path(parent, "addedFile"); Mockito.doAnswer(new Answer<Boolean>() { @Override public Boolean answer(InvocationOnMock invocation) throws Throwable { fs.create(racyFile).close(); FSUtils.logFileSystemState(fs, testDir, LOG); return (Boolean) invocation.callRealMethod(); } }).when(spy).isFileDeletable(Mockito.any(FileStatus.class)); if (chore.checkAndDeleteDirectory(parent)) { throw new Exception( "Reported success deleting directory, should have failed when adding file mid-iteration"); } assertTrue("Added file unexpectedly deleted", fs.exists(racyFile)); assertTrue("Parent directory deleted unexpectedly", fs.exists(parent)); assertFalse("Original file unexpectedly retained", fs.exists(file)); Mockito.verify(spy, Mockito.times(1)).isFileDeletable(Mockito.any(FileStatus.class)); } |
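`checkAndDeleteDirectory` only removes a directory after recursing into its children, and it deliberately finishes with a non-recursive delete so that a file racing in mid-iteration makes the delete fail safely instead of destroying data — the behavior testNoExceptionFromDirectoryWithRacyChildren pins down. A hedged sketch of that bottom-up, fail-safe traversal using plain java.io:

```java
import java.io.File;
import java.util.function.Predicate;

// Bottom-up cleanup: children first, then a non-recursive delete of the
// directory itself, which fails harmlessly if something was added meanwhile.
class SafeDirectoryCleaner {
    // Returns true only if the directory and everything under it was removed.
    static boolean checkAndDelete(File dir, Predicate<File> deletable) {
        File[] children = dir.listFiles();
        boolean allGone = true;
        if (children != null) {
            for (File child : children) {
                if (child.isDirectory()) {
                    allGone &= checkAndDelete(child, deletable);
                } else if (deletable.test(child)) {
                    allGone &= child.delete();
                } else {
                    allGone = false; // a delegate vetoed this file
                }
            }
        }
        if (!allGone) {
            return false; // keep the directory: it still has content
        }
        // Non-recursive delete: returns false if a file raced in after listing.
        return dir.delete();
    }
}
```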
SnapshotLogCleaner extends BaseLogCleanerDelegate { @Override public void setConf(Configuration conf) { super.setConf(conf); try { long cacheRefreshPeriod = conf.getLong( WAL_CACHE_REFRESH_PERIOD_CONF_KEY, DEFAULT_WAL_CACHE_REFRESH_PERIOD); final FileSystem fs = FSUtils.getCurrentFileSystem(conf); Path rootDir = FSUtils.getRootDir(conf); cache = new SnapshotFileCache(fs, rootDir, cacheRefreshPeriod, cacheRefreshPeriod, "snapshot-log-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() { public Collection<String> filesUnderSnapshot(final Path snapshotDir) throws IOException { return SnapshotReferenceUtil.getWALNames(fs, snapshotDir); } }); } catch (IOException e) { LOG.error("Failed to create snapshot log cleaner", e); } } @Override synchronized Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files); @Override void setConf(Configuration conf); @Override void stop(String why); @Override boolean isStopped(); } | @Test public void testFindsSnapshotFilesWhenCleaning() throws IOException { Configuration conf = TEST_UTIL.getConfiguration(); FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir()); Path rootDir = FSUtils.getRootDir(conf); FileSystem fs = FileSystem.get(conf); SnapshotLogCleaner cleaner = new SnapshotLogCleaner(); cleaner.setConf(conf); String snapshotName = "snapshot"; byte[] snapshot = Bytes.toBytes(snapshotName); Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); Path snapshotLogDir = new Path(snapshotDir, HConstants.HREGION_LOGDIR_NAME); String timestamp = "1339643343027"; String hostFromMaster = "localhost%2C59648%2C1339643336601"; Path hostSnapshotLogDir = new Path(snapshotLogDir, hostFromMaster); String snapshotlogfile = hostFromMaster + "." + timestamp + ".hbase"; fs.create(new Path(hostSnapshotLogDir, snapshotlogfile)); Path oldlogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME); Path logFile = new Path(oldlogDir, snapshotlogfile); fs.create(logFile); assertFalse(cleaner.isFileDeletable(fs.getFileStatus(logFile))); } |
ServerCrashProcedure extends StateMachineProcedure<MasterProcedureEnv, ServerCrashState> implements ServerProcedureInterface { @Override public ServerName getServerName() { return this.serverName; } ServerCrashProcedure(final ServerName serverName,
final boolean shouldSplitWal, final boolean carryingMeta); ServerCrashProcedure(); @Override void toStringClassDetails(StringBuilder sb); @Override void serializeStateData(final OutputStream stream); @Override void deserializeStateData(final InputStream stream); @Override ServerName getServerName(); @Override boolean hasMetaTableRegion(); @Override ServerOperationType getServerOperationType(); static final String KEY_SHORT_WAIT_ON_META; static final int DEFAULT_SHORT_WAIT_ON_META; static final String KEY_RETRIES_ON_META; static final int DEFAULT_RETRIES_ON_META; static final String KEY_WAIT_ON_RIT; static final int DEFAULT_WAIT_ON_RIT; } | @Test(timeout = 300000) public void testRecoveryAndDoubleExecutionOnline() throws Exception { final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOnline"); this.util.createTable(tableName, HBaseTestingUtility.COLUMNS, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE); try (Table t = this.util.getConnection().getTable(tableName)) { this.util.loadTable(t, HBaseTestingUtility.COLUMNS[0]); int count = countRows(t); HMaster master = this.util.getHBaseCluster().getMaster(); final ProcedureExecutor<MasterProcedureEnv> procExec = master.getMasterProcedureExecutor(); master.setServerCrashProcessingEnabled(false); HRegionServer hrs = this.util.getHBaseCluster().getRegionServer(0); boolean carryingMeta = (master.getAssignmentManager().isCarryingMeta(hrs.getServerName()) == AssignmentManager.ServerHostRegion.HOSTING_REGION); this.util.getHBaseCluster().killRegionServer(hrs.getServerName()); hrs.join(); while (!master.getServerManager().isServerDead(hrs.getServerName())) Threads.sleep(10); master.setServerCrashProcessingEnabled(true); master.getServerManager().moveFromOnelineToDeadServers(hrs.getServerName()); ProcedureTestingUtility.waitNoProcedureRunning(procExec); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); long procId = procExec.submitProcedure(new ServerCrashProcedure(hrs.getServerName(), true, carryingMeta)); MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); assertEquals(count, countRows(t)); } } |
StorefileRefresherChore extends ScheduledChore { @Override protected void chore() { for (Region r : regionServer.getOnlineRegionsLocalContext()) { if (!r.isReadOnly()) { continue; } if (onlyMetaRefresh && !r.getRegionInfo().isMetaTable()) continue; String encodedName = r.getRegionInfo().getEncodedName(); long time = EnvironmentEdgeManager.currentTime(); if (!lastRefreshTimes.containsKey(encodedName)) { lastRefreshTimes.put(encodedName, time); } try { for (Store store : r.getStores()) { store.refreshStoreFiles(); } } catch (IOException ex) { LOG.warn("Exception while trying to refresh store files for region:" + r.getRegionInfo() + ", exception:" + StringUtils.stringifyException(ex)); if (isRegionStale(encodedName, time)) { ((HRegion)r).setReadsEnabled(false); } continue; } lastRefreshTimes.put(encodedName, time); ((HRegion)r).setReadsEnabled(true); } Iterator<String> lastRefreshTimesIter = lastRefreshTimes.keySet().iterator(); while (lastRefreshTimesIter.hasNext()) { String encodedName = lastRefreshTimesIter.next(); if (regionServer.getFromOnlineRegions(encodedName) == null) { lastRefreshTimesIter.remove(); } } } StorefileRefresherChore(int period, boolean onlyMetaRefresh, HRegionServer regionServer,
Stoppable stoppable); static final String REGIONSERVER_STOREFILE_REFRESH_PERIOD; static final String REGIONSERVER_META_STOREFILE_REFRESH_PERIOD; } | @Test public void testIsStale() throws IOException { int period = 0; byte[][] families = new byte[][] {Bytes.toBytes("cf")}; byte[] qf = Bytes.toBytes("cq"); HRegionServer regionServer = mock(HRegionServer.class); List<Region> regions = new ArrayList<Region>(); when(regionServer.getOnlineRegionsLocalContext()).thenReturn(regions); when(regionServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration()); HTableDescriptor htd = getTableDesc(TableName.valueOf("testIsStale"), families); Region primary = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0); Region replica1 = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 1); regions.add(primary); regions.add(replica1); StaleStorefileRefresherChore chore = new StaleStorefileRefresherChore(period, regionServer, new StoppableImplementation()); putData(primary, 0, 100, qf, families); primary.flush(true); verifyData(primary, 0, 100, qf, families); try { verifyData(replica1, 0, 100, qf, families); Assert.fail("should have failed"); } catch(AssertionError ex) { } chore.chore(); verifyData(replica1, 0, 100, qf, families); ((FailingHRegionFileSystem)((HRegion)replica1).getRegionFileSystem()).fail = true; putData(primary, 100, 100, qf, families); primary.flush(true); verifyData(primary, 0, 200, qf, families); chore.chore(); verifyData(replica1, 0, 100, qf, families); try { verifyData(replica1, 100, 100, qf, families); Assert.fail("should have failed"); } catch(AssertionError ex) { } chore.isStale = true; chore.chore(); try { verifyData(replica1, 0, 100, qf, families); Assert.fail("should have failed with IOException"); } catch(IOException ex) { } } |
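The chore above keys everything off a last-successful-refresh timestamp per region: refresh failures leave the old timestamp in place, and once the gap grows past a multiple of the refresh period the replica's reads are disabled. A minimal JDK-only sketch of that bookkeeping, with all names (RefreshTracker, isStale, retainOnly) hypothetical rather than HBase API:

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

// Illustrative sketch: track the last successful refresh per region and
// report staleness once the gap exceeds a multiple of the refresh period.
public class RefreshTracker {
  private final Map<String, Long> lastRefreshTimes = new HashMap<>();
  private final long refreshPeriodMs;
  private final int staleMultiplier;

  public RefreshTracker(long refreshPeriodMs, int staleMultiplier) {
    this.refreshPeriodMs = refreshPeriodMs;
    this.staleMultiplier = staleMultiplier;
  }

  // Record a successful refresh; failures simply leave the old entry alone.
  public void recordRefresh(String encodedName, long now) {
    lastRefreshTimes.put(encodedName, now);
  }

  // A region is stale when no refresh succeeded within period * multiplier.
  public boolean isStale(String encodedName, long now) {
    Long last = lastRefreshTimes.get(encodedName);
    return last != null && (now - last) > refreshPeriodMs * staleMultiplier;
  }

  // Drop bookkeeping for regions that went offline, as the chore's tail loop does.
  public void retainOnly(Set<String> onlineRegions) {
    lastRefreshTimes.keySet().retainAll(onlineRegions);
  }
}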
StripeStoreFileManager implements StoreFileManager, StripeCompactionPolicy.StripeInformationProvider { @Override public ImmutableCollection<StoreFile> clearFiles() { ImmutableCollection<StoreFile> result = state.allFilesCached; this.state = new State(); this.fileStarts.clear(); this.fileEnds.clear(); return result; } StripeStoreFileManager(
KVComparator kvComparator, Configuration conf, StripeStoreConfig config); @Override void loadFiles(List<StoreFile> storeFiles); @Override Collection<StoreFile> getStorefiles(); @Override void insertNewFiles(Collection<StoreFile> sfs); @Override ImmutableCollection<StoreFile> clearFiles(); @Override int getStorefileCount(); @Override Iterator<StoreFile> getCandidateFilesForRowKeyBefore(final KeyValue targetKey); @Override Iterator<StoreFile> updateCandidateFilesForRowKeyBefore(
Iterator<StoreFile> candidateFiles, final KeyValue targetKey, final Cell candidate); /** Override of getSplitPoint that determines the split point as the boundary between two stripes, unless it causes significant imbalance between split sides' sizes. In that case, the split boundary will be chosen from the middle of one of the stripes to minimize imbalance. @return The split point, or null if no split is possible. */ @Override byte[] getSplitPoint(); @Override Collection<StoreFile> getFilesForScanOrGet(
boolean isGet, byte[] startRow, byte[] stopRow); @Override void addCompactionResults(
Collection<StoreFile> compactedFiles, Collection<StoreFile> results); @Override int getStoreCompactionPriority(); @Override final byte[] getStartRow(int stripeIndex); @Override final byte[] getEndRow(int stripeIndex); @Override List<StoreFile> getLevel0Files(); @Override List<byte[]> getStripeBoundaries(); @Override ArrayList<ImmutableList<StoreFile>> getStripes(); @Override int getStripeCount(); @Override Collection<StoreFile> getUnneededFiles(long maxTs, List<StoreFile> filesCompacting); @Override double getCompactionPressure(); static final byte[] STRIPE_START_KEY; static final byte[] STRIPE_END_KEY; final static byte[] OPEN_KEY; } | @Test public void testClearFiles() throws Exception { StripeStoreFileManager manager = createManager(); manager.insertNewFiles(al(createFile())); manager.insertNewFiles(al(createFile())); manager.addCompactionResults(al(), al(createFile(OPEN_KEY, KEY_B), createFile(KEY_B, OPEN_KEY))); assertEquals(4, manager.getStorefileCount()); Collection<StoreFile> allFiles = manager.clearFiles(); assertEquals(4, allFiles.size()); assertEquals(0, manager.getStorefileCount()); assertEquals(0, manager.getStorefiles().size()); } |
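clearFiles() is a snapshot-and-swap: hand back the previously cached immutable view, then reset internal state, so the caller keeps a stable collection while the manager starts empty (which is exactly what testClearFiles asserts). A hedged, JDK-only miniature of the pattern, with hypothetical names and no Guava:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Hypothetical sketch of snapshot-and-swap: return the current immutable
// view, then start over from empty state.
class FileSet {
  private List<String> cached = Collections.emptyList();

  void insert(String file) {
    List<String> next = new ArrayList<>(cached);
    next.add(file);
    cached = Collections.unmodifiableList(next); // rebuild the immutable view
  }

  List<String> clearFiles() {
    List<String> result = cached;     // snapshot the old view for the caller
    cached = Collections.emptyList(); // reset internal state
    return result;                    // caller still sees the old files
  }
}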
StealJobQueue extends PriorityBlockingQueue<T> { @Override public T poll(long timeout, TimeUnit unit) throws InterruptedException { long nanos = unit.toNanos(timeout); lock.lockInterruptibly(); try { while (true) { T retVal = this.poll(); if (retVal == null) { retVal = stealFromQueue.poll(); } if (retVal == null) { if (nanos <= 0) return null; nanos = notEmpty.awaitNanos(nanos); } else { return retVal; } } } finally { lock.unlock(); } } StealJobQueue(); BlockingQueue<T> getStealFromQueue(); @Override boolean offer(T t); @Override T take(); @Override T poll(long timeout, TimeUnit unit); } | @Test public void testPoll() throws InterruptedException { stealJobQueue.offer(3); stealFromQueue.offer(10); stealJobQueue.offer(15); stealJobQueue.offer(4); assertEquals(3, stealJobQueue.poll(1, TimeUnit.SECONDS).intValue()); assertEquals(4, stealJobQueue.poll(1, TimeUnit.SECONDS).intValue()); assertEquals("always take from the main queue before trying to steal", 15, stealJobQueue.poll(1, TimeUnit.SECONDS).intValue()); assertEquals(10, stealJobQueue.poll(1, TimeUnit.SECONDS).intValue()); assertTrue(stealFromQueue.isEmpty()); assertTrue(stealJobQueue.isEmpty()); assertNull(stealJobQueue.poll(10, TimeUnit.MILLISECONDS)); }
@Test public void testPutInStealQueueFromShouldUnblockPoll() throws InterruptedException { final AtomicInteger taken = new AtomicInteger(); Thread consumer = new Thread() { @Override public void run() { try { Integer n = stealJobQueue.poll(3, TimeUnit.SECONDS); taken.set(n); } catch (InterruptedException e) { e.printStackTrace(); } } }; consumer.start(); stealFromQueue.put(3); consumer.join(1000); assertEquals(3, taken.get()); consumer.interrupt(); }
@Test public void testAddInStealJobQueueShouldUnblockPoll() throws InterruptedException { final AtomicInteger taken = new AtomicInteger(); Thread consumer = new Thread() { @Override public void run() { try { Integer n = stealJobQueue.poll(3, TimeUnit.SECONDS); taken.set(n); } catch (InterruptedException e) { e.printStackTrace(); } } }; consumer.start(); stealJobQueue.add(3); consumer.join(1000); assertEquals(3, taken.get()); consumer.interrupt(); } |
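The timed poll drains the main queue first and only falls back to the steal-from queue, parking on a single not-empty condition so that an offer to either queue can wake it; that shared condition is why both unblock tests above pass. A self-contained sketch of the same two-queue pattern (class and method names are illustrative, not the HBase API):

import java.util.PriorityQueue;
import java.util.Queue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

// Hypothetical two-queue poll: prefer the main queue, then try to steal,
// and block on one shared condition that covers both queues.
class TwoQueuePoll<T> {
  private final ReentrantLock lock = new ReentrantLock();
  private final Condition notEmpty = lock.newCondition();
  private final Queue<T> main = new PriorityQueue<>();
  private final Queue<T> stealFrom = new PriorityQueue<>();

  void offerMain(T t) { signalAfter(() -> main.offer(t)); }
  void offerStealFrom(T t) { signalAfter(() -> stealFrom.offer(t)); }

  private void signalAfter(Runnable r) {
    lock.lock();
    try { r.run(); notEmpty.signal(); } finally { lock.unlock(); }
  }

  T poll(long timeout, TimeUnit unit) throws InterruptedException {
    long nanos = unit.toNanos(timeout);
    lock.lockInterruptibly();
    try {
      while (true) {
        T v = main.poll();                   // always prefer the main queue
        if (v == null) v = stealFrom.poll(); // only then try to steal
        if (v != null) return v;
        if (nanos <= 0) return null;         // timed out empty-handed
        nanos = notEmpty.awaitNanos(nanos);
      }
    } finally {
      lock.unlock();
    }
  }
}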
StripeStoreFileManager implements StoreFileManager, StripeCompactionPolicy.StripeInformationProvider { @Override public List<StoreFile> getLevel0Files() { return this.state.level0Files; } StripeStoreFileManager(
KVComparator kvComparator, Configuration conf, StripeStoreConfig config); @Override void loadFiles(List<StoreFile> storeFiles); @Override Collection<StoreFile> getStorefiles(); @Override void insertNewFiles(Collection<StoreFile> sfs); @Override ImmutableCollection<StoreFile> clearFiles(); @Override int getStorefileCount(); @Override Iterator<StoreFile> getCandidateFilesForRowKeyBefore(final KeyValue targetKey); @Override Iterator<StoreFile> updateCandidateFilesForRowKeyBefore(
Iterator<StoreFile> candidateFiles, final KeyValue targetKey, final Cell candidate); /** Override of getSplitPoint that determines the split point as the boundary between two stripes, unless it causes significant imbalance between split sides' sizes. In that case, the split boundary will be chosen from the middle of one of the stripes to minimize imbalance. @return The split point, or null if no split is possible. */ @Override byte[] getSplitPoint(); @Override Collection<StoreFile> getFilesForScanOrGet(
boolean isGet, byte[] startRow, byte[] stopRow); @Override void addCompactionResults(
Collection<StoreFile> compactedFiles, Collection<StoreFile> results); @Override int getStoreCompactionPriority(); @Override final byte[] getStartRow(int stripeIndex); @Override final byte[] getEndRow(int stripeIndex); @Override List<StoreFile> getLevel0Files(); @Override List<byte[]> getStripeBoundaries(); @Override ArrayList<ImmutableList<StoreFile>> getStripes(); @Override int getStripeCount(); @Override Collection<StoreFile> getUnneededFiles(long maxTs, List<StoreFile> filesCompacting); @Override double getCompactionPressure(); static final byte[] STRIPE_START_KEY; static final byte[] STRIPE_END_KEY; final static byte[] OPEN_KEY; } | @Test @SuppressWarnings("unchecked") public void testLoadFilesWithRecoverableBadFiles() throws Exception { ArrayList<StoreFile> validStripeFiles = al(createFile(OPEN_KEY, KEY_B), createFile(KEY_B, KEY_C), createFile(KEY_C, OPEN_KEY), createFile(KEY_C, OPEN_KEY)); ArrayList<StoreFile> filesToGoToL0 = al(createFile(), createFile(null, KEY_A), createFile(KEY_D, null), createFile(KEY_D, KEY_A), createFile(keyAfter(KEY_A), KEY_C), createFile(OPEN_KEY, KEY_D), createFile(KEY_D, keyAfter(KEY_D))); ArrayList<StoreFile> allFilesToGo = flattenLists(validStripeFiles, filesToGoToL0); Collections.shuffle(allFilesToGo); StripeStoreFileManager manager = createManager(allFilesToGo); List<StoreFile> l0Files = manager.getLevel0Files(); assertEquals(filesToGoToL0.size(), l0Files.size()); for (StoreFile sf : filesToGoToL0) { assertTrue(l0Files.contains(sf)); } verifyAllFiles(manager, allFilesToGo); }
@Test public void testLoadFilesWithBadStripe() throws Exception { ArrayList<StoreFile> allFilesToGo = al(createFile(OPEN_KEY, KEY_B), createFile(KEY_B, KEY_C), createFile(KEY_C, OPEN_KEY), createFile(KEY_B, keyAfter(KEY_B))); Collections.shuffle(allFilesToGo); StripeStoreFileManager manager = createManager(allFilesToGo); assertEquals(allFilesToGo.size(), manager.getLevel0Files().size()); } |
StoreFileInfo { @Override public int hashCode() { int hash = 17; hash = hash * 31 + ((reference == null) ? 0 : reference.hashCode()); hash = hash * 31 + ((initialPath == null) ? 0 : initialPath.hashCode()); hash = hash * 31 + ((link == null) ? 0 : link.hashCode()); return hash; } StoreFileInfo(final Configuration conf, final FileSystem fs, final Path initialPath); StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus); StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus,
final HFileLink link); StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus,
final Reference reference); static Path getIRIndexPathFromPath(Path hfilePath); static boolean isHFile(final Path path); static boolean isHFile(final String fileName); static boolean isReference(final Path path); static boolean isReference(final String name); static Path getReferredToFile(final Path p); static boolean validateStoreFileName(final String fileName); static boolean isValid(final FileStatus fileStatus); void setRegionCoprocessorHost(RegionCoprocessorHost coprocessorHost); Reference getReference(); boolean isReference(); boolean isTopReference(); boolean isLink(); HDFSBlocksDistribution getHDFSBlockDistribution(); StoreFile.Reader open(final FileSystem fs, final CacheConfig cacheConf,
final boolean canUseDropBehind); HDFSBlocksDistribution computeHDFSBlocksDistribution(final FileSystem fs); FileStatus getReferencedFileStatus(final FileSystem fs); Path getPath(); FileStatus getFileStatus(); long getModificationTime(); @Override String toString(); @Override boolean equals(Object that); @Override int hashCode(); static final String HFILE_NAME_REGEX; final Reference reference; final HFileLink link; public Path irIndexPath; public Path irIndexReferencePath; public boolean hasIRIndex; public Path lmdDataPath; public Path lmdBucketPath; public boolean hasLMDIndex; } | @Test public void testEqualsWithLink() throws IOException { Path origin = new Path("/origin"); Path tmp = new Path("/tmp"); Path archive = new Path("/archive"); HFileLink link1 = new HFileLink(new Path(origin, "f1"), new Path(tmp, "f1"), new Path(archive, "f1")); HFileLink link2 = new HFileLink(new Path(origin, "f1"), new Path(tmp, "f1"), new Path(archive, "f1")); StoreFileInfo info1 = new StoreFileInfo(TEST_UTIL.getConfiguration(), TEST_UTIL.getTestFileSystem(), null, link1); StoreFileInfo info2 = new StoreFileInfo(TEST_UTIL.getConfiguration(), TEST_UTIL.getTestFileSystem(), null, link2); assertEquals(info1, info2); assertEquals(info1.hashCode(), info2.hashCode()); } |
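The hashCode above is the classic null-safe 17/31 accumulation recipe, which is why two StoreFileInfo instances built from equal HFileLinks hash identically in testEqualsWithLink. A minimal standalone illustration with hypothetical String stand-ins for the three fields:

// Minimal illustration of the null-safe 17/31 hash recipe. Note that
// java.util.Objects.hash(...) satisfies the same equals/hashCode contract
// but produces different numeric values (it seeds with 1, not 17).
class Info {
  final String reference, initialPath, link; // hypothetical stand-ins

  Info(String reference, String initialPath, String link) {
    this.reference = reference;
    this.initialPath = initialPath;
    this.link = link;
  }

  @Override public int hashCode() {
    int hash = 17;
    hash = hash * 31 + ((reference == null) ? 0 : reference.hashCode());
    hash = hash * 31 + ((initialPath == null) ? 0 : initialPath.hashCode());
    hash = hash * 31 + ((link == null) ? 0 : link.hashCode());
    return hash;
  }
}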
ServerNonceManager { public void endOperation(long group, long nonce, boolean success) { if (nonce == HConstants.NO_NONCE) return; NonceKey nk = new NonceKey(group, nonce); OperationContext newResult = nonces.get(nk); assert newResult != null; synchronized (newResult) { assert newResult.getState() == OperationContext.WAIT; newResult.setState(success ? OperationContext.DONT_PROCEED : OperationContext.PROCEED); if (success) { newResult.reportActivity(); } else { OperationContext val = nonces.remove(nk); assert val == newResult; } if (newResult.hasWait()) { LOG.debug("Conflict with running op ended: " + nk + ", " + newResult); newResult.notifyAll(); } } } ServerNonceManager(Configuration conf); @VisibleForTesting void setConflictWaitIterationMs(int conflictWaitIterationMs); boolean startOperation(long group, long nonce, Stoppable stoppable); void endOperation(long group, long nonce, boolean success); void reportOperationFromWal(long group, long nonce, long writeTime); ScheduledChore createCleanupScheduledChore(Stoppable stoppable); static final String HASH_NONCE_GRACE_PERIOD_KEY; } | @Test public void testNoEndWithoutStart() { ServerNonceManager nm = createManager(); try { nm.endOperation(NO_NONCE, 1, true); fail("Should have thrown"); } catch (AssertionError err) {} } |
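endOperation() is the second half of a start/end nonce protocol: while an operation runs its context is WAIT; on success the context stays in the table as DONT_PROCEED so a retry is recognized as a duplicate; on failure it is removed so a retry may PROCEED, and any threads blocked on the context are woken. A hedged, simplified sketch of that state machine (all names hypothetical; the real class also handles single-winner races and expiry, which this omits):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Hypothetical nonce lifecycle: WAIT while running, DONT_PROCEED after
// success (a retry is a duplicate), removed after failure (retry allowed).
class NonceTable {
  enum State { WAIT, PROCEED, DONT_PROCEED }

  static final class Ctx { volatile State state = State.WAIT; }

  private final ConcurrentMap<Long, Ctx> nonces = new ConcurrentHashMap<>();

  // Returns true if the caller may run the operation.
  boolean start(long nonce) {
    Ctx fresh = new Ctx();
    Ctx prior = nonces.putIfAbsent(nonce, fresh);
    if (prior == null) return true;            // first attempt wins
    synchronized (prior) {
      while (prior.state == State.WAIT) {      // an attempt is in flight; wait
        try { prior.wait(); } catch (InterruptedException e) {
          Thread.currentThread().interrupt(); return false;
        }
      }
      return prior.state == State.PROCEED;     // failed attempt => retry allowed
    }
  }

  // Assumes a matching start(); the real code asserts this.
  void end(long nonce, boolean success) {
    Ctx ctx = nonces.get(nonce);
    synchronized (ctx) {
      ctx.state = success ? State.DONT_PROCEED : State.PROCEED;
      if (!success) nonces.remove(nonce);      // let a fresh retry start
      ctx.notifyAll();                         // wake any duplicate waiters
    }
  }
}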
OpenRegionHandler extends EventHandler { @Override public void process() throws IOException { boolean openSuccessful = false; boolean transitionedToOpening = false; final String regionName = regionInfo.getRegionNameAsString(); HRegion region = null; try { if (this.server.isStopped() || this.rsServices.isStopping()) { return; } final String encodedName = regionInfo.getEncodedName(); if (this.rsServices.getFromOnlineRegions(encodedName) != null) { LOG.error("Region " + encodedName + " was already online when we started processing the opening. " + "Marking this new attempt as failed"); return; } if (!isRegionStillOpening()){ LOG.error("Region " + encodedName + " opening cancelled"); return; } if (useZKForAssignment && !coordination.transitionFromOfflineToOpening(regionInfo, ord)) { LOG.warn("Region was hijacked? Opening cancelled for encodedName=" + encodedName); return; } transitionedToOpening = true; region = openRegion(); if (region == null) { return; } boolean failed = true; if (isRegionStillOpening() && (!useZKForAssignment || coordination.tickleOpening(ord, regionInfo, rsServices, "post_region_open"))) { if (updateMeta(region, masterSystemTime)) { failed = false; } } if (failed || this.server.isStopped() || this.rsServices.isStopping()) { return; } if (!isRegionStillOpening() || (useZKForAssignment && !coordination.transitionToOpened(region, ord))) { return; } this.rsServices.addToOnlineRegions(region); openSuccessful = true; LOG.debug("Opened " + regionName + " on " + this.server.getServerName()); } finally { if (!openSuccessful) { doCleanUpOnFailedOpen(region, transitionedToOpening, ord); } final Boolean current = this.rsServices.getRegionsInTransitionInRS(). remove(this.regionInfo.getEncodedNameAsBytes()); if (openSuccessful) { if (current == null) { LOG.error("Bad state: we've just opened a region that was NOT in transition. Region=" + regionName); } else if (Boolean.FALSE.equals(current)) { LOG.error("Race condition: we've finished to open a region, while a close was requested " + " on region=" + regionName + ". It can be a critical error, as a region that" + " should be closed is now opened. Closing it now"); cleanupFailedOpen(region); } } } } OpenRegionHandler(final Server server,
final RegionServerServices rsServices, HRegionInfo regionInfo,
HTableDescriptor htd, long masterSystemTime, OpenRegionCoordination coordination,
OpenRegionCoordination.OpenRegionDetails ord); protected OpenRegionHandler(final Server server,
final RegionServerServices rsServices, final HRegionInfo regionInfo,
final HTableDescriptor htd, EventType eventType, long masterSystemTime,
OpenRegionCoordination coordination, OpenRegionCoordination.OpenRegionDetails ord); HRegionInfo getRegionInfo(); @Override void process(); } | @Test public void testRegionServerAbortionDueToFailureTransitioningToOpened() throws IOException, NodeExistsException, KeeperException { final Server server = new MockServer(HTU); final RegionServerServices rss = HTU.createMockRegionServerService(); HTableDescriptor htd = TEST_HTD; final HRegionInfo hri = TEST_HRI; HRegion region = HRegion.createHRegion(hri, HTU.getDataTestDir(), HTU .getConfiguration(), htd); assertNotNull(region); try { ZkCoordinatedStateManager csm = new ZkCoordinatedStateManager(); csm.initialize(server); csm.start(); ZkOpenRegionCoordination.ZkOpenRegionDetails zkCrd = new ZkOpenRegionCoordination.ZkOpenRegionDetails(); zkCrd.setServerName(server.getServerName()); ZkOpenRegionCoordination openRegionCoordination = new ZkOpenRegionCoordination(csm, server.getZooKeeper()) { @Override public boolean transitionToOpened(final HRegion r, OpenRegionDetails ord) throws IOException { ZooKeeperWatcher zkw = server.getZooKeeper(); String node = ZKAssign.getNodeName(zkw, hri.getEncodedName()); try { ZKUtil.deleteNodeFailSilent(zkw, node); } catch (KeeperException e) { throw new RuntimeException("Ugh failed delete of " + node, e); } return super.transitionToOpened(r, ord); } }; OpenRegionHandler handler = new OpenRegionHandler(server, rss, hri, htd, -1, openRegionCoordination, zkCrd); rss.getRegionsInTransitionInRS().put( hri.getEncodedNameAsBytes(), Boolean.TRUE); handler.process(); rss.getRegionsInTransitionInRS().put( hri.getEncodedNameAsBytes(), Boolean.TRUE); ZKAssign.createNodeOffline(server.getZooKeeper(), hri, server.getServerName()); handler.process(); } catch (IOException ioe) { } finally { HRegion.closeHRegion(region); } assertTrue("region server should have aborted", server.isAborted()); }
@Test public void testFailedOpenRegion() throws Exception { Server server = new MockServer(HTU); RegionServerServices rsServices = HTU.createMockRegionServerService(); ZKAssign.createNodeOffline(server.getZooKeeper(), TEST_HRI, server.getServerName()); ZkCoordinatedStateManager csm = new ZkCoordinatedStateManager(); csm.initialize(server); csm.start(); ZkOpenRegionCoordination.ZkOpenRegionDetails zkCrd = new ZkOpenRegionCoordination.ZkOpenRegionDetails(); zkCrd.setServerName(server.getServerName()); OpenRegionHandler handler = new OpenRegionHandler(server, rsServices, TEST_HRI, TEST_HTD, -1, csm.getOpenRegionCoordination(), zkCrd) { @Override HRegion openRegion() { return null; } }; rsServices.getRegionsInTransitionInRS().put( TEST_HRI.getEncodedNameAsBytes(), Boolean.TRUE); handler.process(); RegionTransition rt = RegionTransition.parseFrom( ZKAssign.getData(server.getZooKeeper(), TEST_HRI.getEncodedName())); assertEquals(EventType.RS_ZK_REGION_FAILED_OPEN, rt.getEventType()); }
@Test public void testFailedUpdateMeta() throws Exception { Server server = new MockServer(HTU); RegionServerServices rsServices = HTU.createMockRegionServerService(); ZKAssign.createNodeOffline(server.getZooKeeper(), TEST_HRI, server.getServerName()); ZkCoordinatedStateManager csm = new ZkCoordinatedStateManager(); csm.initialize(server); csm.start(); ZkOpenRegionCoordination.ZkOpenRegionDetails zkCrd = new ZkOpenRegionCoordination.ZkOpenRegionDetails(); zkCrd.setServerName(server.getServerName()); OpenRegionHandler handler = new OpenRegionHandler(server, rsServices, TEST_HRI, TEST_HTD, -1, csm.getOpenRegionCoordination(), zkCrd) { @Override boolean updateMeta(final HRegion r, long masterSystemTime) { return false; } }; rsServices.getRegionsInTransitionInRS().put( TEST_HRI.getEncodedNameAsBytes(), Boolean.TRUE); handler.process(); RegionTransition rt = RegionTransition.parseFrom( ZKAssign.getData(server.getZooKeeper(), TEST_HRI.getEncodedName())); assertEquals(EventType.RS_ZK_REGION_FAILED_OPEN, rt.getEventType()); }
@Test public void testTransitionToFailedOpenEvenIfCleanupFails() throws Exception { Server server = new MockServer(HTU); RegionServerServices rsServices = HTU.createMockRegionServerService(); ZKAssign.createNodeOffline(server.getZooKeeper(), TEST_HRI, server.getServerName()); ZkCoordinatedStateManager csm = new ZkCoordinatedStateManager(); csm.initialize(server); csm.start(); ZkOpenRegionCoordination.ZkOpenRegionDetails zkCrd = new ZkOpenRegionCoordination.ZkOpenRegionDetails(); zkCrd.setServerName(server.getServerName()); OpenRegionHandler handler = new OpenRegionHandler(server, rsServices, TEST_HRI, TEST_HTD, -1, csm.getOpenRegionCoordination(), zkCrd) { @Override boolean updateMeta(HRegion r, long masterSystemTime) { return false; }; @Override void cleanupFailedOpen(HRegion region) throws IOException { throw new IOException("FileSystem got closed."); } }; rsServices.getRegionsInTransitionInRS().put(TEST_HRI.getEncodedNameAsBytes(), Boolean.TRUE); try { handler.process(); } catch (Exception e) { } RegionTransition rt = RegionTransition.parseFrom(ZKAssign.getData(server.getZooKeeper(), TEST_HRI.getEncodedName())); assertEquals(EventType.RS_ZK_REGION_FAILED_OPEN, rt.getEventType()); }
@Test public void testTransitionToFailedOpenFromOffline() throws Exception { Server server = new MockServer(HTU); RegionServerServices rsServices = HTU.createMockRegionServerService(server.getServerName()); ZKAssign.createNodeOffline(server.getZooKeeper(), TEST_HRI, server.getServerName()); ZkCoordinatedStateManager csm = new ZkCoordinatedStateManager(); csm.initialize(server); csm.start(); ZkOpenRegionCoordination.ZkOpenRegionDetails zkCrd = new ZkOpenRegionCoordination.ZkOpenRegionDetails(); zkCrd.setServerName(server.getServerName()); ZkOpenRegionCoordination openRegionCoordination = new ZkOpenRegionCoordination(csm, server.getZooKeeper()) { @Override public boolean transitionFromOfflineToOpening(HRegionInfo regionInfo, OpenRegionDetails ord) { return false; } }; OpenRegionHandler handler = new OpenRegionHandler(server, rsServices, TEST_HRI, TEST_HTD, -1, openRegionCoordination, zkCrd); rsServices.getRegionsInTransitionInRS().put(TEST_HRI.getEncodedNameAsBytes(), Boolean.TRUE); handler.process(); RegionTransition rt = RegionTransition.parseFrom(ZKAssign.getData(server.getZooKeeper(), TEST_HRI.getEncodedName())); assertEquals(EventType.RS_ZK_REGION_FAILED_OPEN, rt.getEventType()); } |
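The finally block in process() hinges on the regions-in-transition map the tests keep seeding with Boolean.TRUE: TRUE marks an in-flight open, FALSE means a close was requested while the open ran, and the value removed at the end tells the handler which race, if any, it lost. A compact illustration of the three outcomes (hypothetical wrapper class, not HBase API):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Hypothetical model of the regions-in-transition bookkeeping around open:
// TRUE = open in flight, FALSE = close requested while opening.
class OpenBookkeeping {
  private final ConcurrentMap<String, Boolean> rit = new ConcurrentHashMap<>();

  void markOpening(String region) { rit.put(region, Boolean.TRUE); }
  void requestCloseDuringOpen(String region) { rit.put(region, Boolean.FALSE); }

  // Mirrors the handler's finally block after a successful open.
  String finishOpen(String region) {
    Boolean current = rit.remove(region);
    if (current == null) return "bad state: opened a region not in transition";
    if (Boolean.FALSE.equals(current)) return "race: close was requested; close it now";
    return "clean open";
  }
}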
CloseRegionHandler extends EventHandler { @Override public void process() { try { String name = regionInfo.getRegionNameAsString(); LOG.debug("Processing close of " + name); String encodedRegionName = regionInfo.getEncodedName(); HRegion region = (HRegion)rsServices.getFromOnlineRegions(encodedRegionName); if (region == null) { LOG.warn("Received CLOSE for region " + name + " but currently not serving - ignoring"); return; } try { if (useZKForAssignment && closeRegionCoordination.checkClosingState( regionInfo, closeRegionDetails)) { return; } if (region.close(abort) == null) { LOG.warn("Can't close region: was already closed during close(): " + regionInfo.getRegionNameAsString()); return; } } catch (IOException ioe) { server.abort("Unrecoverable exception while closing region " + regionInfo.getRegionNameAsString() + ", still finishing close", ioe); throw new RuntimeException(ioe); } this.rsServices.removeFromOnlineRegions(region, destination); if (!useZKForAssignment) { rsServices.reportRegionStateTransition(TransitionCode.CLOSED, regionInfo); } else { closeRegionCoordination.setClosedState(region, this.server.getServerName(), closeRegionDetails); } LOG.debug("Closed " + region.getRegionInfo().getRegionNameAsString()); } finally { this.rsServices.getRegionsInTransitionInRS(). remove(this.regionInfo.getEncodedNameAsBytes()); } } CloseRegionHandler(final Server server,
final RegionServerServices rsServices,
final HRegionInfo regionInfo, final boolean abort,
CloseRegionCoordination closeRegionCoordination,
CloseRegionCoordination.CloseRegionDetails crd); CloseRegionHandler(final Server server,
final RegionServerServices rsServices,
final HRegionInfo regionInfo, final boolean abort,
CloseRegionCoordination closeRegionCoordination,
CloseRegionCoordination.CloseRegionDetails crd,
ServerName destination); CloseRegionHandler(final Server server,
final RegionServerServices rsServices, HRegionInfo regionInfo,
boolean abort, CloseRegionCoordination closeRegionCoordination,
CloseRegionCoordination.CloseRegionDetails crd, EventType eventType); protected CloseRegionHandler(final Server server,
final RegionServerServices rsServices, HRegionInfo regionInfo,
boolean abort, CloseRegionCoordination closeRegionCoordination,
CloseRegionCoordination.CloseRegionDetails crd,
EventType eventType, ServerName destination); HRegionInfo getRegionInfo(); @Override void process(); } | @Test public void testFailedFlushAborts() throws IOException, NodeExistsException, KeeperException { final Server server = new MockServer(HTU, false); final RegionServerServices rss = HTU.createMockRegionServerService(); HTableDescriptor htd = TEST_HTD; final HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_END_ROW, HConstants.EMPTY_END_ROW); HRegion region = HTU.createLocalHRegion(hri, htd); try { assertNotNull(region); HRegion spy = Mockito.spy(region); final boolean abort = false; Mockito.when(spy.close(abort)). thenThrow(new IOException("Mocked failed close!")); rss.addToOnlineRegions(spy); assertFalse(server.isStopped()); ZkCoordinatedStateManager consensusProvider = new ZkCoordinatedStateManager(); consensusProvider.initialize(server); consensusProvider.start(); ZkCloseRegionCoordination.ZkCloseRegionDetails zkCrd = new ZkCloseRegionCoordination.ZkCloseRegionDetails(); zkCrd.setPublishStatusInZk(false); zkCrd.setExpectedVersion(-1); CloseRegionHandler handler = new CloseRegionHandler(server, rss, hri, false, consensusProvider.getCloseRegionCoordination(), zkCrd); boolean throwable = false; try { handler.process(); } catch (Throwable t) { throwable = true; } finally { assertTrue(throwable); assertTrue(server.isStopped()); } } finally { HRegion.closeHRegion(region); } }
@Test public void testZKClosingNodeVersionMismatch() throws IOException, NodeExistsException, KeeperException, DeserializationException { final Server server = new MockServer(HTU); final RegionServerServices rss = HTU.createMockRegionServerService(); HTableDescriptor htd = TEST_HTD; final HRegionInfo hri = TEST_HRI; ZkCoordinatedStateManager coordinationProvider = new ZkCoordinatedStateManager(); coordinationProvider.initialize(server); coordinationProvider.start(); OpenRegion(server, rss, htd, hri, coordinationProvider.getOpenRegionCoordination()); int versionOfClosingNode = ZKAssign.createNodeClosing(server.getZooKeeper(), hri, server.getServerName()); ZkCloseRegionCoordination.ZkCloseRegionDetails zkCrd = new ZkCloseRegionCoordination.ZkCloseRegionDetails(); zkCrd.setPublishStatusInZk(true); zkCrd.setExpectedVersion(versionOfClosingNode+1); CloseRegionHandler handler = new CloseRegionHandler(server, rss, hri, false, coordinationProvider.getCloseRegionCoordination(), zkCrd); handler.process(); RegionTransition rt = RegionTransition.parseFrom(ZKAssign.getData(server.getZooKeeper(), hri.getEncodedName())); assertTrue(rt.getEventType().equals(EventType.M_ZK_REGION_CLOSING )); }
@Test public void testCloseRegion() throws IOException, NodeExistsException, KeeperException, DeserializationException { final Server server = new MockServer(HTU); final RegionServerServices rss = HTU.createMockRegionServerService(); HTableDescriptor htd = TEST_HTD; HRegionInfo hri = TEST_HRI; ZkCoordinatedStateManager coordinationProvider = new ZkCoordinatedStateManager(); coordinationProvider.initialize(server); coordinationProvider.start(); OpenRegion(server, rss, htd, hri, coordinationProvider.getOpenRegionCoordination()); int versionOfClosingNode = ZKAssign.createNodeClosing(server.getZooKeeper(), hri, server.getServerName()); ZkCloseRegionCoordination.ZkCloseRegionDetails zkCrd = new ZkCloseRegionCoordination.ZkCloseRegionDetails(); zkCrd.setPublishStatusInZk(true); zkCrd.setExpectedVersion(versionOfClosingNode); CloseRegionHandler handler = new CloseRegionHandler(server, rss, hri, false, coordinationProvider.getCloseRegionCoordination(), zkCrd); handler.process(); RegionTransition rt = RegionTransition.parseFrom( ZKAssign.getData(server.getZooKeeper(), hri.getEncodedName())); assertTrue(rt.getEventType().equals(EventType.RS_ZK_REGION_CLOSED)); } |
RegionSplitPolicy extends Configured { protected byte[] getSplitPoint() { byte[] explicitSplitPoint = this.region.getExplicitSplitPoint(); if (explicitSplitPoint != null) { return explicitSplitPoint; } List<Store> stores = region.getStores(); byte[] splitPointFromLargestStore = null; long largestStoreSize = 0; for (Store s : stores) { byte[] splitPoint = s.getSplitPoint(); long storeSize = s.getSize(); if (splitPoint != null && largestStoreSize < storeSize) { splitPointFromLargestStore = splitPoint; largestStoreSize = storeSize; } } return splitPointFromLargestStore; } static RegionSplitPolicy create(HRegion region,
Configuration conf); static Class<? extends RegionSplitPolicy> getSplitPolicyClass(
HTableDescriptor htd, Configuration conf); } | @Test public void testGetSplitPoint() throws IOException { ConstantSizeRegionSplitPolicy policy = (ConstantSizeRegionSplitPolicy)RegionSplitPolicy.create(mockRegion, conf); assertFalse(policy.shouldSplit()); assertNull(policy.getSplitPoint()); HStore mockStore = Mockito.mock(HStore.class); Mockito.doReturn(2000L).when(mockStore).getSize(); Mockito.doReturn(true).when(mockStore).canSplit(); Mockito.doReturn(Bytes.toBytes("store 1 split")) .when(mockStore).getSplitPoint(); stores.add(mockStore); assertEquals("store 1 split", Bytes.toString(policy.getSplitPoint())); HStore mockStore2 = Mockito.mock(HStore.class); Mockito.doReturn(4000L).when(mockStore2).getSize(); Mockito.doReturn(true).when(mockStore2).canSplit(); Mockito.doReturn(Bytes.toBytes("store 2 split")) .when(mockStore2).getSplitPoint(); stores.add(mockStore2); assertEquals("store 2 split", Bytes.toString(policy.getSplitPoint())); } |
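Stripped of HBase types, getSplitPoint() reduces to "take the candidate offered by the largest store, unless an explicit point was set", which is exactly the progression the test drives (store 1 wins at 2000 bytes, store 2 takes over at 4000). A standalone sketch over hypothetical (size, splitPoint) pairs:

import java.util.List;

// Hypothetical stand-in for a store: a size plus an optional split point.
class StoreView {
  final long size;
  final byte[] splitPoint;
  StoreView(long size, byte[] splitPoint) { this.size = size; this.splitPoint = splitPoint; }
}

class SplitPointChooser {
  // Pick the split point of the largest store that offers one; null otherwise.
  static byte[] choose(byte[] explicit, List<StoreView> stores) {
    if (explicit != null) return explicit; // an explicit point always wins
    byte[] best = null;
    long largest = 0;
    for (StoreView s : stores) {
      if (s.splitPoint != null && s.size > largest) {
        best = s.splitPoint;
        largest = s.size;
      }
    }
    return best; // null when no store can split
  }
}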
HeapMemoryManager { public static HeapMemoryManager create(Configuration conf, FlushRequester memStoreFlusher, Server server, RegionServerAccounting regionServerAccounting) { BlockCache blockCache = CacheConfig.instantiateBlockCache(conf); if (blockCache instanceof ResizableBlockCache) { return new HeapMemoryManager((ResizableBlockCache) blockCache, memStoreFlusher, server, regionServerAccounting); } return null; } @VisibleForTesting HeapMemoryManager(ResizableBlockCache blockCache, FlushRequester memStoreFlusher,
Server server, RegionServerAccounting regionServerAccounting); static HeapMemoryManager create(Configuration conf, FlushRequester memStoreFlusher,
Server server, RegionServerAccounting regionServerAccounting); void start(ChoreService service); void stop(); float getHeapOccupancyPercent(); static final String BLOCK_CACHE_SIZE_MAX_RANGE_KEY; static final String BLOCK_CACHE_SIZE_MIN_RANGE_KEY; static final String MEMSTORE_SIZE_MAX_RANGE_KEY; static final String MEMSTORE_SIZE_MIN_RANGE_KEY; static final String HBASE_RS_HEAP_MEMORY_TUNER_PERIOD; static final int HBASE_RS_HEAP_MEMORY_TUNER_DEFAULT_PERIOD; static final String HBASE_RS_HEAP_MEMORY_TUNER_CLASS; } | @Test public void testWhenMemstoreAndBlockCacheMaxMinChecksFails() throws Exception { BlockCacheStub blockCache = new BlockCacheStub(0); MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub(0); Configuration conf = HBaseConfiguration.create(); conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY, 0.75f); conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.06f); try { new HeapMemoryManager(blockCache, memStoreFlusher, new RegionServerStub(conf), new RegionServerAccountingStub()); fail(); } catch (RuntimeException e) { } conf = HBaseConfiguration.create(); conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY, 0.2f); conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.7f); try { new HeapMemoryManager(blockCache, memStoreFlusher, new RegionServerStub(conf), new RegionServerAccountingStub()); fail(); } catch (RuntimeException e) { } } |
MetricsWAL extends WALActionsListener.Base { @Override public void logRollRequested(boolean underReplicated) { source.incrementLogRollRequested(); if (underReplicated) { source.incrementLowReplicationLogRoll(); } } MetricsWAL(); @VisibleForTesting MetricsWAL(MetricsWALSource s); @Override void postSync(final long timeInNanos, final int handlerSyncs); @Override void postAppend(final long size, final long time); @Override void logRollRequested(boolean underReplicated); } | @Test public void testLogRollRequested() throws Exception { MetricsWALSource source = mock(MetricsWALSourceImpl.class); MetricsWAL metricsWAL = new MetricsWAL(source); metricsWAL.logRollRequested(false); metricsWAL.logRollRequested(true); verify(source, times(2)).incrementLogRollRequested(); verify(source, times(1)).incrementLowReplicationLogRoll(); } |
MetricsWAL extends WALActionsListener.Base { @Override public void postSync(final long timeInNanos, final int handlerSyncs) { source.incrementSyncTime(timeInNanos/1000000L); } MetricsWAL(); @VisibleForTesting MetricsWAL(MetricsWALSource s); @Override void postSync(final long timeInNanos, final int handlerSyncs); @Override void postAppend(final long size, final long time); @Override void logRollRequested(boolean underReplicated); } | @Test public void testPostSync() throws Exception { long nanos = TimeUnit.MILLISECONDS.toNanos(145); MetricsWALSource source = mock(MetricsWALSourceImpl.class); MetricsWAL metricsWAL = new MetricsWAL(source); metricsWAL.postSync(nanos, 1); verify(source, times(1)).incrementSyncTime(145); } |
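postSync converts nanoseconds to milliseconds with plain integer division (timeInNanos/1000000L), which is why the test expects exactly 145 for TimeUnit.MILLISECONDS.toNanos(145). One consequence worth noting: sub-millisecond syncs truncate to 0 in the metric. A runnable demonstration:

import java.util.concurrent.TimeUnit;

public class SyncTimeDemo {
  public static void main(String[] args) {
    long nanos = TimeUnit.MILLISECONDS.toNanos(145);
    System.out.println(nanos / 1_000_000L);                   // 145, as the test asserts
    System.out.println(TimeUnit.NANOSECONDS.toMillis(nanos)); // 145, equivalent conversion
    System.out.println(999_999L / 1_000_000L);                // 0: sub-ms syncs vanish
  }
}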
FSHLog implements WAL { @Override public void close() throws IOException { shutdown(); final FileStatus[] files = getFiles(); if (null != files && 0 != files.length) { for (FileStatus file : files) { Path p = getWALArchivePath(this.fullPathArchiveDir, file.getPath()); if (!this.listeners.isEmpty()) { for (WALActionsListener i : this.listeners) { i.preLogArchive(file.getPath(), p); } } if (!FSUtils.renameAndSetModifyTime(fs, file.getPath(), p)) { throw new IOException("Unable to rename " + file.getPath() + " to " + p); } if (!this.listeners.isEmpty()) { for (WALActionsListener i : this.listeners) { i.postLogArchive(file.getPath(), p); } } } LOG.debug("Moved " + files.length + " WAL file(s) to " + FSUtils.getPath(this.fullPathArchiveDir)); } LOG.info("Closed WAL: " + toString()); } FSHLog(final FileSystem fs, final Path root, final String logDir, final Configuration conf); FSHLog(final FileSystem fs, final Path rootDir, final String logDir,
final String archiveDir, final Configuration conf,
final List<WALActionsListener> listeners,
final boolean failIfWALExists, final String prefix, final String suffix); @Override void registerWALActionsListener(final WALActionsListener listener); @Override boolean unregisterWALActionsListener(final WALActionsListener listener); @Override WALCoprocessorHost getCoprocessorHost(); @Override byte [][] rollWriter(); @Override byte [][] rollWriter(boolean force); static Path getWALArchivePath(Path archiveDir, Path p); Path getCurrentFileName(); @Override String toString(); @Override void close(); @Override void shutdown(); @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH_EXCEPTION", justification="Will never be null") @Override long append(final HTableDescriptor htd, final HRegionInfo hri, final WALKey key,
final WALEdit edits, final boolean inMemstore); @Override void sync(); @Override void sync(long txid); void requestLogRoll(); int getNumRolledLogFiles(); int getNumLogFiles(); long getLogFileSize(); @Override Long startCacheFlush(final byte[] encodedRegionName, Set<byte[]> families); @Override void completeCacheFlush(final byte [] encodedRegionName); @Override void abortCacheFlush(byte[] encodedRegionName); @Override long getEarliestMemstoreSeqNum(byte[] encodedRegionName); @Override long getEarliestMemstoreSeqNum(byte[] encodedRegionName, byte[] familyName); static void main(String[] args); static final long FIXED_OVERHEAD; } | @Test public void testSyncRunnerIndexOverflow() throws IOException, NoSuchFieldException, SecurityException, IllegalArgumentException, IllegalAccessException { final String name = "testSyncRunnerIndexOverflow"; FSHLog log = new FSHLog(fs, FSUtils.getRootDir(conf), name, HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, null); try { Field ringBufferEventHandlerField = FSHLog.class.getDeclaredField("ringBufferEventHandler"); ringBufferEventHandlerField.setAccessible(true); FSHLog.RingBufferEventHandler ringBufferEventHandler = (FSHLog.RingBufferEventHandler) ringBufferEventHandlerField.get(log); Field syncRunnerIndexField = FSHLog.RingBufferEventHandler.class.getDeclaredField("syncRunnerIndex"); syncRunnerIndexField.setAccessible(true); syncRunnerIndexField.set(ringBufferEventHandler, Integer.MAX_VALUE - 1); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1")).addFamily(new HColumnDescriptor("row")); HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW); MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(); for (int i = 0; i < 10; i++) { addEdits(log, hri, htd, 1, mvcc); } } finally { log.close(); } } |
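FSHLog.close() archives rather than deletes: after shutdown, every remaining WAL file is renamed into the archive directory, and each move is bracketed by preLogArchive/postLogArchive listener callbacks. A hedged java.nio.file sketch of that move-with-hooks loop; the listener interface and class names here are hypothetical, and the real code uses HDFS renames, not NIO:

import java.io.IOException;
import java.nio.file.*;
import java.util.List;

// Hypothetical listener mirroring the pre/postLogArchive hooks.
interface ArchiveListener {
  void preArchive(Path from, Path to);
  void postArchive(Path from, Path to);
}

class Archiver {
  static void archiveAll(Path logDir, Path archiveDir,
      List<ArchiveListener> listeners) throws IOException {
    Files.createDirectories(archiveDir);
    try (DirectoryStream<Path> files = Files.newDirectoryStream(logDir)) {
      for (Path file : files) {
        Path target = archiveDir.resolve(file.getFileName());
        for (ArchiveListener l : listeners) l.preArchive(file, target);
        // Rename into the archive, failing loudly (as FSHLog does) if refused.
        Files.move(file, target, StandardCopyOption.ATOMIC_MOVE);
        for (ArchiveListener l : listeners) l.postArchive(file, target);
      }
    }
  }
}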
SequenceIdAccounting { Long startCacheFlush(final byte[] encodedRegionName, final Set<byte[]> families) { Map<byte[], Long> oldSequenceIds = null; Long lowestUnflushedInRegion = HConstants.NO_SEQNUM; synchronized (tieLock) { Map<byte[], Long> m = this.lowestUnflushedSequenceIds.get(encodedRegionName); if (m != null) { for (byte[] familyName: families) { Long seqId = m.remove(familyName); if (seqId != null) { if (oldSequenceIds == null) oldSequenceIds = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); oldSequenceIds.put(familyName, seqId); } } if (oldSequenceIds != null && !oldSequenceIds.isEmpty()) { if (this.flushingSequenceIds.put(encodedRegionName, oldSequenceIds) != null) { LOG.warn("Flushing Map not cleaned up for " + Bytes.toString(encodedRegionName) + ", sequenceid=" + oldSequenceIds); } } if (m.isEmpty()) { this.lowestUnflushedSequenceIds.remove(encodedRegionName); } else { lowestUnflushedInRegion = Collections.min(m.values()); } } } if (oldSequenceIds != null && oldSequenceIds.isEmpty()) { LOG.warn("Couldn't find oldest sequenceid for " + Bytes.toString(encodedRegionName)); } return lowestUnflushedInRegion; } } | @Test public void testStartCacheFlush() { SequenceIdAccounting sida = new SequenceIdAccounting(); sida.getOrCreateLowestSequenceIds(ENCODED_REGION_NAME); Map<byte[], Long> m = new HashMap<byte[], Long>(); m.put(ENCODED_REGION_NAME, HConstants.NO_SEQNUM); assertEquals(HConstants.NO_SEQNUM, (long)sida.startCacheFlush(ENCODED_REGION_NAME, FAMILIES)); sida.completeCacheFlush(ENCODED_REGION_NAME); long sequenceid = 1; sida.update(ENCODED_REGION_NAME, FAMILIES, sequenceid, true); assertEquals(HConstants.NO_SEQNUM, (long)sida.startCacheFlush(ENCODED_REGION_NAME, FAMILIES)); sida.completeCacheFlush(ENCODED_REGION_NAME); long currentSequenceId = sequenceid; sida.update(ENCODED_REGION_NAME, FAMILIES, sequenceid, true); final Set<byte[]> otherFamily = new HashSet<byte[]>(1); otherFamily.add(Bytes.toBytes("otherCf")); sida.update(ENCODED_REGION_NAME, FAMILIES, ++sequenceid, true); assertEquals(currentSequenceId, (long)sida.startCacheFlush(ENCODED_REGION_NAME, otherFamily)); sida.completeCacheFlush(ENCODED_REGION_NAME); } |
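startCacheFlush moves the named families' lowest unflushed ids into a per-region "flushing" map and then reports the lowest id still unflushed, returning NO_SEQNUM when nothing remains, which is why the test sees NO_SEQNUM until it updates an extra family. Schematically, for a single region and with hypothetical simplified maps:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

// Schematic of startCacheFlush for one region: families being flushed
// migrate from the unflushed map to the flushing map; the return value is
// the lowest id still unflushed (NO_SEQNUM when none remain).
class FlushAccounting {
  static final long NO_SEQNUM = -1;
  final Map<String, Long> unflushed = new HashMap<>();           // family -> lowest id
  final Map<String, Map<String, Long>> flushing = new HashMap<>(); // region -> moved ids

  long startCacheFlush(String region, Set<String> families) {
    Map<String, Long> moved = new HashMap<>();
    for (String f : families) {
      Long id = unflushed.remove(f);
      if (id != null) moved.put(f, id); // remember what is now being flushed
    }
    if (!moved.isEmpty()) flushing.put(region, moved);
    return unflushed.isEmpty() ? NO_SEQNUM : Collections.min(unflushed.values());
  }
}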
SequenceIdAccounting { boolean areAllLower(Map<byte[], Long> sequenceids) { Map<byte[], Long> flushing = null; Map<byte[], Long> unflushed = null; synchronized (this.tieLock) { flushing = flattenToLowestSequenceId(this.flushingSequenceIds); unflushed = flattenToLowestSequenceId(this.lowestUnflushedSequenceIds); } for (Map.Entry<byte[], Long> e : sequenceids.entrySet()) { long oldestFlushing = Long.MAX_VALUE; long oldestUnflushed = Long.MAX_VALUE; if (flushing != null) { if (flushing.containsKey(e.getKey())) oldestFlushing = flushing.get(e.getKey()); } if (unflushed != null) { if (unflushed.containsKey(e.getKey())) oldestUnflushed = unflushed.get(e.getKey()); } long min = Math.min(oldestFlushing, oldestUnflushed); if (min <= e.getValue()) return false; } return true; } } | @Test public void testAreAllLower() { SequenceIdAccounting sida = new SequenceIdAccounting(); sida.getOrCreateLowestSequenceIds(ENCODED_REGION_NAME); Map<byte[], Long> m = new HashMap<byte[], Long>(); m.put(ENCODED_REGION_NAME, HConstants.NO_SEQNUM); assertTrue(sida.areAllLower(m)); long sequenceid = 1; sida.update(ENCODED_REGION_NAME, FAMILIES, sequenceid, true); sida.update(ENCODED_REGION_NAME, FAMILIES, sequenceid++, true); sida.update(ENCODED_REGION_NAME, FAMILIES, sequenceid++, true); assertTrue(sida.areAllLower(m)); m.put(ENCODED_REGION_NAME, sequenceid); assertFalse(sida.areAllLower(m)); long lowest = sida.getLowestSequenceId(ENCODED_REGION_NAME); assertEquals("Lowest should be first sequence id inserted", 1, lowest); m.put(ENCODED_REGION_NAME, lowest); assertFalse(sida.areAllLower(m)); sida.startCacheFlush(ENCODED_REGION_NAME, FAMILIES); assertFalse(sida.areAllLower(m)); m.put(ENCODED_REGION_NAME, HConstants.NO_SEQNUM); assertTrue(sida.areAllLower(m)); sida.completeCacheFlush(ENCODED_REGION_NAME); m.put(ENCODED_REGION_NAME, sequenceid); assertTrue(sida.areAllLower(m)); sida.update(ENCODED_REGION_NAME, FAMILIES, sequenceid++, true); sida.update(ENCODED_REGION_NAME, FAMILIES, sequenceid++, true); sida.update(ENCODED_REGION_NAME, FAMILIES, sequenceid++, true); lowest = sida.getLowestSequenceId(ENCODED_REGION_NAME); m.put(ENCODED_REGION_NAME, lowest); assertFalse(sida.areAllLower(m)); sida.startCacheFlush(ENCODED_REGION_NAME, FAMILIES); assertEquals(HConstants.NO_SEQNUM, sida.getLowestSequenceId(ENCODED_REGION_NAME)); sida.completeCacheFlush(ENCODED_REGION_NAME); assertEquals(HConstants.NO_SEQNUM, sida.getLowestSequenceId(ENCODED_REGION_NAME)); m.put(ENCODED_REGION_NAME, sequenceid); sida.update(ENCODED_REGION_NAME, FAMILIES, ++sequenceid, true); sida.update(ENCODED_REGION_NAME, FAMILIES, ++sequenceid, true); sida.update(ENCODED_REGION_NAME, FAMILIES, ++sequenceid, true); assertTrue(sida.areAllLower(m)); } |
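areAllLower() first flattens both internal maps to one lowest sequence id per region under the lock, then compares outside it: the queried set passes only if every entry is strictly below the region's oldest outstanding (flushing or unflushed) id. The core check in miniature, with String keys standing in for encoded region names:

import java.util.HashMap;
import java.util.Map;

// Miniature of the flatten-then-compare check: every queried id must be
// strictly below the oldest outstanding (flushing or unflushed) id.
class LowerCheck {
  static boolean areAllLower(Map<String, Long> queried,
      Map<String, Long> oldestFlushing, Map<String, Long> oldestUnflushed) {
    for (Map.Entry<String, Long> e : queried.entrySet()) {
      long f = oldestFlushing.getOrDefault(e.getKey(), Long.MAX_VALUE);
      long u = oldestUnflushed.getOrDefault(e.getKey(), Long.MAX_VALUE);
      if (Math.min(f, u) <= e.getValue()) return false; // something older remains
    }
    return true;
  }

  public static void main(String[] args) {
    Map<String, Long> un = new HashMap<>();
    un.put("r1", 5L);
    Map<String, Long> q = new HashMap<>();
    q.put("r1", 4L);
    System.out.println(areAllLower(q, new HashMap<>(), un)); // true: 4 < 5
    q.put("r1", 5L);
    System.out.println(areAllLower(q, new HashMap<>(), un)); // false: 5 is not lower
  }
}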
StealJobQueue extends PriorityBlockingQueue<T> { public BlockingQueue<T> getStealFromQueue() { return stealFromQueue; } StealJobQueue(); BlockingQueue<T> getStealFromQueue(); @Override boolean offer(T t); @Override T take(); @Override T poll(long timeout, TimeUnit unit); } | @Test public void testInteractWithThreadPool() throws InterruptedException { StealJobQueue<Runnable> stealTasksQueue = new StealJobQueue<>(); final CountDownLatch stealJobCountDown = new CountDownLatch(3); final CountDownLatch stealFromCountDown = new CountDownLatch(3); ThreadPoolExecutor stealPool = new ThreadPoolExecutor(3, 3, 1, TimeUnit.DAYS, stealTasksQueue) { @Override protected void afterExecute(Runnable r, Throwable t) { super.afterExecute(r, t); stealJobCountDown.countDown(); } }; stealPool.prestartAllCoreThreads(); ThreadPoolExecutor stealFromPool = new ThreadPoolExecutor(3, 3, 1, TimeUnit.DAYS, stealTasksQueue.getStealFromQueue()) { @Override protected void afterExecute(Runnable r, Throwable t) { super.afterExecute(r, t); stealFromCountDown.countDown(); } }; for (int i = 0; i < 4; i++) { TestTask task = new TestTask(); stealFromPool.execute(task); } for (int i = 0; i < 2; i++) { TestTask task = new TestTask(); stealPool.execute(task); } stealJobCountDown.await(1, TimeUnit.SECONDS); stealFromCountDown.await(1, TimeUnit.SECONDS); assertEquals(0, stealFromCountDown.getCount()); assertEquals(0, stealJobCountDown.getCount()); } |
SequenceIdAccounting { byte[][] findLower(Map<byte[], Long> sequenceids) { List<byte[]> toFlush = null; synchronized (tieLock) { for (Map.Entry<byte[], Long> e: sequenceids.entrySet()) { Map<byte[], Long> m = this.lowestUnflushedSequenceIds.get(e.getKey()); if (m == null) continue; long lowest = getLowestSequenceId(m); if (lowest != HConstants.NO_SEQNUM && lowest <= e.getValue()) { if (toFlush == null) toFlush = new ArrayList<byte[]>(); toFlush.add(e.getKey()); } } } return toFlush == null? null: toFlush.toArray(new byte[][] { HConstants.EMPTY_BYTE_ARRAY }); } } | @Test public void testFindLower() { SequenceIdAccounting sida = new SequenceIdAccounting(); sida.getOrCreateLowestSequenceIds(ENCODED_REGION_NAME); Map<byte[], Long> m = new HashMap<byte[], Long>(); m.put(ENCODED_REGION_NAME, HConstants.NO_SEQNUM); long sequenceid = 1; sida.update(ENCODED_REGION_NAME, FAMILIES, sequenceid, true); sida.update(ENCODED_REGION_NAME, FAMILIES, sequenceid++, true); sida.update(ENCODED_REGION_NAME, FAMILIES, sequenceid++, true); assertTrue(sida.findLower(m) == null); m.put(ENCODED_REGION_NAME, sida.getLowestSequenceId(ENCODED_REGION_NAME)); assertTrue(sida.findLower(m).length == 1); m.put(ENCODED_REGION_NAME, sida.getLowestSequenceId(ENCODED_REGION_NAME) - 1); assertTrue(sida.findLower(m) == null); } |
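One wrinkle in findLower() is the return conversion: List.toArray is handed a length-1 seed array, which the JDK overwrites when the list is non-empty and replaces with a freshly allocated array when the list is larger; the seed element is never returned to the caller. A short demonstration of that toArray contract:

import java.util.Arrays;
import java.util.List;

public class ToArrayDemo {
  public static void main(String[] args) {
    List<byte[]> toFlush = Arrays.asList(new byte[]{1}, new byte[]{2});
    // Slot 0 of the seed is overwritten for a non-empty list, and a larger
    // array is allocated when the list overflows the seed.
    byte[][] out = toFlush.toArray(new byte[][] { new byte[0] });
    System.out.println(out.length); // 2
    System.out.println(out[0][0]);  // 1: the seed element is gone
  }
}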
SplitLogWorker implements Runnable { public void start() { worker = new Thread(null, this, "SplitLogWorker-" + server.getServerName().toShortString()); worker.start(); } SplitLogWorker(Server hserver, Configuration conf, RegionServerServices server,
TaskExecutor splitTaskExecutor); SplitLogWorker(final Server hserver, final Configuration conf,
final RegionServerServices server, final LastSequenceId sequenceIdChecker,
final WALFactory factory); @Override void run(); void stopTask(); void start(); void stop(); @VisibleForTesting int getTaskReadySeq(); } | @Test(timeout=60000) public void testAcquireTaskAtStartup() throws Exception { LOG.info("testAcquireTaskAtStartup"); SplitLogCounters.resetCounters(); final String TATAS = "tatas"; final ServerName RS = ServerName.valueOf("rs,1,1"); RegionServerServices mockedRS = getRegionServer(RS); zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS), new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1"), this.mode).toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); SplitLogWorker slw = new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), mockedRS, neverEndingTask); slw.start(); try { waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME); byte [] bytes = ZKUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TATAS)); SplitLogTask slt = SplitLogTask.parseFrom(bytes); assertTrue(slt.isOwned(RS)); } finally { stopSplitLogWorker(slw); } }
@Test(timeout=60000) public void testRaceForTask() throws Exception { LOG.info("testRaceForTask"); SplitLogCounters.resetCounters(); final String TRFT = "trft"; final ServerName SVR1 = ServerName.valueOf("svr1,1,1"); final ServerName SVR2 = ServerName.valueOf("svr2,1,1"); zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TRFT), new SplitLogTask.Unassigned(MANAGER, this.mode).toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); RegionServerServices mockedRS1 = getRegionServer(SVR1); RegionServerServices mockedRS2 = getRegionServer(SVR2); SplitLogWorker slw1 = new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), mockedRS1, neverEndingTask); SplitLogWorker slw2 = new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), mockedRS2, neverEndingTask); slw1.start(); slw2.start(); try { waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME); assertTrue(waitForCounterBoolean(SplitLogCounters.tot_wkr_failed_to_grab_task_owned, 0, 1, WAIT_TIME, false) || SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.get() == 1); byte [] bytes = ZKUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TRFT)); SplitLogTask slt = SplitLogTask.parseFrom(bytes); assertTrue(slt.isOwned(SVR1) || slt.isOwned(SVR2)); } finally { stopSplitLogWorker(slw1); stopSplitLogWorker(slw2); } }
@Test(timeout=60000) public void testRescan() throws Exception { LOG.info("testRescan"); SplitLogCounters.resetCounters(); final ServerName SRV = ServerName.valueOf("svr,1,1"); RegionServerServices mockedRS = getRegionServer(SRV); slw = new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), mockedRS, neverEndingTask); slw.start(); Thread.yield(); Thread.sleep(100); String task = ZKSplitLog.getEncodedNodeName(zkw, "task"); SplitLogTask slt = new SplitLogTask.Unassigned(MANAGER, this.mode); zkw.getRecoverableZooKeeper().create(task,slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME); ZKUtil.setData(zkw, task, slt.toByteArray()); waitForCounter(SplitLogCounters.tot_wkr_preempt_task, 0, 1, WAIT_TIME); String rescan = ZKSplitLog.getEncodedNodeName(zkw, "RESCAN"); rescan = zkw.getRecoverableZooKeeper().create(rescan, slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT_SEQUENTIAL); waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 1, 2, WAIT_TIME); ZKUtil.setData(zkw, task, slt.toByteArray()); waitForCounter(SplitLogCounters.tot_wkr_preempt_task, 1, 2, WAIT_TIME); waitForCounter(SplitLogCounters.tot_wkr_task_acquired_rescan, 0, 1, WAIT_TIME); List<String> nodes = ZKUtil.listChildrenNoWatch(zkw, zkw.splitLogZNode); LOG.debug(nodes); int num = 0; for (String node : nodes) { num++; if (node.startsWith("RESCAN")) { String name = ZKSplitLog.getEncodedNodeName(zkw, node); String fn = ZKSplitLog.getFileName(name); byte [] data = ZKUtil.getData(zkw, ZKUtil.joinZNode(zkw.splitLogZNode, fn)); slt = SplitLogTask.parseFrom(data); assertTrue(slt.toString(), slt.isDone(SRV)); } } assertEquals(2, num); }
@Test(timeout=60000) public void testAcquireMultiTasks() throws Exception { LOG.info("testAcquireMultiTasks"); SplitLogCounters.resetCounters(); final String TATAS = "tatas"; final ServerName RS = ServerName.valueOf("rs,1,1"); final int maxTasks = 3; Configuration testConf = HBaseConfiguration.create(TEST_UTIL.getConfiguration()); testConf.setInt("hbase.regionserver.wal.max.splitters", maxTasks); RegionServerServices mockedRS = getRegionServer(RS); for (int i = 0; i < maxTasks; i++) { zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS + i), new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1"), this.mode).toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); } SplitLogWorker slw = new SplitLogWorker(ds, testConf, mockedRS, neverEndingTask); slw.start(); try { waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, maxTasks, WAIT_TIME); for (int i = 0; i < maxTasks; i++) { byte[] bytes = ZKUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TATAS + i)); SplitLogTask slt = SplitLogTask.parseFrom(bytes); assertTrue(slt.isOwned(RS)); } } finally { stopSplitLogWorker(slw); } }
@Test(timeout=60000) public void testAcquireMultiTasksByAvgTasksPerRS() throws Exception { LOG.info("testAcquireMultiTasksByAvgTasksPerRS"); SplitLogCounters.resetCounters(); final String TATAS = "tatas"; final ServerName RS = ServerName.valueOf("rs,1,1"); final ServerName RS2 = ServerName.valueOf("rs,1,2"); final int maxTasks = 3; Configuration testConf = HBaseConfiguration.create(TEST_UTIL.getConfiguration()); testConf.setInt("hbase.regionserver.wal.max.splitters", maxTasks); RegionServerServices mockedRS = getRegionServer(RS); String rsPath = ZKUtil.joinZNode(zkw.rsZNode, RS.getServerName()); zkw.getRecoverableZooKeeper().create(rsPath, null, Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL); rsPath = ZKUtil.joinZNode(zkw.rsZNode, RS2.getServerName()); zkw.getRecoverableZooKeeper().create(rsPath, null, Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL); for (int i = 0; i < maxTasks; i++) { zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS + i), new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1"), this.mode).toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); } SplitLogWorker slw = new SplitLogWorker(ds, testConf, mockedRS, neverEndingTask); slw.start(); try { int acquiredTasks = 0; waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 2, WAIT_TIME); for (int i = 0; i < maxTasks; i++) { byte[] bytes = ZKUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TATAS + i)); SplitLogTask slt = SplitLogTask.parseFrom(bytes); if (slt.isOwned(RS)) { acquiredTasks++; } } assertEquals(2, acquiredTasks); } finally { stopSplitLogWorker(slw); } }
HRegion implements HeapSize, PropagatingConfigurationObserver, Region { public static void closeHRegion(final HRegion r) throws IOException { if (r == null) return; r.close(); if (r.getWAL() == null) return; r.getWAL().close(); } @Deprecated @VisibleForTesting HRegion(final Path tableDir, final WAL wal,
final FileSystem fs, final Configuration confParam, final HRegionInfo regionInfo,
final HTableDescriptor htd, final RegionServerServices rsServices); HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration confParam,
final HTableDescriptor htd, final RegionServerServices rsServices); static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf,
final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo); static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf,
final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo, Path tablePath); static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
final Configuration conf, final HTableDescriptor hTableDescriptor); static void closeHRegion(final HRegion r); static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
final Configuration conf, final HTableDescriptor hTableDescriptor, final WAL wal,
final boolean initialize); static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
final Configuration conf, final HTableDescriptor hTableDescriptor, final WAL wal,
final boolean initialize, final boolean ignoreWAL); static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
final Path tableDir, final Configuration conf, final HTableDescriptor hTableDescriptor,
final WAL wal, final boolean initialize, final boolean ignoreWAL); static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
final Configuration conf, final HTableDescriptor hTableDescriptor, final WAL wal); static HRegion openHRegion(final HRegionInfo info, final HTableDescriptor htd,
final WAL wal, final Configuration conf); static HRegion openHRegion(final HRegionInfo info, final HTableDescriptor htd,
final WAL wal, final Configuration conf, final RegionServerServices rsServices,
final CancelableProgressable reporter); static HRegion openHRegion(Path rootDir, final HRegionInfo info,
final HTableDescriptor htd, final WAL wal, final Configuration conf); static HRegion openHRegion(final Path rootDir, final HRegionInfo info,
final HTableDescriptor htd, final WAL wal, final Configuration conf,
final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(final Configuration conf, final FileSystem fs,
final Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final WAL wal); static HRegion openHRegion(final Configuration conf, final FileSystem fs,
final Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final WAL wal,
final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(final Configuration conf, final FileSystem fs,
final Path rootDir, final Path tableDir, final HRegionInfo info, final HTableDescriptor htd,
final WAL wal, final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(final HRegion other, final CancelableProgressable reporter); static Region openHRegion(final Region other, final CancelableProgressable reporter); static void warmupHRegion(final HRegionInfo info, final HTableDescriptor htd,
final WAL wal, final Configuration conf, final RegionServerServices rsServices,
final CancelableProgressable reporter); static void addRegionToMETA(final HRegion meta, final HRegion r); @Deprecated static Path getRegionDir(final Path tabledir, final String name); @Deprecated @VisibleForTesting static Path getRegionDir(final Path rootdir,
final HRegionInfo info); static boolean rowIsInRange(HRegionInfo info, final byte[] row); static boolean rowIsInRange(HRegionInfo info, final byte[] row, final int offset,
final short length); static HRegion mergeAdjacent(final HRegion srcA, final HRegion srcB); static HRegion merge(final HRegion a, final HRegion b); static void main(String[] args); long getSmallestReadPoint(); Set<byte[]> getAvaliableIndexes(); @Deprecated long initialize(); boolean hasReferences(); @Override HDFSBlocksDistribution getHDFSBlocksDistribution(); long addAndGetGlobalMemstoreSize(long memStoreSize); @Override HRegionInfo getRegionInfo(); RegionServerServices getRegionServerServices(); @Override long getReadRequestsCount(); @Override void updateReadRequestsCount(long i); @Override long getWriteRequestsCount(); @Override void updateWriteRequestsCount(long i); @Override long getMemstoreSize(); @Override long getNumMutationsWithoutWAL(); @Override long getDataInMemoryWithoutWAL(); @Override long getBlockedRequestsCount(); @Override long getCheckAndMutateChecksPassed(); @Override long getCheckAndMutateChecksFailed(); @Override MetricsRegion getMetrics(); @Override boolean isClosed(); @Override boolean isClosing(); @VisibleForTesting void setClosing(boolean closing); @Override boolean isReadOnly(); @Override boolean isRecovering(); void setRecovering(boolean newState); @Override boolean isAvailable(); boolean isSplittable(); boolean isMergeable(); boolean areWritesEnabled(); MultiVersionConcurrencyControl getMVCC(); @Override long getMaxFlushedSeqId(); @Override long getReadpoint(IsolationLevel isolationLevel); @Override boolean isLoadingCfsOnDemandDefault(); Map<byte[], List<StoreFile>> close(); Map<byte[], List<StoreFile>> close(final boolean abort); @Override void waitForFlushesAndCompactions(); @Override HTableDescriptor getTableDesc(); WAL getWAL(); FileSystem getFilesystem(); HRegionFileSystem getRegionFileSystem(); @Override long getEarliestFlushTimeForAllStores(); @Override long getOldestHfileTs(boolean majorCompactioOnly); long getLargestHStoreSize(); KeyValue.KVComparator getComparator(); @Override void triggerMajorCompaction(); @Override void compact(final boolean majorCompaction); void compactStores(); boolean compact(CompactionContext compaction, Store store,
CompactionThroughputController throughputController); boolean compact(CompactionContext compaction, Store store,
CompactionThroughputController throughputController, User user); @Override FlushResult flush(boolean force); FlushResult flushcache(boolean forceFlushAllStores, boolean writeFlushRequestWalMarker); @Override Result getClosestRowBefore(final byte[] row, final byte[] family); @Override RegionScanner getScanner(Scan scan); @Override RegionScanner getScanner(Scan scan, List<KeyValueScanner> additionalScanners); @Override void prepareDelete(Delete delete); @Override void delete(Delete delete); @Override void prepareDeleteTimestamps(Mutation mutation, Map<byte[], List<Cell>> familyMap,
byte[] byteNow); @Override void put(Put put); @Override OperationStatus[] batchMutate(Mutation[] mutations, long nonceGroup, long nonce); OperationStatus[] batchMutate(Mutation[] mutations); @Override OperationStatus[] batchReplay(MutationReplay[] mutations, long replaySeqId); @Override boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
ByteArrayComparable comparator, Mutation w, boolean writeToWAL); @Override boolean checkAndRowMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
ByteArrayComparable comparator, RowMutations rm, boolean writeToWAL); void addRegionToSnapshot(SnapshotDescription desc, ForeignExceptionSnare exnSnare); @Override void updateCellTimestamps(final Iterable<List<Cell>> cellItr, final byte[] now); void setReadsEnabled(boolean readsEnabled); @Override void checkFamilies(Collection<byte[]> families); @Override void checkTimestamps(final Map<byte[], List<Cell>> familyMap, long now); @Override boolean refreshStoreFiles(); @Override Store getStore(final byte[] column); @Override List<Store> getStores(); @Override List<String> getStoreFileList(final byte[][] columns); RowLock getRowLock(byte[] row); RowLock getRowLock(byte[] row, boolean readLock); @Override void releaseRowLocks(List<RowLock> rowLocks); @Override boolean bulkLoadHFiles(Collection<Pair<byte[], String>> familyPaths, boolean assignSeqId,
BulkLoadListener bulkLoadListener); @Override boolean equals(Object o); @Override int hashCode(); @Override String toString(); @Override Result get(final Get get); @Override List<Cell> get(Get get, boolean withCoprocessor); @Override void mutateRow(RowMutations rm); void mutateRowsWithLocks(Collection<Mutation> mutations, Collection<byte[]> rowsToLock); @Override void mutateRowsWithLocks(Collection<Mutation> mutations,
Collection<byte[]> rowsToLock, long nonceGroup, long nonce); ClientProtos.RegionLoadStats getRegionStats(); @Override void processRowsWithLocks(RowProcessor<?, ?> processor); @Override void processRowsWithLocks(RowProcessor<?, ?> processor, long nonceGroup, long nonce); @Override void processRowsWithLocks(RowProcessor<?, ?> processor, long timeout, long nonceGroup,
long nonce); Result append(Append append); @Override Result append(Append mutate, long nonceGroup, long nonce); Result increment(Increment increment); @Override Result increment(Increment mutation, long nonceGroup, long nonce); @Override long heapSize(); @Override boolean registerService(Service instance); @Override Message execService(RpcController controller, CoprocessorServiceCall call); byte[] checkSplit(); int getCompactPriority(); @Override RegionCoprocessorHost getCoprocessorHost(); void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost); @Override void startRegionOperation(); @Override @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SF_SWITCH_FALLTHROUGH", justification = "Intentional") void startRegionOperation(Operation op); @Override void closeRegionOperation(); void closeRegionOperation(Operation operation); @Override long getOpenSeqNum(); @Override Map<byte[], Long> getMaxStoreSeqId(); @Override long getOldestSeqIdOfStore(byte[] familyName); @Override CompactionState getCompactionState(); void reportCompactionRequestStart(boolean isMajor); void reportCompactionRequestEnd(boolean isMajor, int numFiles, long filesSizeCompacted); @VisibleForTesting long getSequenceId(); @Override void onConfigurationChange(Configuration conf); @Override void registerChildren(ConfigurationManager manager); @Override void deregisterChildren(ConfigurationManager manager); RegionSplitPolicy getSplitPolicy(); static final String LOAD_CFS_ON_DEMAND_CONFIG_KEY; static final String MEMSTORE_PERIODIC_FLUSH_INTERVAL; static final int DEFAULT_CACHE_FLUSH_INTERVAL; static final int SYSTEM_CACHE_FLUSH_INTERVAL; static final String MEMSTORE_FLUSH_PER_CHANGES; static final long DEFAULT_FLUSH_PER_CHANGES; static final long MAX_FLUSH_PER_CHANGES; static final long FIXED_OVERHEAD; static final long DEEP_OVERHEAD; public IndexTableRelation indexTableRelation; } | @Test public void testGetWhileRegionClose() throws IOException { TableName tableName = TableName.valueOf(name.getMethodName()); Configuration hc = initSplit(); int numRows = 100; byte[][] families = { fam1, fam2, fam3 }; String method = name.getMethodName(); this.region = initHRegion(tableName, method, hc, families); try { final int startRow = 100; putData(startRow, numRows, qual1, families); putData(startRow, numRows, qual2, families); putData(startRow, numRows, qual3, families); final AtomicBoolean done = new AtomicBoolean(false); final AtomicInteger gets = new AtomicInteger(0); GetTillDoneOrException[] threads = new GetTillDoneOrException[10]; try { for (int i = 0; i < threads.length / 2; i++) { threads[i] = new GetTillDoneOrException(i, Bytes.toBytes("" + startRow), done, gets); threads[i].setDaemon(true); threads[i].start(); } this.region.closing.set(true); for (int i = threads.length / 2; i < threads.length; i++) { threads[i] = new GetTillDoneOrException(i, Bytes.toBytes("" + startRow), done, gets); threads[i].setDaemon(true); threads[i].start(); } } finally { if (this.region != null) { HRegion.closeHRegion(this.region); } } done.set(true); for (GetTillDoneOrException t : threads) { try { t.join(); } catch (InterruptedException e) { e.printStackTrace(); } if (t.e != null) { LOG.info("Exception=" + t.e); assertFalse("Found a NPE in " + t.getName(), t.e instanceof NullPointerException); } } } finally { HRegion.closeHRegion(this.region); this.region = null; } }
@Test public void testWeirdCacheBehaviour() throws Exception { byte[] TABLE = Bytes.toBytes("testWeirdCacheBehaviour"); byte[][] FAMILIES = new byte[][] { Bytes.toBytes("trans-blob"), Bytes.toBytes("trans-type"), Bytes.toBytes("trans-date"), Bytes.toBytes("trans-tags"), Bytes.toBytes("trans-group") }; this.region = initHRegion(TABLE, getName(), CONF, FAMILIES); try { String value = "this is the value"; String value2 = "this is some other value"; String keyPrefix1 = "prefix1"; String keyPrefix2 = "prefix2"; String keyPrefix3 = "prefix3"; putRows(this.region, 3, value, keyPrefix1); putRows(this.region, 3, value, keyPrefix2); putRows(this.region, 3, value, keyPrefix3); putRows(this.region, 3, value2, keyPrefix1); putRows(this.region, 3, value2, keyPrefix2); putRows(this.region, 3, value2, keyPrefix3); System.out.println("Checking values for key: " + keyPrefix1); assertEquals("Got back incorrect number of rows from scan", 3, getNumberOfRows(keyPrefix1, value2, this.region)); System.out.println("Checking values for key: " + keyPrefix2); assertEquals("Got back incorrect number of rows from scan", 3, getNumberOfRows(keyPrefix2, value2, this.region)); System.out.println("Checking values for key: " + keyPrefix3); assertEquals("Got back incorrect number of rows from scan", 3, getNumberOfRows(keyPrefix3, value2, this.region)); deleteColumns(this.region, value2, keyPrefix1); deleteColumns(this.region, value2, keyPrefix2); deleteColumns(this.region, value2, keyPrefix3); System.out.println("Starting important checks....."); assertEquals("Got back incorrect number of rows from scan: " + keyPrefix1, 0, getNumberOfRows(keyPrefix1, value2, this.region)); assertEquals("Got back incorrect number of rows from scan: " + keyPrefix2, 0, getNumberOfRows(keyPrefix2, value2, this.region)); assertEquals("Got back incorrect number of rows from scan: " + keyPrefix3, 0, getNumberOfRows(keyPrefix3, value2, this.region)); } finally { HRegion.closeHRegion(this.region); this.region = null; } } |
HRegion implements HeapSize, PropagatingConfigurationObserver, Region { public static HRegion merge(final HRegion a, final HRegion b) throws IOException { if (!a.getRegionInfo().getTable().equals(b.getRegionInfo().getTable())) { throw new IOException("Regions do not belong to the same table"); } FileSystem fs = a.getRegionFileSystem().getFileSystem(); a.flush(true); b.flush(true); a.compact(true); if (LOG.isDebugEnabled()) { LOG.debug("Files for region: " + a); a.getRegionFileSystem().logFileSystemState(LOG); } b.compact(true); if (LOG.isDebugEnabled()) { LOG.debug("Files for region: " + b); b.getRegionFileSystem().logFileSystemState(LOG); } RegionMergeTransactionImpl rmt = new RegionMergeTransactionImpl(a, b, true); if (!rmt.prepare(null)) { throw new IOException("Unable to merge regions " + a + " and " + b); } HRegionInfo mergedRegionInfo = rmt.getMergedRegionInfo(); LOG.info( "starting merge of regions: " + a + " and " + b + " into new region " + mergedRegionInfo .getRegionNameAsString() + " with start key <" + Bytes .toStringBinary(mergedRegionInfo.getStartKey()) + "> and end key <" + Bytes .toStringBinary(mergedRegionInfo.getEndKey()) + ">"); HRegion dstRegion; try { dstRegion = (HRegion) rmt.execute(null, null); } catch (IOException ioe) { rmt.rollback(null, null); throw new IOException( "Failed merging region " + a + " and " + b + ", and successfully rolled back"); } dstRegion.compact(true); if (LOG.isDebugEnabled()) { LOG.debug("Files for new region"); dstRegion.getRegionFileSystem().logFileSystemState(LOG); } if (dstRegion.getRegionFileSystem().hasReferences(dstRegion.getTableDesc())) { throw new IOException("Merged region " + dstRegion + " still has references after the compaction, is compaction canceled?"); } HFileArchiver.archiveRegion(a.getBaseConf(), fs, a.getRegionInfo()); HFileArchiver.archiveRegion(b.getBaseConf(), fs, b.getRegionInfo()); LOG.info("merge completed. New region is " + dstRegion); return dstRegion; } @Deprecated @VisibleForTesting HRegion(final Path tableDir, final WAL wal,
final FileSystem fs, final Configuration confParam, final HRegionInfo regionInfo,
final HTableDescriptor htd, final RegionServerServices rsServices); HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration confParam,
final HTableDescriptor htd, final RegionServerServices rsServices); static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf,
final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo); static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf,
final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo, Path tablePath); static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
final Configuration conf, final HTableDescriptor hTableDescriptor); static void closeHRegion(final HRegion r); static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
final Configuration conf, final HTableDescriptor hTableDescriptor, final WAL wal,
final boolean initialize); static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
final Configuration conf, final HTableDescriptor hTableDescriptor, final WAL wal,
final boolean initialize, final boolean ignoreWAL); static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
final Path tableDir, final Configuration conf, final HTableDescriptor hTableDescriptor,
final WAL wal, final boolean initialize, final boolean ignoreWAL); static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
final Configuration conf, final HTableDescriptor hTableDescriptor, final WAL wal); static HRegion openHRegion(final HRegionInfo info, final HTableDescriptor htd,
final WAL wal, final Configuration conf); static HRegion openHRegion(final HRegionInfo info, final HTableDescriptor htd,
final WAL wal, final Configuration conf, final RegionServerServices rsServices,
final CancelableProgressable reporter); static HRegion openHRegion(Path rootDir, final HRegionInfo info,
final HTableDescriptor htd, final WAL wal, final Configuration conf); static HRegion openHRegion(final Path rootDir, final HRegionInfo info,
final HTableDescriptor htd, final WAL wal, final Configuration conf,
final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(final Configuration conf, final FileSystem fs,
final Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final WAL wal); static HRegion openHRegion(final Configuration conf, final FileSystem fs,
final Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final WAL wal,
final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(final Configuration conf, final FileSystem fs,
final Path rootDir, final Path tableDir, final HRegionInfo info, final HTableDescriptor htd,
final WAL wal, final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(final HRegion other, final CancelableProgressable reporter); static Region openHRegion(final Region other, final CancelableProgressable reporter); static void warmupHRegion(final HRegionInfo info, final HTableDescriptor htd,
final WAL wal, final Configuration conf, final RegionServerServices rsServices,
final CancelableProgressable reporter); static void addRegionToMETA(final HRegion meta, final HRegion r); @Deprecated static Path getRegionDir(final Path tabledir, final String name); @Deprecated @VisibleForTesting static Path getRegionDir(final Path rootdir,
final HRegionInfo info); static boolean rowIsInRange(HRegionInfo info, final byte[] row); static boolean rowIsInRange(HRegionInfo info, final byte[] row, final int offset,
final short length); static HRegion mergeAdjacent(final HRegion srcA, final HRegion srcB); static HRegion merge(final HRegion a, final HRegion b); static void main(String[] args); long getSmallestReadPoint(); Set<byte[]> getAvaliableIndexes(); @Deprecated long initialize(); boolean hasReferences(); @Override HDFSBlocksDistribution getHDFSBlocksDistribution(); long addAndGetGlobalMemstoreSize(long memStoreSize); @Override HRegionInfo getRegionInfo(); RegionServerServices getRegionServerServices(); @Override long getReadRequestsCount(); @Override void updateReadRequestsCount(long i); @Override long getWriteRequestsCount(); @Override void updateWriteRequestsCount(long i); @Override long getMemstoreSize(); @Override long getNumMutationsWithoutWAL(); @Override long getDataInMemoryWithoutWAL(); @Override long getBlockedRequestsCount(); @Override long getCheckAndMutateChecksPassed(); @Override long getCheckAndMutateChecksFailed(); @Override MetricsRegion getMetrics(); @Override boolean isClosed(); @Override boolean isClosing(); @VisibleForTesting void setClosing(boolean closing); @Override boolean isReadOnly(); @Override boolean isRecovering(); void setRecovering(boolean newState); @Override boolean isAvailable(); boolean isSplittable(); boolean isMergeable(); boolean areWritesEnabled(); MultiVersionConcurrencyControl getMVCC(); @Override long getMaxFlushedSeqId(); @Override long getReadpoint(IsolationLevel isolationLevel); @Override boolean isLoadingCfsOnDemandDefault(); Map<byte[], List<StoreFile>> close(); Map<byte[], List<StoreFile>> close(final boolean abort); @Override void waitForFlushesAndCompactions(); @Override HTableDescriptor getTableDesc(); WAL getWAL(); FileSystem getFilesystem(); HRegionFileSystem getRegionFileSystem(); @Override long getEarliestFlushTimeForAllStores(); @Override long getOldestHfileTs(boolean majorCompactioOnly); long getLargestHStoreSize(); KeyValue.KVComparator getComparator(); @Override void triggerMajorCompaction(); @Override void compact(final boolean majorCompaction); void compactStores(); boolean compact(CompactionContext compaction, Store store,
CompactionThroughputController throughputController); boolean compact(CompactionContext compaction, Store store,
CompactionThroughputController throughputController, User user); @Override FlushResult flush(boolean force); FlushResult flushcache(boolean forceFlushAllStores, boolean writeFlushRequestWalMarker); @Override Result getClosestRowBefore(final byte[] row, final byte[] family); @Override RegionScanner getScanner(Scan scan); @Override RegionScanner getScanner(Scan scan, List<KeyValueScanner> additionalScanners); @Override void prepareDelete(Delete delete); @Override void delete(Delete delete); @Override void prepareDeleteTimestamps(Mutation mutation, Map<byte[], List<Cell>> familyMap,
byte[] byteNow); @Override void put(Put put); @Override OperationStatus[] batchMutate(Mutation[] mutations, long nonceGroup, long nonce); OperationStatus[] batchMutate(Mutation[] mutations); @Override OperationStatus[] batchReplay(MutationReplay[] mutations, long replaySeqId); @Override boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
ByteArrayComparable comparator, Mutation w, boolean writeToWAL); @Override boolean checkAndRowMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
ByteArrayComparable comparator, RowMutations rm, boolean writeToWAL); void addRegionToSnapshot(SnapshotDescription desc, ForeignExceptionSnare exnSnare); @Override void updateCellTimestamps(final Iterable<List<Cell>> cellItr, final byte[] now); void setReadsEnabled(boolean readsEnabled); @Override void checkFamilies(Collection<byte[]> families); @Override void checkTimestamps(final Map<byte[], List<Cell>> familyMap, long now); @Override boolean refreshStoreFiles(); @Override Store getStore(final byte[] column); @Override List<Store> getStores(); @Override List<String> getStoreFileList(final byte[][] columns); RowLock getRowLock(byte[] row); RowLock getRowLock(byte[] row, boolean readLock); @Override void releaseRowLocks(List<RowLock> rowLocks); @Override boolean bulkLoadHFiles(Collection<Pair<byte[], String>> familyPaths, boolean assignSeqId,
BulkLoadListener bulkLoadListener); @Override boolean equals(Object o); @Override int hashCode(); @Override String toString(); @Override Result get(final Get get); @Override List<Cell> get(Get get, boolean withCoprocessor); @Override void mutateRow(RowMutations rm); void mutateRowsWithLocks(Collection<Mutation> mutations, Collection<byte[]> rowsToLock); @Override void mutateRowsWithLocks(Collection<Mutation> mutations,
Collection<byte[]> rowsToLock, long nonceGroup, long nonce); ClientProtos.RegionLoadStats getRegionStats(); @Override void processRowsWithLocks(RowProcessor<?, ?> processor); @Override void processRowsWithLocks(RowProcessor<?, ?> processor, long nonceGroup, long nonce); @Override void processRowsWithLocks(RowProcessor<?, ?> processor, long timeout, long nonceGroup,
long nonce); Result append(Append append); @Override Result append(Append mutate, long nonceGroup, long nonce); Result increment(Increment increment); @Override Result increment(Increment mutation, long nonceGroup, long nonce); @Override long heapSize(); @Override boolean registerService(Service instance); @Override Message execService(RpcController controller, CoprocessorServiceCall call); byte[] checkSplit(); int getCompactPriority(); @Override RegionCoprocessorHost getCoprocessorHost(); void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost); @Override void startRegionOperation(); @Override @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SF_SWITCH_FALLTHROUGH", justification = "Intentional") void startRegionOperation(Operation op); @Override void closeRegionOperation(); void closeRegionOperation(Operation operation); @Override long getOpenSeqNum(); @Override Map<byte[], Long> getMaxStoreSeqId(); @Override long getOldestSeqIdOfStore(byte[] familyName); @Override CompactionState getCompactionState(); void reportCompactionRequestStart(boolean isMajor); void reportCompactionRequestEnd(boolean isMajor, int numFiles, long filesSizeCompacted); @VisibleForTesting long getSequenceId(); @Override void onConfigurationChange(Configuration conf); @Override void registerChildren(ConfigurationManager manager); @Override void deregisterChildren(ConfigurationManager manager); RegionSplitPolicy getSplitPolicy(); static final String LOAD_CFS_ON_DEMAND_CONFIG_KEY; static final String MEMSTORE_PERIODIC_FLUSH_INTERVAL; static final int DEFAULT_CACHE_FLUSH_INTERVAL; static final int SYSTEM_CACHE_FLUSH_INTERVAL; static final String MEMSTORE_FLUSH_PER_CHANGES; static final long DEFAULT_FLUSH_PER_CHANGES; static final long MAX_FLUSH_PER_CHANGES; static final long FIXED_OVERHEAD; static final long DEEP_OVERHEAD; public IndexTableRelation indexTableRelation; } | @Test public void testMerge() throws IOException { byte[][] families = { fam1, fam2, fam3 }; Configuration hc = initSplit(); String method = this.getName(); this.region = initHRegion(tableName, method, hc, families); try { LOG.info("" + HBaseTestCase.addContent(region, fam3)); region.flush(true); region.compactStores(); byte[] splitRow = region.checkSplit(); assertNotNull(splitRow); LOG.info("SplitRow: " + Bytes.toString(splitRow)); HRegion[] subregions = splitRegion(region, splitRow); try { for (int i = 0; i < subregions.length; i++) { HRegion.openHRegion(subregions[i], null); subregions[i].compactStores(); } Path oldRegionPath = region.getRegionFileSystem().getRegionDir(); Path oldRegion1 = subregions[0].getRegionFileSystem().getRegionDir(); Path oldRegion2 = subregions[1].getRegionFileSystem().getRegionDir(); long startTime = System.currentTimeMillis(); region = HRegion.mergeAdjacent(subregions[0], subregions[1]); LOG.info("Merge regions elapsed time: " + ((System.currentTimeMillis() - startTime) / 1000.0)); FILESYSTEM.delete(oldRegion1, true); FILESYSTEM.delete(oldRegion2, true); FILESYSTEM.delete(oldRegionPath, true); LOG.info("splitAndMerge completed."); } finally { for (int i = 0; i < subregions.length; i++) { try { HRegion.closeHRegion(subregions[i]); } catch (IOException e) { } } } } finally { HRegion.closeHRegion(this.region); this.region = null; } } |
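Note on the focal merge() above: it is a small transaction, flush and compact both inputs, prepare and execute a RegionMergeTransactionImpl, roll back on failure, and archive the parent regions once the merged region has no references left. A caller-side sketch, assuming two open regions of the same table (regionA/regionB are hypothetical):

// mergeAdjacent(a, b) is the checked entry point listed in the signatures above;
// merge() itself throws if the regions belong to different tables.
HRegion merged = HRegion.mergeAdjacent(regionA, regionB);
LOG.info("merged into " + merged.getRegionInfo().getRegionNameAsString());
// On IOException, merge() has already rolled the transaction back before rethrowing.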
HRegion implements HeapSize, PropagatingConfigurationObserver, Region { @Override public HDFSBlocksDistribution getHDFSBlocksDistribution() { HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution(); synchronized (this.stores) { for (Store store : this.stores.values()) { Collection<StoreFile> storeFiles = store.getStorefiles(); if (storeFiles == null) continue; for (StoreFile sf : storeFiles) { HDFSBlocksDistribution storeFileBlocksDistribution = sf.getHDFSBlockDistribution(); hdfsBlocksDistribution.add(storeFileBlocksDistribution); } } } return hdfsBlocksDistribution; } @Deprecated @VisibleForTesting HRegion(final Path tableDir, final WAL wal,
final FileSystem fs, final Configuration confParam, final HRegionInfo regionInfo,
final HTableDescriptor htd, final RegionServerServices rsServices); HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration confParam,
final HTableDescriptor htd, final RegionServerServices rsServices); static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf,
final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo); static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf,
final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo, Path tablePath); static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
final Configuration conf, final HTableDescriptor hTableDescriptor); static void closeHRegion(final HRegion r); static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
final Configuration conf, final HTableDescriptor hTableDescriptor, final WAL wal,
final boolean initialize); static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
final Configuration conf, final HTableDescriptor hTableDescriptor, final WAL wal,
final boolean initialize, final boolean ignoreWAL); static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
final Path tableDir, final Configuration conf, final HTableDescriptor hTableDescriptor,
final WAL wal, final boolean initialize, final boolean ignoreWAL); static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
final Configuration conf, final HTableDescriptor hTableDescriptor, final WAL wal); static HRegion openHRegion(final HRegionInfo info, final HTableDescriptor htd,
final WAL wal, final Configuration conf); static HRegion openHRegion(final HRegionInfo info, final HTableDescriptor htd,
final WAL wal, final Configuration conf, final RegionServerServices rsServices,
final CancelableProgressable reporter); static HRegion openHRegion(Path rootDir, final HRegionInfo info,
final HTableDescriptor htd, final WAL wal, final Configuration conf); static HRegion openHRegion(final Path rootDir, final HRegionInfo info,
final HTableDescriptor htd, final WAL wal, final Configuration conf,
final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(final Configuration conf, final FileSystem fs,
final Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final WAL wal); static HRegion openHRegion(final Configuration conf, final FileSystem fs,
final Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final WAL wal,
final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(final Configuration conf, final FileSystem fs,
final Path rootDir, final Path tableDir, final HRegionInfo info, final HTableDescriptor htd,
final WAL wal, final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(final HRegion other, final CancelableProgressable reporter); static Region openHRegion(final Region other, final CancelableProgressable reporter); static void warmupHRegion(final HRegionInfo info, final HTableDescriptor htd,
final WAL wal, final Configuration conf, final RegionServerServices rsServices,
final CancelableProgressable reporter); static void addRegionToMETA(final HRegion meta, final HRegion r); @Deprecated static Path getRegionDir(final Path tabledir, final String name); @Deprecated @VisibleForTesting static Path getRegionDir(final Path rootdir,
final HRegionInfo info); static boolean rowIsInRange(HRegionInfo info, final byte[] row); static boolean rowIsInRange(HRegionInfo info, final byte[] row, final int offset,
final short length); static HRegion mergeAdjacent(final HRegion srcA, final HRegion srcB); static HRegion merge(final HRegion a, final HRegion b); static void main(String[] args); long getSmallestReadPoint(); Set<byte[]> getAvaliableIndexes(); @Deprecated long initialize(); boolean hasReferences(); @Override HDFSBlocksDistribution getHDFSBlocksDistribution(); long addAndGetGlobalMemstoreSize(long memStoreSize); @Override HRegionInfo getRegionInfo(); RegionServerServices getRegionServerServices(); @Override long getReadRequestsCount(); @Override void updateReadRequestsCount(long i); @Override long getWriteRequestsCount(); @Override void updateWriteRequestsCount(long i); @Override long getMemstoreSize(); @Override long getNumMutationsWithoutWAL(); @Override long getDataInMemoryWithoutWAL(); @Override long getBlockedRequestsCount(); @Override long getCheckAndMutateChecksPassed(); @Override long getCheckAndMutateChecksFailed(); @Override MetricsRegion getMetrics(); @Override boolean isClosed(); @Override boolean isClosing(); @VisibleForTesting void setClosing(boolean closing); @Override boolean isReadOnly(); @Override boolean isRecovering(); void setRecovering(boolean newState); @Override boolean isAvailable(); boolean isSplittable(); boolean isMergeable(); boolean areWritesEnabled(); MultiVersionConcurrencyControl getMVCC(); @Override long getMaxFlushedSeqId(); @Override long getReadpoint(IsolationLevel isolationLevel); @Override boolean isLoadingCfsOnDemandDefault(); Map<byte[], List<StoreFile>> close(); Map<byte[], List<StoreFile>> close(final boolean abort); @Override void waitForFlushesAndCompactions(); @Override HTableDescriptor getTableDesc(); WAL getWAL(); FileSystem getFilesystem(); HRegionFileSystem getRegionFileSystem(); @Override long getEarliestFlushTimeForAllStores(); @Override long getOldestHfileTs(boolean majorCompactioOnly); long getLargestHStoreSize(); KeyValue.KVComparator getComparator(); @Override void triggerMajorCompaction(); @Override void compact(final boolean majorCompaction); void compactStores(); boolean compact(CompactionContext compaction, Store store,
CompactionThroughputController throughputController); boolean compact(CompactionContext compaction, Store store,
CompactionThroughputController throughputController, User user); @Override FlushResult flush(boolean force); FlushResult flushcache(boolean forceFlushAllStores, boolean writeFlushRequestWalMarker); @Override Result getClosestRowBefore(final byte[] row, final byte[] family); @Override RegionScanner getScanner(Scan scan); @Override RegionScanner getScanner(Scan scan, List<KeyValueScanner> additionalScanners); @Override void prepareDelete(Delete delete); @Override void delete(Delete delete); @Override void prepareDeleteTimestamps(Mutation mutation, Map<byte[], List<Cell>> familyMap,
byte[] byteNow); @Override void put(Put put); @Override OperationStatus[] batchMutate(Mutation[] mutations, long nonceGroup, long nonce); OperationStatus[] batchMutate(Mutation[] mutations); @Override OperationStatus[] batchReplay(MutationReplay[] mutations, long replaySeqId); @Override boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
ByteArrayComparable comparator, Mutation w, boolean writeToWAL); @Override boolean checkAndRowMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
ByteArrayComparable comparator, RowMutations rm, boolean writeToWAL); void addRegionToSnapshot(SnapshotDescription desc, ForeignExceptionSnare exnSnare); @Override void updateCellTimestamps(final Iterable<List<Cell>> cellItr, final byte[] now); void setReadsEnabled(boolean readsEnabled); @Override void checkFamilies(Collection<byte[]> families); @Override void checkTimestamps(final Map<byte[], List<Cell>> familyMap, long now); @Override boolean refreshStoreFiles(); @Override Store getStore(final byte[] column); @Override List<Store> getStores(); @Override List<String> getStoreFileList(final byte[][] columns); RowLock getRowLock(byte[] row); RowLock getRowLock(byte[] row, boolean readLock); @Override void releaseRowLocks(List<RowLock> rowLocks); @Override boolean bulkLoadHFiles(Collection<Pair<byte[], String>> familyPaths, boolean assignSeqId,
BulkLoadListener bulkLoadListener); @Override boolean equals(Object o); @Override int hashCode(); @Override String toString(); @Override Result get(final Get get); @Override List<Cell> get(Get get, boolean withCoprocessor); @Override void mutateRow(RowMutations rm); void mutateRowsWithLocks(Collection<Mutation> mutations, Collection<byte[]> rowsToLock); @Override void mutateRowsWithLocks(Collection<Mutation> mutations,
Collection<byte[]> rowsToLock, long nonceGroup, long nonce); ClientProtos.RegionLoadStats getRegionStats(); @Override void processRowsWithLocks(RowProcessor<?, ?> processor); @Override void processRowsWithLocks(RowProcessor<?, ?> processor, long nonceGroup, long nonce); @Override void processRowsWithLocks(RowProcessor<?, ?> processor, long timeout, long nonceGroup,
long nonce); Result append(Append append); @Override Result append(Append mutate, long nonceGroup, long nonce); Result increment(Increment increment); @Override Result increment(Increment mutation, long nonceGroup, long nonce); @Override long heapSize(); @Override boolean registerService(Service instance); @Override Message execService(RpcController controller, CoprocessorServiceCall call); byte[] checkSplit(); int getCompactPriority(); @Override RegionCoprocessorHost getCoprocessorHost(); void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost); @Override void startRegionOperation(); @Override @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SF_SWITCH_FALLTHROUGH", justification = "Intentional") void startRegionOperation(Operation op); @Override void closeRegionOperation(); void closeRegionOperation(Operation operation); @Override long getOpenSeqNum(); @Override Map<byte[], Long> getMaxStoreSeqId(); @Override long getOldestSeqIdOfStore(byte[] familyName); @Override CompactionState getCompactionState(); void reportCompactionRequestStart(boolean isMajor); void reportCompactionRequestEnd(boolean isMajor, int numFiles, long filesSizeCompacted); @VisibleForTesting long getSequenceId(); @Override void onConfigurationChange(Configuration conf); @Override void registerChildren(ConfigurationManager manager); @Override void deregisterChildren(ConfigurationManager manager); RegionSplitPolicy getSplitPolicy(); static final String LOAD_CFS_ON_DEMAND_CONFIG_KEY; static final String MEMSTORE_PERIODIC_FLUSH_INTERVAL; static final int DEFAULT_CACHE_FLUSH_INTERVAL; static final int SYSTEM_CACHE_FLUSH_INTERVAL; static final String MEMSTORE_FLUSH_PER_CHANGES; static final long DEFAULT_FLUSH_PER_CHANGES; static final long MAX_FLUSH_PER_CHANGES; static final long FIXED_OVERHEAD; static final long DEEP_OVERHEAD; public IndexTableRelation indexTableRelation; } | @Test public void testgetHDFSBlocksDistribution() throws Exception { HBaseTestingUtility htu = new HBaseTestingUtility(); htu.getConfiguration().setInt("dfs.replication", 2); MiniHBaseCluster cluster = null; String dataNodeHosts[] = new String[] { "host1", "host2", "host3" }; int regionServersCount = 3; try { cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts); byte[][] families = { fam1, fam2 }; Table ht = htu.createTable(Bytes.toBytes(this.getName()), families); byte row[] = Bytes.toBytes("row1"); byte col[] = Bytes.toBytes("col1"); Put put = new Put(row); put.add(fam1, col, 1, Bytes.toBytes("test1")); put.add(fam2, col, 1, Bytes.toBytes("test2")); ht.put(put); HRegion firstRegion = htu.getHBaseCluster().getRegions(TableName.valueOf(this.getName())) .get(0); firstRegion.flush(true); HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution(); long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight(); StringBuilder sb = new StringBuilder(); for (String host: blocksDistribution1.getTopHosts()) { if (sb.length() > 0) sb.append(", "); sb.append(host); sb.append("="); sb.append(blocksDistribution1.getWeight(host)); } String topHost = blocksDistribution1.getTopHosts().get(0); long topHostWeight = blocksDistribution1.getWeight(topHost); String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" + topHostWeight + ", topHost=" + topHost + "; " + sb.toString(); LOG.info(msg); assertTrue(msg, uniqueBlocksWeight1 == topHostWeight); HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution( htu.getConfiguration(), firstRegion.getTableDesc(),
firstRegion.getRegionInfo()); long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight(); assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2); ht.close(); } finally { if (cluster != null) { htu.shutdownMiniCluster(); } } } |
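Note on getHDFSBlocksDistribution() above: it folds every store file's per-host block weights into one HDFSBlocksDistribution. The aggregation it relies on can be modeled with a plain map; a self-contained sketch (ToyBlocksDistribution and all names are hypothetical):

import java.util.HashMap;
import java.util.Map;

// Toy model of HDFSBlocksDistribution.add(): sum per-host replica weights and
// track the unique (per-block) total, from which the top host is derived.
class ToyBlocksDistribution {
  private final Map<String, Long> hostWeights = new HashMap<>();
  private long uniqueBlocksTotalWeight = 0;

  void addBlock(long weight, String... hostsHoldingReplicas) {
    uniqueBlocksTotalWeight += weight; // each block counted once
    for (String host : hostsHoldingReplicas) {
      hostWeights.merge(host, weight, Long::sum); // counted once per replica host
    }
  }

  long weight(String host) { return hostWeights.getOrDefault(host, 0L); }
  long uniqueWeight() { return uniqueBlocksTotalWeight; }
}

When one host holds a replica of every block, weight(topHost) equals uniqueWeight(), which is the equality testgetHDFSBlocksDistribution asserts after the flush.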
IdLock { void assertMapEmpty() { assert map.size() == 0; } Entry getLockEntry(long id); void releaseLockEntry(Entry entry); @VisibleForTesting void waitForWaiters(long id, int numWaiters); } | @Test public void testMultipleClients() throws Exception { ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS); try { ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<Boolean>(exec); for (int i = 0; i < NUM_THREADS; ++i) ecs.submit(new IdLockTestThread("client_" + i)); for (int i = 0; i < NUM_THREADS; ++i) { Future<Boolean> result = ecs.take(); assertTrue(result.get()); } idLock.assertMapEmpty(); } finally { exec.shutdown(); exec.awaitTermination(5000, TimeUnit.MILLISECONDS); } } |
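Note on IdLock above: getLockEntry blocks while another thread holds the same numeric id, and releaseLockEntry wakes the waiters; assertMapEmpty passing after the test means every acquire was paired with a release. The canonical usage is acquire, then try/finally (a sketch; getLockEntry may throw IOException in this codebase):

IdLock idLock = new IdLock();
IdLock.Entry entry = idLock.getLockEntry(42L); // blocks while id 42 is held elsewhere
try {
  // ... work that must be exclusive per id, e.g. loading one HFile block ...
} finally {
  idLock.releaseLockEntry(entry); // always release, or later acquirers hang forever
}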
SnapshotManifest { public static SnapshotManifest open(final Configuration conf, final FileSystem fs, final Path workingDir, final SnapshotDescription desc) throws IOException { SnapshotManifest manifest = new SnapshotManifest(conf, fs, workingDir, desc, null); manifest.load(); return manifest; } private SnapshotManifest(final Configuration conf, final FileSystem fs,
final Path workingDir, final SnapshotDescription desc,
final ForeignExceptionSnare monitor); static SnapshotManifest create(final Configuration conf, final FileSystem fs,
final Path workingDir, final SnapshotDescription desc,
final ForeignExceptionSnare monitor); static SnapshotManifest open(final Configuration conf, final FileSystem fs,
final Path workingDir, final SnapshotDescription desc); void addTableDescriptor(final HTableDescriptor htd); void addRegion(final HRegion region); void addRegion(final Path tableDir, final HRegionInfo regionInfo); Path getSnapshotDir(); SnapshotDescription getSnapshotDescription(); HTableDescriptor getTableDescriptor(); List<SnapshotRegionManifest> getRegionManifests(); Map<String, SnapshotRegionManifest> getRegionManifestsMap(); void consolidate(); static ThreadPoolExecutor createExecutor(final Configuration conf, final String name); static final String SNAPSHOT_MANIFEST_SIZE_LIMIT_CONF_KEY; static final String DATA_MANIFEST_NAME; } | @Test public void testReadSnapshotManifest() throws IOException { try { SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); fail("fail to test snapshot manifest because message size is too small."); } catch (InvalidProtocolBufferException ipbe) { try { conf.setInt(SnapshotManifest.SNAPSHOT_MANIFEST_SIZE_LIMIT_CONF_KEY, 128 * 1024 * 1024); SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); LOG.info("open snapshot manifest succeed."); } catch (InvalidProtocolBufferException ipbe2) { fail("fail to take snapshot because Manifest proto-message too large."); } } } |
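Note on SnapshotManifest.open() above: load() parses the data manifest as a protobuf message, so a manifest larger than the configured limit fails with InvalidProtocolBufferException until SNAPSHOT_MANIFEST_SIZE_LIMIT_CONF_KEY is raised, which is exactly the sequence testReadSnapshotManifest drives. A retry sketch:

SnapshotManifest manifest;
try {
  manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
} catch (InvalidProtocolBufferException tooBig) {
  // Raise the message-size ceiling (128 MB here) and try once more.
  conf.setInt(SnapshotManifest.SNAPSHOT_MANIFEST_SIZE_LIMIT_CONF_KEY, 128 * 1024 * 1024);
  manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
}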
ExportSnapshot extends Configured implements Tool { static List<List<Pair<SnapshotFileInfo, Long>>> getBalancedSplits( final List<Pair<SnapshotFileInfo, Long>> files, final int ngroups) { Collections.sort(files, new Comparator<Pair<SnapshotFileInfo, Long>>() { public int compare(Pair<SnapshotFileInfo, Long> a, Pair<SnapshotFileInfo, Long> b) { long r = a.getSecond() - b.getSecond(); return (r < 0) ? -1 : ((r > 0) ? 1 : 0); } }); List<List<Pair<SnapshotFileInfo, Long>>> fileGroups = new LinkedList<List<Pair<SnapshotFileInfo, Long>>>(); long[] sizeGroups = new long[ngroups]; int hi = files.size() - 1; int lo = 0; List<Pair<SnapshotFileInfo, Long>> group; int dir = 1; int g = 0; while (hi >= lo) { if (g == fileGroups.size()) { group = new LinkedList<Pair<SnapshotFileInfo, Long>>(); fileGroups.add(group); } else { group = fileGroups.get(g); } Pair<SnapshotFileInfo, Long> fileInfo = files.get(hi--); sizeGroups[g] += fileInfo.getSecond(); group.add(fileInfo); g += dir; if (g == ngroups) { dir = -1; g = ngroups - 1; } else if (g < 0) { dir = 1; g = 0; } } if (LOG.isDebugEnabled()) { for (int i = 0; i < sizeGroups.length; ++i) { LOG.debug("export split=" + i + " size=" + StringUtils.humanReadableInt(sizeGroups[i])); } } return fileGroups; } @Override int run(String[] args); static void main(String[] args); static final String NAME; static final String CONF_SOURCE_PREFIX; static final String CONF_DEST_PREFIX; } | @Test public void testBalanceSplit() throws Exception { List<Pair<SnapshotFileInfo, Long>> files = new ArrayList<Pair<SnapshotFileInfo, Long>>(); for (long i = 0; i <= 20; i++) { SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder() .setType(SnapshotFileInfo.Type.HFILE) .setHfile("file-" + i) .build(); files.add(new Pair<SnapshotFileInfo, Long>(fileInfo, i)); } List<List<Pair<SnapshotFileInfo, Long>>> splits = ExportSnapshot.getBalancedSplits(files, 5); assertEquals(5, splits.size()); String[] split0 = new String[] {"file-20", "file-11", "file-10", "file-1", "file-0"}; verifyBalanceSplit(splits.get(0), split0, 42); String[] split1 = new String[] {"file-19", "file-12", "file-9", "file-2"}; verifyBalanceSplit(splits.get(1), split1, 42); String[] split2 = new String[] {"file-18", "file-13", "file-8", "file-3"}; verifyBalanceSplit(splits.get(2), split2, 42); String[] split3 = new String[] {"file-17", "file-14", "file-7", "file-4"}; verifyBalanceSplit(splits.get(3), split3, 42); String[] split4 = new String[] {"file-16", "file-15", "file-6", "file-5"}; verifyBalanceSplit(splits.get(4), split4, 42); } |
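Note on getBalancedSplits() above: it sorts files by size ascending, then deals from the largest end in a serpentine order (group 0..n-1, then n-1..0, with the turning group taking two in a row), which keeps group totals close without real bin packing. The walk can be reproduced over plain sizes; a self-contained sketch:

class SerpentineSplitSketch {
  // Mirrors the hi/dir/g walk above: hand the largest remaining size to groups
  // 0..n-1, reverse direction at each end, repeat until the list is exhausted.
  static long[] groupTotals(long[] sizes, int ngroups) {
    java.util.Arrays.sort(sizes); // note: sorts the caller's array in place
    long[] totals = new long[ngroups];
    int g = 0, dir = 1;
    for (int hi = sizes.length - 1; hi >= 0; hi--) {
      totals[g] += sizes[hi];
      g += dir;
      if (g == ngroups) { dir = -1; g = ngroups - 1; }
      else if (g < 0) { dir = 1; g = 0; }
    }
    return totals; // for sizes 0..20 and 5 groups, every total is 42, as the test checks
  }
}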
SnapshotDescriptionUtils { public static SnapshotDescription validate(SnapshotDescription snapshot, Configuration conf) throws IllegalArgumentException { if (!snapshot.hasTable()) { throw new IllegalArgumentException( "Descriptor doesn't apply to a table, so we can't build it."); } long time = snapshot.getCreationTime(); if (time == SnapshotDescriptionUtils.NO_SNAPSHOT_START_TIME_SPECIFIED) { time = EnvironmentEdgeManager.currentTime(); LOG.debug("Creation time not specified, setting to:" + time + " (current time:" + EnvironmentEdgeManager.currentTime() + ")."); SnapshotDescription.Builder builder = snapshot.toBuilder(); builder.setCreationTime(time); snapshot = builder.build(); } return snapshot; } private SnapshotDescriptionUtils(); static long getMaxMasterTimeout(Configuration conf, SnapshotDescription.Type type,
long defaultMaxWaitTime); static Path getSnapshotRootDir(final Path rootDir); static Path getCompletedSnapshotDir(final SnapshotDescription snapshot, final Path rootDir); static Path getCompletedSnapshotDir(final String snapshotName, final Path rootDir); static Path getWorkingSnapshotDir(final Path rootDir); static Path getWorkingSnapshotDir(SnapshotDescription snapshot, final Path rootDir); static Path getWorkingSnapshotDir(String snapshotName, final Path rootDir); static final Path getSnapshotsDir(Path rootDir); static SnapshotDescription validate(SnapshotDescription snapshot, Configuration conf); static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingDir, FileSystem fs); static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotDir); static void completeSnapshot(SnapshotDescription snapshot, Path rootdir, Path workingDir,
FileSystem fs); static boolean isSnapshotOwner(final SnapshotDescription snapshot, final User user); static final int SNAPSHOT_LAYOUT_VERSION; static final String SNAPSHOTINFO_FILE; static final String SNAPSHOT_TMP_DIR_NAME; static final long NO_SNAPSHOT_START_TIME_SPECIFIED; static final String MASTER_SNAPSHOT_TIMEOUT_MILLIS; static final long DEFAULT_MAX_WAIT_TIME; @Deprecated
static final int SNAPSHOT_TIMEOUT_MILLIS_DEFAULT; @Deprecated
static final String SNAPSHOT_TIMEOUT_MILLIS_KEY; } | @Test public void testValidateMissingTableName() { Configuration conf = new Configuration(false); try { SnapshotDescriptionUtils.validate(SnapshotDescription.newBuilder().setName("fail").build(), conf); fail("Snapshot was considered valid without a table name"); } catch (IllegalArgumentException e) { LOG.debug("Correctly failed when snapshot doesn't have a tablename"); } } |
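Note on validate() above: it enforces the one required field (table) and back-fills the creation time through the protobuf builder when the caller leaves it unset. A usage sketch, assuming the generated builder exposes setTable as the hasTable() check implies:

SnapshotDescription desc = SnapshotDescription.newBuilder()
    .setName("nightly")   // hypothetical snapshot name
    .setTable("mytable")  // required: validate() throws IllegalArgumentException without it
    .build();             // creation time deliberately left unset
desc = SnapshotDescriptionUtils.validate(desc, conf);
assert desc.getCreationTime() != SnapshotDescriptionUtils.NO_SNAPSHOT_START_TIME_SPECIFIED;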
SnapshotDescriptionUtils { public static void completeSnapshot(SnapshotDescription snapshot, Path rootdir, Path workingDir, FileSystem fs) throws SnapshotCreationException, IOException { Path finishedDir = getCompletedSnapshotDir(snapshot, rootdir); LOG.debug("Snapshot is done, just moving the snapshot from " + workingDir + " to " + finishedDir); if (!fs.rename(workingDir, finishedDir)) { throw new SnapshotCreationException("Failed to move working directory(" + workingDir + ") to completed directory(" + finishedDir + ").", snapshot); } } private SnapshotDescriptionUtils(); static long getMaxMasterTimeout(Configuration conf, SnapshotDescription.Type type,
long defaultMaxWaitTime); static Path getSnapshotRootDir(final Path rootDir); static Path getCompletedSnapshotDir(final SnapshotDescription snapshot, final Path rootDir); static Path getCompletedSnapshotDir(final String snapshotName, final Path rootDir); static Path getWorkingSnapshotDir(final Path rootDir); static Path getWorkingSnapshotDir(SnapshotDescription snapshot, final Path rootDir); static Path getWorkingSnapshotDir(String snapshotName, final Path rootDir); static final Path getSnapshotsDir(Path rootDir); static SnapshotDescription validate(SnapshotDescription snapshot, Configuration conf); static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingDir, FileSystem fs); static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotDir); static void completeSnapshot(SnapshotDescription snapshot, Path rootdir, Path workingDir,
FileSystem fs); static boolean isSnapshotOwner(final SnapshotDescription snapshot, final User user); static final int SNAPSHOT_LAYOUT_VERSION; static final String SNAPSHOTINFO_FILE; static final String SNAPSHOT_TMP_DIR_NAME; static final long NO_SNAPSHOT_START_TIME_SPECIFIED; static final String MASTER_SNAPSHOT_TIMEOUT_MILLIS; static final long DEFAULT_MAX_WAIT_TIME; @Deprecated
static final int SNAPSHOT_TIMEOUT_MILLIS_DEFAULT; @Deprecated
static final String SNAPSHOT_TIMEOUT_MILLIS_KEY; } | @Test public void testCompleteSnapshotWithNoSnapshotDirectoryFailure() throws Exception { Path snapshotDir = new Path(root, HConstants.SNAPSHOT_DIR_NAME); Path tmpDir = new Path(snapshotDir, ".tmp"); Path workingDir = new Path(tmpDir, "not_a_snapshot"); assertFalse("Already have working snapshot dir: " + workingDir + " but shouldn't. Test file leak?", fs.exists(workingDir)); SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("snapshot").build(); try { SnapshotDescriptionUtils.completeSnapshot(snapshot, root, workingDir, fs); fail("Shouldn't successfully complete move of a non-existent directory."); } catch (IOException e) { LOG.info("Correctly failed to move non-existant directory: " + e.getMessage()); } } |
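Note on completeSnapshot() above: the commit is a single FileSystem.rename from the working directory to the completed directory, so readers never observe a half-written snapshot; a failed rename (for instance a missing working directory, as the test provokes) surfaces as SnapshotCreationException. The same rename-as-commit idiom in isolation (paths hypothetical; rename is atomic on HDFS but not on every FileSystem implementation):

Path staging = new Path("/hbase/.hbase-snapshot/.tmp/snap1"); // fully written first
Path published = new Path("/hbase/.hbase-snapshot/snap1");    // visible to readers
if (!fs.rename(staging, published)) {
  throw new IOException("commit failed; " + staging + " left in place for cleanup/retry");
}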
JMXListener implements Coprocessor { @Override public void start(CoprocessorEnvironment env) throws IOException { int rmiRegistryPort = -1; int rmiConnectorPort = -1; Configuration conf = env.getConfiguration(); if (env instanceof MasterCoprocessorEnvironment) { rmiRegistryPort = conf.getInt("master" + RMI_REGISTRY_PORT_CONF_KEY, defMasterRMIRegistryPort); rmiConnectorPort = conf.getInt("master" + RMI_CONNECTOR_PORT_CONF_KEY, rmiRegistryPort); LOG.info("Master rmiRegistryPort:" + rmiRegistryPort + ",Master rmiConnectorPort:" + rmiConnectorPort); } else if (env instanceof RegionServerCoprocessorEnvironment) { rmiRegistryPort = conf.getInt("regionserver" + RMI_REGISTRY_PORT_CONF_KEY, defRegionserverRMIRegistryPort); rmiConnectorPort = conf.getInt("regionserver" + RMI_CONNECTOR_PORT_CONF_KEY, rmiRegistryPort); LOG.info("RegionServer rmiRegistryPort:" + rmiRegistryPort + ",RegionServer rmiConnectorPort:" + rmiConnectorPort); } else if (env instanceof RegionCoprocessorEnvironment) { LOG.error("JMXListener should not be loaded in Region Environment!"); return; } synchronized(JMXListener.class) { if (JMX_CS != null) { LOG.info("JMXListener has been started at Registry port " + rmiRegistryPort); } else { startConnectorServer(rmiRegistryPort, rmiConnectorPort); } } } static JMXServiceURL buildJMXServiceURL(int rmiRegistryPort,
int rmiConnectorPort); void startConnectorServer(int rmiRegistryPort, int rmiConnectorPort); void stopConnectorServer(); @Override void start(CoprocessorEnvironment env); @Override void stop(CoprocessorEnvironment env); static final String RMI_REGISTRY_PORT_CONF_KEY; static final String RMI_CONNECTOR_PORT_CONF_KEY; static final int defMasterRMIRegistryPort; static final int defRegionserverRMIRegistryPort; } | @Test public void testStart() throws Exception { JMXConnector connector = JMXConnectorFactory.connect( JMXListener.buildJMXServiceURL(connectorPort,connectorPort)); MBeanServerConnection mb = connector.getMBeanServerConnection(); String domain = mb.getDefaultDomain(); Assert.assertTrue("default domain is not correct", !domain.isEmpty()); connector.close(); } |
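Note on start() above: it resolves the registry and connector ports from the configuration (the connector defaulting to the registry port) and starts at most one connector server per JVM. Any standard JMX client can then attach through the same service URL, as testStart does; a client-side sketch (port hypothetical):

JMXServiceURL url = JMXListener.buildJMXServiceURL(10102, 10102);
JMXConnector connector = JMXConnectorFactory.connect(url);
try {
  MBeanServerConnection mbsc = connector.getMBeanServerConnection();
  System.out.println("MBeans visible: " + mbsc.getMBeanCount());
} finally {
  connector.close(); // always release the underlying RMI connection
}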
JMXListener implements Coprocessor { @Override public void stop(CoprocessorEnvironment env) throws IOException { stopConnectorServer(); } static JMXServiceURL buildJMXServiceURL(int rmiRegistryPort,
int rmiConnectorPort); void startConnectorServer(int rmiRegistryPort, int rmiConnectorPort); void stopConnectorServer(); @Override void start(CoprocessorEnvironment env); @Override void stop(CoprocessorEnvironment env); static final String RMI_REGISTRY_PORT_CONF_KEY; static final String RMI_CONNECTOR_PORT_CONF_KEY; static final int defMasterRMIRegistryPort; static final int defRegionserverRMIRegistryPort; } | @Test public void testStop() throws Exception { MiniHBaseCluster cluster = UTIL.getHBaseCluster(); LOG.info("shutdown hbase cluster..."); cluster.shutdown(); LOG.info("wait for the hbase cluster shutdown..."); cluster.waitUntilShutDown(); JMXConnector connector = JMXConnectorFactory.newJMXConnector( JMXListener.buildJMXServiceURL(connectorPort,connectorPort), null); expectedEx.expect(IOException.class); connector.connect(); } |
TagRewriteCell implements Cell, SettableSequenceId, SettableTimestamp, HeapSize { @Override public long heapSize() { long sum = CellUtil.estimatedHeapSizeOf(cell) - cell.getTagsLength(); sum += ClassSize.OBJECT; sum += (2 * ClassSize.REFERENCE); if (this.tags != null) { sum += ClassSize.align(ClassSize.ARRAY); sum += this.tags.length; } return sum; } TagRewriteCell(Cell cell, byte[] tags); @Override byte[] getRowArray(); @Override int getRowOffset(); @Override short getRowLength(); @Override byte[] getFamilyArray(); @Override int getFamilyOffset(); @Override byte getFamilyLength(); @Override byte[] getQualifierArray(); @Override int getQualifierOffset(); @Override int getQualifierLength(); @Override long getTimestamp(); @Override byte getTypeByte(); @Override @Deprecated long getMvccVersion(); @Override long getSequenceId(); @Override byte[] getValueArray(); @Override int getValueOffset(); @Override int getValueLength(); @Override byte[] getTagsArray(); @Override int getTagsOffset(); @Override int getTagsLength(); @Override @Deprecated byte[] getValue(); @Override @Deprecated byte[] getFamily(); @Override @Deprecated byte[] getQualifier(); @Override @Deprecated byte[] getRow(); @Override long heapSize(); @Override void setTimestamp(long ts); @Override void setTimestamp(byte[] ts, int tsOffset); @Override void setSequenceId(long seqId); } | @Test public void testHeapSize() { Cell originalCell = CellUtil.createCell(Bytes.toBytes("row"), Bytes.toBytes("value")); final int fakeTagArrayLength = 10; TagRewriteCell trCell = new TagRewriteCell(originalCell, new byte[fakeTagArrayLength]); long trCellHeapSize = trCell.heapSize(); TagRewriteCell trCell2 = new TagRewriteCell(trCell, new byte[fakeTagArrayLength]); assertTrue("TagRewriteCell containing a TagRewriteCell's heapsize should be larger than a " + "single TagRewriteCell's heapsize", trCellHeapSize < trCell2.heapSize()); assertTrue("TagRewriteCell should have had nulled out tags array", trCell.heapSize() < trCellHeapSize); } |
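Note on heapSize() above: the wrapper subtracts the inner cell's tag contribution, then adds its own fixed overhead plus the new tag array, so each re-wrap strictly grows the estimate; per the test's second assertion, the constructor also appears to null out an inner TagRewriteCell's tags to avoid double counting. A sketch of the per-wrapper arithmetic (ClassSize constants are JVM-dependent; the tag array here is hypothetical):

byte[] tags = new byte[10]; // hypothetical tag payload
long wrapperOverhead = ClassSize.OBJECT               // TagRewriteCell object header
    + 2 * ClassSize.REFERENCE                         // the 'cell' and 'tags' fields
    + ClassSize.align(ClassSize.ARRAY) + tags.length; // the new tag byte[]
// heapSize() = estimatedHeapSizeOf(inner) - inner.getTagsLength() + wrapperOverhead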
KeyLocker { public ReentrantLock acquireLock(K key) { if (key == null) throw new IllegalArgumentException("key must not be null"); lockPool.purge(); ReentrantLock lock = lockPool.get(key); lock.lock(); return lock; } ReentrantLock acquireLock(K key); Map<K, Lock> acquireLocks(Set<? extends K> keys); } | @Test public void testLocker(){ KeyLocker<String> locker = new KeyLocker<String>(); ReentrantLock lock1 = locker.acquireLock("l1"); Assert.assertTrue(lock1.isHeldByCurrentThread()); ReentrantLock lock2 = locker.acquireLock("l2"); Assert.assertTrue(lock2.isHeldByCurrentThread()); Assert.assertTrue(lock1 != lock2); ReentrantLock lock20 = locker.acquireLock("l2"); Assert.assertTrue(lock20 == lock2); Assert.assertTrue(lock2.isHeldByCurrentThread()); Assert.assertTrue(lock20.isHeldByCurrentThread()); lock20.unlock(); Assert.assertTrue(lock20.isHeldByCurrentThread()); lock2.unlock(); Assert.assertFalse(lock20.isHeldByCurrentThread()); int lock2Hash = System.identityHashCode(lock2); lock2 = null; lock20 = null; System.gc(); System.gc(); System.gc(); ReentrantLock lock200 = locker.acquireLock("l2"); Assert.assertNotEquals(lock2Hash, System.identityHashCode(lock200)); lock200.unlock(); Assert.assertFalse(lock200.isHeldByCurrentThread()); Assert.assertTrue(lock1.isHeldByCurrentThread()); lock1.unlock(); Assert.assertFalse(lock1.isHeldByCurrentThread()); } |
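Note on acquireLock() above: it returns a per-key ReentrantLock that is already held, and the pool references locks weakly, so an unlocked, unreferenced lock can be garbage-collected (the identity-hash check in testLocker). The usual shape is acquire, then try/finally:

KeyLocker<String> locker = new KeyLocker<>();
ReentrantLock lock = locker.acquireLock("region-A"); // hypothetical key; returned already held
try {
  // ... per-key critical section; other keys never contend ...
} finally {
  lock.unlock();
}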
DynamicClassLoader extends ClassLoaderBase { @Override public Class<?> loadClass(String name) throws ClassNotFoundException { try { return parent.loadClass(name); } catch (ClassNotFoundException e) { if (LOG.isDebugEnabled()) { LOG.debug("Class " + name + " not found - using dynamical class loader"); } if (useDynamicJars) { return tryRefreshClass(name); } throw e; } } DynamicClassLoader(
final Configuration conf, final ClassLoader parent); @Override Class<?> loadClass(String name); } | @Test public void testLoadClassFromLocalPath() throws Exception { ClassLoader parent = TestDynamicClassLoader.class.getClassLoader(); DynamicClassLoader classLoader = new DynamicClassLoader(conf, parent); String className = "TestLoadClassFromLocalPath"; deleteClass(className); try { classLoader.loadClass(className); fail("Should not be able to load class " + className); } catch (ClassNotFoundException cnfe) { } try { String folder = TEST_UTIL.getDataTestDir().toString(); ClassLoaderTestHelper.buildJar( folder, className, null, ClassLoaderTestHelper.localDirPath(conf)); classLoader.loadClass(className); } catch (ClassNotFoundException cnfe) { LOG.error("Should be able to load class " + className, cnfe); fail(cnfe.getMessage()); } }
@Test public void testLoadClassFromAnotherPath() throws Exception { ClassLoader parent = TestDynamicClassLoader.class.getClassLoader(); DynamicClassLoader classLoader = new DynamicClassLoader(conf, parent); String className = "TestLoadClassFromAnotherPath"; deleteClass(className); try { classLoader.loadClass(className); fail("Should not be able to load class " + className); } catch (ClassNotFoundException cnfe) { } try { String folder = TEST_UTIL.getDataTestDir().toString(); ClassLoaderTestHelper.buildJar(folder, className, null); classLoader.loadClass(className); } catch (ClassNotFoundException cnfe) { LOG.error("Should be able to load class " + className, cnfe); fail(cnfe.getMessage()); } }
@Test public void testLoadClassFromLocalPathWithDynamicDirOff() throws Exception { conf.setBoolean("hbase.use.dynamic.jars", false); ClassLoader parent = TestDynamicClassLoader.class.getClassLoader(); DynamicClassLoader classLoader = new DynamicClassLoader(conf, parent); String className = "TestLoadClassFromLocalPath"; deleteClass(className); try { String folder = TEST_UTIL.getDataTestDir().toString(); ClassLoaderTestHelper.buildJar( folder, className, null, ClassLoaderTestHelper.localDirPath(conf)); classLoader.loadClass(className); fail("Should not be able to load class " + className); } catch (ClassNotFoundException cnfe) { } } |
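Note on loadClass() above: resolution is parent-first, and only on a parent miss (with hbase.use.dynamic.jars enabled, per the third test) does the loader search for newly dropped jars. A usage sketch (class name hypothetical):

DynamicClassLoader loader = new DynamicClassLoader(conf, getClass().getClassLoader());
try {
  Class<?> clazz = loader.loadClass("com.example.MyObserver");
  System.out.println("loaded " + clazz.getName());
} catch (ClassNotFoundException cnfe) {
  // neither on the parent classpath nor in any jar under the dynamic directories
}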
HBaseFsck extends Configured implements Closeable { public HBaseFsck(Configuration conf) throws MasterNotRunningException, ZooKeeperConnectionException, IOException, ClassNotFoundException { this(conf, createThreadPool(conf)); } HBaseFsck(Configuration conf); HBaseFsck(Configuration conf, ExecutorService exec); void connect(); void offlineHdfsIntegrityRepair(); int onlineConsistencyRepair(); int onlineHbck(); static byte[] keyOnly(byte[] b); @Override void close(); void checkRegionBoundaries(); ErrorReporter getErrors(); void fixEmptyMetaCells(); void fixOrphanTables(); boolean rebuildMeta(boolean fix); void loadHdfsRegionDirs(); int mergeRegionDirs(Path targetRegionDir, HbckInfo contained); void dumpOverlapProblems(Multimap<byte[], HbckInfo> regions); void dumpSidelinedRegions(Map<Path, HbckInfo> regions); Multimap<byte[], HbckInfo> getOverlapGroups(
TableName table); static void setDisplayFullReport(); static void setForceExclusive(); boolean isExclusive(); static void setDisableBalancer(); boolean shouldDisableBalancer(); void setFixTableLocks(boolean shouldFix); void setFixTableZNodes(boolean shouldFix); void setFixAssignments(boolean shouldFix); void setFixMeta(boolean shouldFix); void setFixEmptyMetaCells(boolean shouldFix); void setCheckHdfs(boolean checking); void setFixHdfsHoles(boolean shouldFix); void setFixTableOrphans(boolean shouldFix); void setFixHdfsOverlaps(boolean shouldFix); void setFixHdfsOrphans(boolean shouldFix); void setFixVersionFile(boolean shouldFix); boolean shouldFixVersionFile(); void setSidelineBigOverlaps(boolean sbo); boolean shouldSidelineBigOverlaps(); void setFixSplitParents(boolean shouldFix); void setFixReferenceFiles(boolean shouldFix); boolean shouldIgnorePreCheckPermission(); void setIgnorePreCheckPermission(boolean ignorePreCheckPermission); void setMaxMerge(int mm); int getMaxMerge(); void setMaxOverlapsToSideline(int mo); int getMaxOverlapsToSideline(); void includeTable(TableName table); void setTimeLag(long seconds); void setSidelineDir(String sidelineDir); HFileCorruptionChecker getHFilecorruptionChecker(); void setHFileCorruptionChecker(HFileCorruptionChecker hfcc); void setRetCode(int code); int getRetCode(); static void main(String[] args); HBaseFsck exec(ExecutorService exec, String[] args); static void debugLsr(Configuration conf,
Path p); static void debugLsr(Configuration conf,
Path p, ErrorReporter errors); static final long DEFAULT_TIME_LAG; static final long DEFAULT_SLEEP_BEFORE_RERUN; } | @Test (timeout=180000) public void testHBaseFsck() throws Exception { assertNoErrors(doFsck(conf, false)); TableName table = TableName.valueOf("tableBadMetaAssign"); HTableDescriptor desc = new HTableDescriptor(table); HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM)); desc.addFamily(hcd); createTable(TEST_UTIL, desc, null); assertNoErrors(doFsck(conf, false)); Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService); Scan scan = new Scan(); scan.setStartRow(Bytes.toBytes(table+",,")); ResultScanner scanner = meta.getScanner(scan); HRegionInfo hri = null; Result res = scanner.next(); ServerName currServer = ServerName.parseFrom(res.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER)); long startCode = Bytes.toLong(res.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER)); for (JVMClusterUtil.RegionServerThread rs : TEST_UTIL.getHBaseCluster().getRegionServerThreads()) { ServerName sn = rs.getRegionServer().getServerName(); if (!currServer.getHostAndPort().equals(sn.getHostAndPort()) || startCode != sn.getStartcode()) { Put put = new Put(res.getRow()); put.setDurability(Durability.SKIP_WAL); put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes.toBytes(sn.getHostAndPort())); put.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn.getStartcode())); meta.put(put); hri = MetaTableAccessor.getHRegionInfo(res); break; } } assertErrors(doFsck(conf, true), new ERROR_CODE[]{ ERROR_CODE.SERVER_DOES_NOT_MATCH_META}); TEST_UTIL.getHBaseCluster().getMaster() .getAssignmentManager().waitForAssignment(hri); assertNoErrors(doFsck(conf, false)); Table t = connection.getTable(table, tableExecutorService); ResultScanner s = t.getScanner(new Scan()); s.close(); t.close(); scanner.close(); meta.close(); } |
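The constructor above wires a default bounded thread pool; the test drives a full check through the doFsck() helper. As a rough sketch of the same flow outside the test harness, assuming the error-reporter accessor shape implied by getErrors() in the signature dump:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.HBaseFsck;

public class HbckReadOnlySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseFsck fsck = new HBaseFsck(conf); // builds its own thread pool via createThreadPool(conf)
    try {
      fsck.connect();                     // establishes admin and hbase:meta connections
      int ret = fsck.onlineHbck();        // report-only pass: no setFix* flags enabled
      System.out.println("hbck return code: " + ret
          + ", problems found: " + fsck.getErrors().getErrorList().size());
    } finally {
      fsck.close();
    }
  }
}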
Threads { public static void sleepWithoutInterrupt(final long msToWait) { long timeMillis = System.currentTimeMillis(); long endTime = timeMillis + msToWait; boolean interrupted = false; while (timeMillis < endTime) { try { Thread.sleep(endTime - timeMillis); } catch (InterruptedException ex) { interrupted = true; } timeMillis = System.currentTimeMillis(); } if (interrupted) { Thread.currentThread().interrupt(); } } static Thread setDaemonThreadRunning(final Thread t); static Thread setDaemonThreadRunning(final Thread t,
final String name); static Thread setDaemonThreadRunning(final Thread t,
final String name, final UncaughtExceptionHandler handler); static void shutdown(final Thread t); static void shutdown(final Thread t, final long joinwait); static void threadDumpingIsAlive(final Thread t); static void sleep(long millis); static void sleepWithoutInterrupt(final long msToWait); static ThreadPoolExecutor getBoundedCachedThreadPool(
int maxCachedThread, long timeout, TimeUnit unit,
ThreadFactory threadFactory); static ThreadFactory getNamedThreadFactory(final String prefix); static ThreadFactory newDaemonThreadFactory(final String prefix); static ThreadFactory newDaemonThreadFactory(final String prefix,
final UncaughtExceptionHandler handler); static void setLoggingUncaughtExceptionHandler(Thread t); static synchronized void printThreadInfo(PrintStream stream, String title); } | @Test(timeout=60000) public void testSleepWithoutInterrupt() throws InterruptedException { Thread sleeper = new Thread(new Runnable() { @Override public void run() { LOG.debug("Sleeper thread: sleeping for " + SLEEP_TIME_MS); Threads.sleepWithoutInterrupt(SLEEP_TIME_MS); LOG.debug("Sleeper thread: finished sleeping"); wasInterrupted.set(Thread.currentThread().isInterrupted()); } }); LOG.debug("Starting sleeper thread (" + SLEEP_TIME_MS + " ms)"); sleeper.start(); long startTime = System.currentTimeMillis(); LOG.debug("Main thread: sleeping for 200 ms"); Threads.sleep(200); LOG.debug("Interrupting the sleeper thread and sleeping for 500 ms"); sleeper.interrupt(); Threads.sleep(500); LOG.debug("Interrupting the sleeper thread and sleeping for 800 ms"); sleeper.interrupt(); Threads.sleep(800); LOG.debug("Interrupting the sleeper thread again"); sleeper.interrupt(); sleeper.join(); assertTrue("sleepWithoutInterrupt did not preserve the thread's " + "interrupted status", wasInterrupted.get()); long timeElapsed = System.currentTimeMillis() - startTime; assertTrue("Elapsed time " + timeElapsed + " ms is out of the expected " + " sleep time of " + SLEEP_TIME_MS, SLEEP_TIME_MS - timeElapsed < TOLERANCE_MS); LOG.debug("Target sleep time: " + SLEEP_TIME_MS + ", time elapsed: " + timeElapsed); } |
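sleepWithoutInterrupt makes two promises that the test verifies separately: the full wait elapses regardless of interrupts, and the interrupt status is re-asserted before returning. A compact, self-contained sketch of the same contract:

import org.apache.hadoop.hbase.util.Threads;

public class SleepContractSketch {
  public static void main(String[] args) throws InterruptedException {
    Thread worker = new Thread(new Runnable() {
      @Override
      public void run() {
        Threads.sleepWithoutInterrupt(1000); // keeps sleeping across interrupts
        // The interrupt delivered mid-sleep was not swallowed; it is visible here:
        System.out.println("interrupted? " + Thread.currentThread().isInterrupted());
      }
    });
    worker.start();
    Thread.sleep(100);
    worker.interrupt(); // lands mid-sleep; the worker resumes sleeping for the remainder
    worker.join();      // prints "interrupted? true" after roughly a full second
  }
}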
OrderedBytes { public static int encodeString(PositionedByteRange dst, String val, Order ord) { if (null == val) { return encodeNull(dst, ord); } if (val.contains("\u0000")) throw new IllegalArgumentException("Cannot encode String values containing '\\u0000'"); final int offset = dst.getOffset(), start = dst.getPosition(); dst.put(TEXT); dst.put(val.getBytes(UTF8)); dst.put(TERM); ord.apply(dst.getBytes(), offset + start, dst.getPosition() - start); return dst.getPosition() - start; } static int encodeNumeric(PositionedByteRange dst, long val, Order ord); static int encodeNumeric(PositionedByteRange dst, double val, Order ord); static int encodeNumeric(PositionedByteRange dst, BigDecimal val, Order ord); static double decodeNumericAsDouble(PositionedByteRange src); static long decodeNumericAsLong(PositionedByteRange src); static BigDecimal decodeNumericAsBigDecimal(PositionedByteRange src); static int encodeString(PositionedByteRange dst, String val, Order ord); static String decodeString(PositionedByteRange src); static int blobVarEncodedLength(int len); static int encodeBlobVar(PositionedByteRange dst, byte[] val, int voff, int vlen,
Order ord); static int encodeBlobVar(PositionedByteRange dst, byte[] val, Order ord); static byte[] decodeBlobVar(PositionedByteRange src); static int encodeBlobCopy(PositionedByteRange dst, byte[] val, int voff, int vlen,
Order ord); static int encodeBlobCopy(PositionedByteRange dst, byte[] val, Order ord); static byte[] decodeBlobCopy(PositionedByteRange src); static int encodeNull(PositionedByteRange dst, Order ord); static int encodeInt8(PositionedByteRange dst, byte val, Order ord); static byte decodeInt8(PositionedByteRange src); static int encodeInt16(PositionedByteRange dst, short val, Order ord); static short decodeInt16(PositionedByteRange src); static int encodeInt32(PositionedByteRange dst, int val, Order ord); static int decodeInt32(PositionedByteRange src); static int encodeInt64(PositionedByteRange dst, long val, Order ord); static long decodeInt64(PositionedByteRange src); static int encodeFloat32(PositionedByteRange dst, float val, Order ord); static float decodeFloat32(PositionedByteRange src); static int encodeFloat64(PositionedByteRange dst, double val, Order ord); static double decodeFloat64(PositionedByteRange src); static boolean isEncodedValue(PositionedByteRange src); static boolean isNull(PositionedByteRange src); static boolean isNumeric(PositionedByteRange src); static boolean isNumericInfinite(PositionedByteRange src); static boolean isNumericNaN(PositionedByteRange src); static boolean isNumericZero(PositionedByteRange src); static boolean isFixedInt8(PositionedByteRange src); static boolean isFixedInt16(PositionedByteRange src); static boolean isFixedInt32(PositionedByteRange src); static boolean isFixedInt64(PositionedByteRange src); static boolean isFixedFloat32(PositionedByteRange src); static boolean isFixedFloat64(PositionedByteRange src); static boolean isText(PositionedByteRange src); static boolean isBlobVar(PositionedByteRange src); static boolean isBlobCopy(PositionedByteRange src); static int skip(PositionedByteRange src); static int length(PositionedByteRange buff); static final Charset UTF8; static final int MAX_PRECISION; static final MathContext DEFAULT_MATH_CONTEXT; } | @Test(expected = IllegalArgumentException.class) public void testStringNoNullChars() { PositionedByteRange buff = new SimplePositionedMutableByteRange(3); OrderedBytes.encodeString(buff, "\u0000", Order.ASCENDING); } |
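The TEXT encoding is a header byte, the raw UTF-8 bytes, then a 0x00 terminator, which is exactly why an embedded '\u0000' must be rejected: it would be indistinguishable from TERM. A small round-trip sketch:

import org.apache.hadoop.hbase.util.Order;
import org.apache.hadoop.hbase.util.OrderedBytes;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

public class StringRoundTripSketch {
  public static void main(String[] args) {
    PositionedByteRange buff = new SimplePositionedMutableByteRange(16);
    int written = OrderedBytes.encodeString(buff, "hello", Order.ASCENDING);
    System.out.println("encoded length: " + written); // 7 = header + 5 UTF-8 bytes + TERM
    buff.setPosition(0);
    System.out.println(OrderedBytes.decodeString(buff)); // "hello"
  }
}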
OrderedBytes { public static int encodeBlobCopy(PositionedByteRange dst, byte[] val, int voff, int vlen, Order ord) { if (null == val) { encodeNull(dst, ord); if (ASCENDING == ord) return 1; else { dst.put(ord.apply(TERM)); return 2; } } assert dst.getRemaining() >= vlen + (ASCENDING == ord ? 1 : 2); if (DESCENDING == ord) { for (int i = 0; i < vlen; i++) { if (TERM == val[voff + i]) { throw new IllegalArgumentException("0x00 bytes not permitted in value."); } } } final int offset = dst.getOffset(), start = dst.getPosition(); dst.put(BLOB_COPY); dst.put(val, voff, vlen); if (DESCENDING == ord) dst.put(TERM); ord.apply(dst.getBytes(), offset + start, dst.getPosition() - start); return dst.getPosition() - start; } static int encodeNumeric(PositionedByteRange dst, long val, Order ord); static int encodeNumeric(PositionedByteRange dst, double val, Order ord); static int encodeNumeric(PositionedByteRange dst, BigDecimal val, Order ord); static double decodeNumericAsDouble(PositionedByteRange src); static long decodeNumericAsLong(PositionedByteRange src); static BigDecimal decodeNumericAsBigDecimal(PositionedByteRange src); static int encodeString(PositionedByteRange dst, String val, Order ord); static String decodeString(PositionedByteRange src); static int blobVarEncodedLength(int len); static int encodeBlobVar(PositionedByteRange dst, byte[] val, int voff, int vlen,
Order ord); static int encodeBlobVar(PositionedByteRange dst, byte[] val, Order ord); static byte[] decodeBlobVar(PositionedByteRange src); static int encodeBlobCopy(PositionedByteRange dst, byte[] val, int voff, int vlen,
Order ord); static int encodeBlobCopy(PositionedByteRange dst, byte[] val, Order ord); static byte[] decodeBlobCopy(PositionedByteRange src); static int encodeNull(PositionedByteRange dst, Order ord); static int encodeInt8(PositionedByteRange dst, byte val, Order ord); static byte decodeInt8(PositionedByteRange src); static int encodeInt16(PositionedByteRange dst, short val, Order ord); static short decodeInt16(PositionedByteRange src); static int encodeInt32(PositionedByteRange dst, int val, Order ord); static int decodeInt32(PositionedByteRange src); static int encodeInt64(PositionedByteRange dst, long val, Order ord); static long decodeInt64(PositionedByteRange src); static int encodeFloat32(PositionedByteRange dst, float val, Order ord); static float decodeFloat32(PositionedByteRange src); static int encodeFloat64(PositionedByteRange dst, double val, Order ord); static double decodeFloat64(PositionedByteRange src); static boolean isEncodedValue(PositionedByteRange src); static boolean isNull(PositionedByteRange src); static boolean isNumeric(PositionedByteRange src); static boolean isNumericInfinite(PositionedByteRange src); static boolean isNumericNaN(PositionedByteRange src); static boolean isNumericZero(PositionedByteRange src); static boolean isFixedInt8(PositionedByteRange src); static boolean isFixedInt16(PositionedByteRange src); static boolean isFixedInt32(PositionedByteRange src); static boolean isFixedInt64(PositionedByteRange src); static boolean isFixedFloat32(PositionedByteRange src); static boolean isFixedFloat64(PositionedByteRange src); static boolean isText(PositionedByteRange src); static boolean isBlobVar(PositionedByteRange src); static boolean isBlobCopy(PositionedByteRange src); static int skip(PositionedByteRange src); static int length(PositionedByteRange buff); static final Charset UTF8; static final int MAX_PRECISION; static final MathContext DEFAULT_MATH_CONTEXT; } | @Test(expected = IllegalArgumentException.class) public void testBlobCopyNoZeroBytes() { byte[] val = { 0x01, 0x02, 0x00, 0x03 }; byte[] ascExpected = { 0x38, 0x01, 0x02, 0x00, 0x03 }; PositionedByteRange buf = new SimplePositionedMutableByteRange(val.length + 1); OrderedBytes.encodeBlobCopy(buf, val, Order.ASCENDING); assertArrayEquals(ascExpected, buf.getBytes()); buf.set(val.length + 2); OrderedBytes.encodeBlobCopy(buf, val, Order.DESCENDING); fail("test should never get here."); } |
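BLOB_COPY stores the value byte-for-byte, so only the DESCENDING variant appends a 0x00 terminator, and only it must reject 0x00 bytes in the payload (the scan loop in the focal method). A round-trip sketch for a legal value:

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Order;
import org.apache.hadoop.hbase.util.OrderedBytes;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

public class BlobCopySketch {
  public static void main(String[] args) {
    byte[] val = Bytes.toBytes("bar"); // contains no 0x00, so DESCENDING is permitted
    PositionedByteRange buf = new SimplePositionedMutableByteRange(val.length + 2);
    OrderedBytes.encodeBlobCopy(buf, val, Order.DESCENDING); // header + inverted bytes + TERM
    buf.setPosition(0);
    System.out.println(Bytes.toString(OrderedBytes.decodeBlobCopy(buf))); // "bar"
  }
}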
OrderedBytes { public static int skip(PositionedByteRange src) { final int start = src.getPosition(); byte header = src.get(); Order ord = (-1 == Integer.signum(header)) ? DESCENDING : ASCENDING; header = ord.apply(header); switch (header) { case NULL: case NEG_INF: return 1; case NEG_LARGE: skipVaruint64(src, DESCENDING != ord); skipSignificand(src, DESCENDING != ord); return src.getPosition() - start; case NEG_MED_MIN: case NEG_MED_MIN + 0x01: case NEG_MED_MIN + 0x02: case NEG_MED_MIN + 0x03: case NEG_MED_MIN + 0x04: case NEG_MED_MIN + 0x05: case NEG_MED_MIN + 0x06: case NEG_MED_MIN + 0x07: case NEG_MED_MIN + 0x08: case NEG_MED_MIN + 0x09: case NEG_MED_MAX: skipSignificand(src, DESCENDING != ord); return src.getPosition() - start; case NEG_SMALL: skipVaruint64(src, DESCENDING == ord); skipSignificand(src, DESCENDING != ord); return src.getPosition() - start; case ZERO: return 1; case POS_SMALL: skipVaruint64(src, DESCENDING != ord); skipSignificand(src, DESCENDING == ord); return src.getPosition() - start; case POS_MED_MIN: case POS_MED_MIN + 0x01: case POS_MED_MIN + 0x02: case POS_MED_MIN + 0x03: case POS_MED_MIN + 0x04: case POS_MED_MIN + 0x05: case POS_MED_MIN + 0x06: case POS_MED_MIN + 0x07: case POS_MED_MIN + 0x08: case POS_MED_MIN + 0x09: case POS_MED_MAX: skipSignificand(src, DESCENDING == ord); return src.getPosition() - start; case POS_LARGE: skipVaruint64(src, DESCENDING == ord); skipSignificand(src, DESCENDING == ord); return src.getPosition() - start; case POS_INF: return 1; case NAN: return 1; case FIXED_INT8: src.setPosition(src.getPosition() + 1); return src.getPosition() - start; case FIXED_INT16: src.setPosition(src.getPosition() + 2); return src.getPosition() - start; case FIXED_INT32: src.setPosition(src.getPosition() + 4); return src.getPosition() - start; case FIXED_INT64: src.setPosition(src.getPosition() + 8); return src.getPosition() - start; case FIXED_FLOAT32: src.setPosition(src.getPosition() + 4); return src.getPosition() - start; case FIXED_FLOAT64: src.setPosition(src.getPosition() + 8); return src.getPosition() - start; case TEXT: do { header = ord.apply(src.get()); } while (header != TERM); return src.getPosition() - start; case BLOB_VAR: do { header = ord.apply(src.get()); } while ((byte) (header & 0x80) != TERM); return src.getPosition() - start; case BLOB_COPY: if (Order.DESCENDING == ord) { do { header = ord.apply(src.get()); } while (header != TERM); return src.getPosition() - start; } else { src.setPosition(src.getLength()); return src.getPosition() - start; } default: throw unexpectedHeader(header); } } static int encodeNumeric(PositionedByteRange dst, long val, Order ord); static int encodeNumeric(PositionedByteRange dst, double val, Order ord); static int encodeNumeric(PositionedByteRange dst, BigDecimal val, Order ord); static double decodeNumericAsDouble(PositionedByteRange src); static long decodeNumericAsLong(PositionedByteRange src); static BigDecimal decodeNumericAsBigDecimal(PositionedByteRange src); static int encodeString(PositionedByteRange dst, String val, Order ord); static String decodeString(PositionedByteRange src); static int blobVarEncodedLength(int len); static int encodeBlobVar(PositionedByteRange dst, byte[] val, int voff, int vlen,
Order ord); static int encodeBlobVar(PositionedByteRange dst, byte[] val, Order ord); static byte[] decodeBlobVar(PositionedByteRange src); static int encodeBlobCopy(PositionedByteRange dst, byte[] val, int voff, int vlen,
Order ord); static int encodeBlobCopy(PositionedByteRange dst, byte[] val, Order ord); static byte[] decodeBlobCopy(PositionedByteRange src); static int encodeNull(PositionedByteRange dst, Order ord); static int encodeInt8(PositionedByteRange dst, byte val, Order ord); static byte decodeInt8(PositionedByteRange src); static int encodeInt16(PositionedByteRange dst, short val, Order ord); static short decodeInt16(PositionedByteRange src); static int encodeInt32(PositionedByteRange dst, int val, Order ord); static int decodeInt32(PositionedByteRange src); static int encodeInt64(PositionedByteRange dst, long val, Order ord); static long decodeInt64(PositionedByteRange src); static int encodeFloat32(PositionedByteRange dst, float val, Order ord); static float decodeFloat32(PositionedByteRange src); static int encodeFloat64(PositionedByteRange dst, double val, Order ord); static double decodeFloat64(PositionedByteRange src); static boolean isEncodedValue(PositionedByteRange src); static boolean isNull(PositionedByteRange src); static boolean isNumeric(PositionedByteRange src); static boolean isNumericInfinite(PositionedByteRange src); static boolean isNumericNaN(PositionedByteRange src); static boolean isNumericZero(PositionedByteRange src); static boolean isFixedInt8(PositionedByteRange src); static boolean isFixedInt16(PositionedByteRange src); static boolean isFixedInt32(PositionedByteRange src); static boolean isFixedInt64(PositionedByteRange src); static boolean isFixedFloat32(PositionedByteRange src); static boolean isFixedFloat64(PositionedByteRange src); static boolean isText(PositionedByteRange src); static boolean isBlobVar(PositionedByteRange src); static boolean isBlobCopy(PositionedByteRange src); static int skip(PositionedByteRange src); static int length(PositionedByteRange buff); static final Charset UTF8; static final int MAX_PRECISION; static final MathContext DEFAULT_MATH_CONTEXT; } | @Test public void testSkip() { BigDecimal longMax = BigDecimal.valueOf(Long.MAX_VALUE); double negInf = Double.NEGATIVE_INFINITY; BigDecimal negLarge = longMax.multiply(longMax).negate(); BigDecimal negMed = new BigDecimal("-10.0"); BigDecimal negSmall = new BigDecimal("-0.0010"); long zero = 0l; BigDecimal posSmall = negSmall.negate(); BigDecimal posMed = negMed.negate(); BigDecimal posLarge = negLarge.negate(); double posInf = Double.POSITIVE_INFINITY; double nan = Double.NaN; byte int8 = 100; short int16 = 100; int int32 = 100; long int64 = 100l; float float32 = 100.0f; double float64 = 100.0d; String text = "hello world."; byte[] blobVar = Bytes.toBytes("foo"); byte[] blobCopy = Bytes.toBytes("bar"); for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) { PositionedByteRange buff = new SimplePositionedMutableByteRange(30); int o; o = OrderedBytes.encodeNull(buff, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeNumeric(buff, negInf, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeNumeric(buff, negLarge, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeNumeric(buff, negMed, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeNumeric(buff, negSmall, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeNumeric(buff, zero, ord); buff.setPosition(0); assertEquals(o, 
OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeNumeric(buff, posSmall, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeNumeric(buff, posMed, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeNumeric(buff, posLarge, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeNumeric(buff, posInf, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeNumeric(buff, nan, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeInt8(buff, int8, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeInt16(buff, int16, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeInt32(buff, int32, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeInt64(buff, int64, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeFloat32(buff, float32, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeFloat64(buff, float64, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeString(buff, text, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeBlobVar(buff, blobVar, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.set(blobCopy.length + (Order.ASCENDING == ord ? 1 : 2)); o = OrderedBytes.encodeBlobCopy(buff, blobCopy, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); } } |
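Because every encoding is self-delimiting, skip() can step over one complete value per call without decoding it, which is what makes multi-field row keys cheap to traverse. A sketch that skips one field and decodes the next (the field values are illustrative):

import org.apache.hadoop.hbase.util.Order;
import org.apache.hadoop.hbase.util.OrderedBytes;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

public class SkipSketch {
  public static void main(String[] args) {
    PositionedByteRange buff = new SimplePositionedMutableByteRange(32);
    OrderedBytes.encodeInt32(buff, 42, Order.ASCENDING);     // field 1: header + 4 bytes
    OrderedBytes.encodeString(buff, "key", Order.ASCENDING); // field 2
    buff.setPosition(0);
    int width = OrderedBytes.skip(buff);                     // hops over the FIXED_INT32
    System.out.println("skipped " + width + " bytes");       // 5
    System.out.println(OrderedBytes.decodeString(buff));     // "key"
  }
}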
HBaseFsck extends Configured implements Closeable { private void closeRegion(HbckInfo hi) throws IOException, InterruptedException { if (hi.metaEntry == null && hi.hdfsEntry == null) { undeployRegions(hi); return; } Get get = new Get(hi.getRegionName()); get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); get.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); get.addColumn(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER); if (hi.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) { int numReplicas = admin.getTableDescriptor(hi.getTableName()).getRegionReplication(); for (int i = 0; i < numReplicas; i++) { get.addColumn(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(i)); get.addColumn(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(i)); } } Result r = meta.get(get); RegionLocations rl = MetaTableAccessor.getRegionLocations(r); if (rl == null) { LOG.warn("Unable to close region " + hi.getRegionNameAsString() + " since meta does not have handle to reach it"); return; } for (HRegionLocation h : rl.getRegionLocations()) { ServerName serverName = h.getServerName(); if (serverName == null) { errors.reportError("Unable to close region " + hi.getRegionNameAsString() + " because meta does not " + "have handle to reach it."); continue; } HRegionInfo hri = h.getRegionInfo(); if (hri == null) { LOG.warn("Unable to close region " + hi.getRegionNameAsString() + " because hbase:meta had invalid or missing " + HConstants.CATALOG_FAMILY_STR + ":" + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " qualifier value."); continue; } HBaseFsckRepair.closeRegionSilentlyAndWait(connection, serverName, hri); } } HBaseFsck(Configuration conf); HBaseFsck(Configuration conf, ExecutorService exec); void connect(); void offlineHdfsIntegrityRepair(); int onlineConsistencyRepair(); int onlineHbck(); static byte[] keyOnly(byte[] b); @Override void close(); void checkRegionBoundaries(); ErrorReporter getErrors(); void fixEmptyMetaCells(); void fixOrphanTables(); boolean rebuildMeta(boolean fix); void loadHdfsRegionDirs(); int mergeRegionDirs(Path targetRegionDir, HbckInfo contained); void dumpOverlapProblems(Multimap<byte[], HbckInfo> regions); void dumpSidelinedRegions(Map<Path, HbckInfo> regions); Multimap<byte[], HbckInfo> getOverlapGroups(
TableName table); static void setDisplayFullReport(); static void setForceExclusive(); boolean isExclusive(); static void setDisableBalancer(); boolean shouldDisableBalancer(); void setFixTableLocks(boolean shouldFix); void setFixTableZNodes(boolean shouldFix); void setFixAssignments(boolean shouldFix); void setFixMeta(boolean shouldFix); void setFixEmptyMetaCells(boolean shouldFix); void setCheckHdfs(boolean checking); void setFixHdfsHoles(boolean shouldFix); void setFixTableOrphans(boolean shouldFix); void setFixHdfsOverlaps(boolean shouldFix); void setFixHdfsOrphans(boolean shouldFix); void setFixVersionFile(boolean shouldFix); boolean shouldFixVersionFile(); void setSidelineBigOverlaps(boolean sbo); boolean shouldSidelineBigOverlaps(); void setFixSplitParents(boolean shouldFix); void setFixReferenceFiles(boolean shouldFix); boolean shouldIgnorePreCheckPermission(); void setIgnorePreCheckPermission(boolean ignorePreCheckPermission); void setMaxMerge(int mm); int getMaxMerge(); void setMaxOverlapsToSideline(int mo); int getMaxOverlapsToSideline(); void includeTable(TableName table); void setTimeLag(long seconds); void setSidelineDir(String sidelineDir); HFileCorruptionChecker getHFilecorruptionChecker(); void setHFileCorruptionChecker(HFileCorruptionChecker hfcc); void setRetCode(int code); int getRetCode(); static void main(String[] args); HBaseFsck exec(ExecutorService exec, String[] args); static void debugLsr(Configuration conf,
Path p); static void debugLsr(Configuration conf,
Path p, ErrorReporter errors); static final long DEFAULT_TIME_LAG; static final long DEFAULT_SLEEP_BEFORE_RERUN; } | @Test(timeout=180000) public void testFixAssignmentsWhenMETAinTransition() throws Exception { MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); admin.closeRegion(cluster.getServerHoldingMeta(), HRegionInfo.FIRST_META_REGIONINFO); regionStates.regionOffline(HRegionInfo.FIRST_META_REGIONINFO); new MetaTableLocator().deleteMetaLocation(cluster.getMaster().getZooKeeper()); assertFalse(regionStates.isRegionOnline(HRegionInfo.FIRST_META_REGIONINFO)); HBaseFsck hbck = doFsck(conf, true); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.UNKNOWN, ERROR_CODE.NO_META_REGION, ERROR_CODE.NULL_META_REGION }); assertNoErrors(doFsck(conf, false)); } |
SimpleMutableByteRange extends AbstractByteRange { @Override public boolean equals(Object thatObject) { if (thatObject == null) { return false; } if (this == thatObject) { return true; } if (hashCode() != thatObject.hashCode()) { return false; } if (!(thatObject instanceof SimpleMutableByteRange)) { return false; } SimpleMutableByteRange that = (SimpleMutableByteRange) thatObject; return Bytes.equals(bytes, offset, length, that.bytes, that.offset, that.length); } SimpleMutableByteRange(); SimpleMutableByteRange(int capacity); SimpleMutableByteRange(byte[] bytes); SimpleMutableByteRange(byte[] bytes, int offset, int length); @Override ByteRange unset(); @Override ByteRange put(int index, byte val); @Override ByteRange put(int index, byte[] val); @Override ByteRange put(int index, byte[] val, int offset, int length); @Override ByteRange putShort(int index, short val); @Override ByteRange putInt(int index, int val); @Override ByteRange putLong(int index, long val); @Override int putVLong(int index, long val); @Override ByteRange deepCopy(); @Override ByteRange shallowCopy(); @Override ByteRange shallowCopySubRange(int innerOffset, int copyLength); @Override boolean equals(Object thatObject); } | @Test public void testEmpty(){ Assert.assertTrue(SimpleMutableByteRange.isEmpty(null)); ByteRange r = new SimpleMutableByteRange(); Assert.assertTrue(SimpleMutableByteRange.isEmpty(r)); Assert.assertTrue(r.isEmpty()); r.set(new byte[0]); Assert.assertEquals(0, r.getBytes().length); Assert.assertEquals(0, r.getOffset()); Assert.assertEquals(0, r.getLength()); Assert.assertTrue(Bytes.equals(new byte[0], r.deepCopyToNewArray())); Assert.assertEquals(0, r.compareTo(new SimpleMutableByteRange(new byte[0], 0, 0))); Assert.assertEquals(0, r.hashCode()); } |
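equals() first short-circuits on the cached hash and then compares only the [offset, offset + length) window, so two ranges over different backing arrays compare equal when their views match. For instance:

import org.apache.hadoop.hbase.util.ByteRange;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.SimpleMutableByteRange;

public class RangeEqualitySketch {
  public static void main(String[] args) {
    ByteRange a = new SimpleMutableByteRange(Bytes.toBytes("abcde"), 1, 3); // view over "bcd"
    ByteRange b = new SimpleMutableByteRange(Bytes.toBytes("xbcdx"), 1, 3); // view over "bcd"
    System.out.println(a.equals(b)); // true: only the windows are compared
  }
}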
WeakObjectPool { public V get(K key) { ObjectReference ref = referenceCache.get(key); if (ref != null) { V obj = ref.get(); if (obj != null) { return obj; } referenceCache.remove(key, ref); } V newObj = objectFactory.createObject(key); ObjectReference newRef = new ObjectReference(key, newObj); while (true) { ObjectReference existingRef = referenceCache.putIfAbsent(key, newRef); if (existingRef == null) { return newObj; } V existingObject = existingRef.get(); if (existingObject != null) { return existingObject; } referenceCache.remove(key, existingRef); } } WeakObjectPool(ObjectFactory<K, V> objectFactory); WeakObjectPool(ObjectFactory<K, V> objectFactory, int initialCapacity); WeakObjectPool(
ObjectFactory<K, V> objectFactory,
int initialCapacity,
int concurrencyLevel); void purge(); V get(K key); int size(); static final int DEFAULT_INITIAL_CAPACITY; static final int DEFAULT_CONCURRENCY_LEVEL; } | @Test public void testKeys() { Object obj1 = pool.get("a"); Object obj2 = pool.get(new String("a")); Assert.assertSame(obj1, obj2); Object obj3 = pool.get("b"); Assert.assertNotSame(obj1, obj3); }
@Test(timeout=1000) public void testCongestion() throws Exception { final int THREAD_COUNT = 100; final AtomicBoolean assertionFailed = new AtomicBoolean(); final AtomicReference<Object> expectedObjRef = new AtomicReference<Object>(); final CountDownLatch prepareLatch = new CountDownLatch(THREAD_COUNT); final CountDownLatch startLatch = new CountDownLatch(1); final CountDownLatch endLatch = new CountDownLatch(THREAD_COUNT); for (int i=0; i<THREAD_COUNT; i++) { new Thread() { @Override public void run() { prepareLatch.countDown(); try { startLatch.await(); Object obj = pool.get("a"); if (! expectedObjRef.compareAndSet(null, obj)) { if (expectedObjRef.get() != obj) { assertionFailed.set(true); } } } catch (Exception e) { assertionFailed.set(true); } finally { endLatch.countDown(); } } }.start(); } prepareLatch.await(); startLatch.countDown(); endLatch.await(); if (assertionFailed.get()) { Assert.fail(); } } |
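The two tests pin down the pool's guarantees: equal keys canonicalize to a single instance even under contention, and the factory runs at most once per live key. A sketch, assuming the nested ObjectFactory callback shape implied by the constructor signatures above:

import org.apache.hadoop.hbase.util.WeakObjectPool;

public class PoolSketch {
  public static void main(String[] args) {
    WeakObjectPool<String, Object> pool = new WeakObjectPool<String, Object>(
        new WeakObjectPool.ObjectFactory<String, Object>() {
          @Override
          public Object createObject(String key) {
            return new Object(); // e.g. a per-row lock; invoked once per live key
          }
        });
    Object lock1 = pool.get("row-1");
    Object lock2 = pool.get(new String("row-1")); // equal key, distinct String instance
    System.out.println(lock1 == lock2);           // true, matching testKeys()
  }
}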
AES extends Cipher { @VisibleForTesting SecureRandom getRNG() { return rng; } AES(CipherProvider provider); @Override String getName(); @Override int getKeyLength(); @Override int getIvLength(); @Override Key getRandomKey(); @Override Encryptor getEncryptor(); @Override Decryptor getDecryptor(); @Override OutputStream createEncryptionStream(OutputStream out, Context context, byte[] iv); @Override OutputStream createEncryptionStream(OutputStream out, Encryptor e); @Override InputStream createDecryptionStream(InputStream in, Context context, byte[] iv); @Override InputStream createDecryptionStream(InputStream in, Decryptor d); static final int KEY_LENGTH; static final int KEY_LENGTH_BITS; static final int BLOCK_SIZE; static final int IV_LENGTH; static final String CIPHER_MODE_KEY; static final String CIPHER_PROVIDER_KEY; static final String RNG_ALGORITHM_KEY; static final String RNG_PROVIDER_KEY; } | @Test public void testAlternateRNG() throws Exception { Security.addProvider(new TestProvider()); Configuration conf = new Configuration(); conf.set(AES.RNG_ALGORITHM_KEY, "TestRNG"); conf.set(AES.RNG_PROVIDER_KEY, "TEST"); DefaultCipherProvider.getInstance().setConf(conf); AES aes = new AES(DefaultCipherProvider.getInstance()); assertEquals("AES did not find alternate RNG", aes.getRNG().getAlgorithm(), "TestRNG"); } |
BoundedByteBufferPool { public void putBuffer(ByteBuffer bb) { if (bb.capacity() > this.maxByteBufferSizeToCache) return; boolean success = false; int average = 0; lock.lock(); try { success = this.buffers.offer(bb); if (success) { this.totalReservoirCapacity += bb.capacity(); average = this.totalReservoirCapacity / this.buffers.size(); } } finally { lock.unlock(); } if (!success) { LOG.warn("At capacity: " + this.buffers.size()); } else { if (average > this.runningAverage && average < this.maxByteBufferSizeToCache) { this.runningAverage = average; } } } BoundedByteBufferPool(final int maxByteBufferSizeToCache, final int initialByteBufferSize,
final int maxToCache); ByteBuffer getBuffer(); void putBuffer(ByteBuffer bb); } | @Test public void testEquivalence() { ByteBuffer bb = ByteBuffer.allocate(1); this.reservoir.putBuffer(bb); this.reservoir.putBuffer(bb); this.reservoir.putBuffer(bb); assertEquals(3, this.reservoir.buffers.size()); } |
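getBuffer()/putBuffer() form a borrow-return cycle: returns larger than maxByteBufferSizeToCache are dropped, offers beyond maxToCache fail with a warning, and the running average nudges future allocations toward typical payload sizes. A sketch with illustrative sizing:

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.io.BoundedByteBufferPool;

public class ReservoirSketch {
  public static void main(String[] args) {
    BoundedByteBufferPool reservoir = new BoundedByteBufferPool(
        1024 /* maxByteBufferSizeToCache */, 64 /* initialByteBufferSize */, 16 /* maxToCache */);
    ByteBuffer bb = reservoir.getBuffer(); // sized near the pool's running average
    try {
      bb.put((byte) 1);                    // fill with a payload
    } finally {
      reservoir.putBuffer(bb);             // re-cached, since its capacity is <= 1024
    }
  }
}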
ChoreService implements ChoreServicer { @Override public synchronized void cancelChore(ScheduledChore chore) { cancelChore(chore, true); } @VisibleForTesting ChoreService(final String coreThreadPoolPrefix); ChoreService(final String coreThreadPoolPrefix, final boolean jitter); ChoreService(final String coreThreadPoolPrefix, int corePoolSize, boolean jitter); static ChoreService getInstance(final String coreThreadPoolPrefix); synchronized boolean scheduleChore(ScheduledChore chore); @Override synchronized void cancelChore(ScheduledChore chore); @Override synchronized void cancelChore(ScheduledChore chore, boolean mayInterruptIfRunning); @Override synchronized boolean isChoreScheduled(ScheduledChore chore); @Override synchronized boolean triggerNow(ScheduledChore chore); @Override synchronized void onChoreMissedStartTime(ScheduledChore chore); synchronized void shutdown(); boolean isShutdown(); boolean isTerminated(); final static int MIN_CORE_POOL_SIZE; } | @Test (timeout=20000) public void testCancelChore() throws InterruptedException { final int period = 100; ScheduledChore chore1 = new DoNothingChore("chore1", period); ChoreService service = ChoreService.getInstance("testCancelChore"); try { service.scheduleChore(chore1); assertTrue(chore1.isScheduled()); chore1.cancel(true); assertFalse(chore1.isScheduled()); assertTrue(service.getNumberOfScheduledChores() == 0); } finally { shutdownService(service); } } |
ChoreService implements ChoreServicer { int getCorePoolSize() { return scheduler.getCorePoolSize(); } @VisibleForTesting ChoreService(final String coreThreadPoolPrefix); ChoreService(final String coreThreadPoolPrefix, final boolean jitter); ChoreService(final String coreThreadPoolPrefix, int corePoolSize, boolean jitter); static ChoreService getInstance(final String coreThreadPoolPrefix); synchronized boolean scheduleChore(ScheduledChore chore); @Override synchronized void cancelChore(ScheduledChore chore); @Override synchronized void cancelChore(ScheduledChore chore, boolean mayInterruptIfRunning); @Override synchronized boolean isChoreScheduled(ScheduledChore chore); @Override synchronized boolean triggerNow(ScheduledChore chore); @Override synchronized void onChoreMissedStartTime(ScheduledChore chore); synchronized void shutdown(); boolean isShutdown(); boolean isTerminated(); final static int MIN_CORE_POOL_SIZE; } | @Test (timeout=20000) public void testChoreServiceConstruction() throws InterruptedException { final int corePoolSize = 10; final int defaultCorePoolSize = ChoreService.MIN_CORE_POOL_SIZE; ChoreService customInit = new ChoreService("testChoreServiceConstruction_custom", corePoolSize, false); try { assertEquals(corePoolSize, customInit.getCorePoolSize()); } finally { shutdownService(customInit); } ChoreService defaultInit = new ChoreService("testChoreServiceConstruction_default"); try { assertEquals(defaultCorePoolSize, defaultInit.getCorePoolSize()); } finally { shutdownService(defaultInit); } ChoreService invalidInit = new ChoreService("testChoreServiceConstruction_invalid", -10, false); try { assertEquals(defaultCorePoolSize, invalidInit.getCorePoolSize()); } finally { shutdownService(invalidInit); } } |
HBaseFsck extends Configured implements Closeable { public Multimap<byte[], HbckInfo> getOverlapGroups( TableName table) { TableInfo ti = tablesInfo.get(table); return ti.overlapGroups; } HBaseFsck(Configuration conf); HBaseFsck(Configuration conf, ExecutorService exec); void connect(); void offlineHdfsIntegrityRepair(); int onlineConsistencyRepair(); int onlineHbck(); static byte[] keyOnly(byte[] b); @Override void close(); void checkRegionBoundaries(); ErrorReporter getErrors(); void fixEmptyMetaCells(); void fixOrphanTables(); boolean rebuildMeta(boolean fix); void loadHdfsRegionDirs(); int mergeRegionDirs(Path targetRegionDir, HbckInfo contained); void dumpOverlapProblems(Multimap<byte[], HbckInfo> regions); void dumpSidelinedRegions(Map<Path, HbckInfo> regions); Multimap<byte[], HbckInfo> getOverlapGroups(
TableName table); static void setDisplayFullReport(); static void setForceExclusive(); boolean isExclusive(); static void setDisableBalancer(); boolean shouldDisableBalancer(); void setFixTableLocks(boolean shouldFix); void setFixTableZNodes(boolean shouldFix); void setFixAssignments(boolean shouldFix); void setFixMeta(boolean shouldFix); void setFixEmptyMetaCells(boolean shouldFix); void setCheckHdfs(boolean checking); void setFixHdfsHoles(boolean shouldFix); void setFixTableOrphans(boolean shouldFix); void setFixHdfsOverlaps(boolean shouldFix); void setFixHdfsOrphans(boolean shouldFix); void setFixVersionFile(boolean shouldFix); boolean shouldFixVersionFile(); void setSidelineBigOverlaps(boolean sbo); boolean shouldSidelineBigOverlaps(); void setFixSplitParents(boolean shouldFix); void setFixReferenceFiles(boolean shouldFix); boolean shouldIgnorePreCheckPermission(); void setIgnorePreCheckPermission(boolean ignorePreCheckPermission); void setMaxMerge(int mm); int getMaxMerge(); void setMaxOverlapsToSideline(int mo); int getMaxOverlapsToSideline(); void includeTable(TableName table); void setTimeLag(long seconds); void setSidelineDir(String sidelineDir); HFileCorruptionChecker getHFilecorruptionChecker(); void setHFileCorruptionChecker(HFileCorruptionChecker hfcc); void setRetCode(int code); int getRetCode(); static void main(String[] args); HBaseFsck exec(ExecutorService exec, String[] args); static void debugLsr(Configuration conf,
Path p); static void debugLsr(Configuration conf,
Path p, ErrorReporter errors); static final long DEFAULT_TIME_LAG; static final long DEFAULT_SLEEP_BEFORE_RERUN; } | @Test (timeout=180000) public void testHBaseFsckClean() throws Exception { assertNoErrors(doFsck(conf, false)); TableName table = TableName.valueOf("tableClean"); try { HBaseFsck hbck = doFsck(conf, false); assertNoErrors(hbck); setupTable(table); assertEquals(ROWKEYS.length, countRows()); hbck = doFsck(conf, false); assertNoErrors(hbck); assertEquals(0, hbck.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { cleanupTable(table); } }
@Test (timeout=180000) public void testDupeStartKey() throws Exception { TableName table = TableName.valueOf("tableDupeStartKey"); try { setupTable(table); assertNoErrors(doFsck(conf, false)); assertEquals(ROWKEYS.length, countRows()); HRegionInfo hriDupe = createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("A2")); TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriDupe); TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() .waitForAssignment(hriDupe); ServerName server = regionStates.getRegionServerOfRegion(hriDupe); TEST_UTIL.assertRegionOnServer(hriDupe, server, REGION_ONLINE_TIMEOUT); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.DUPE_STARTKEYS, ERROR_CODE.DUPE_STARTKEYS}); assertEquals(2, hbck.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); doFsck(conf,true); HBaseFsck hbck2 = doFsck(conf,false); assertNoErrors(hbck2); assertEquals(0, hbck2.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { cleanupTable(table); } }
@Test (timeout=180000) public void testDupeRegion() throws Exception { TableName table = TableName.valueOf("tableDupeRegion"); try { setupTable(table); assertNoErrors(doFsck(conf, false)); assertEquals(ROWKEYS.length, countRows()); HRegionInfo hriDupe = createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("B")); TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriDupe); TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() .waitForAssignment(hriDupe); ServerName server = regionStates.getRegionServerOfRegion(hriDupe); TEST_UTIL.assertRegionOnServer(hriDupe, server, REGION_ONLINE_TIMEOUT); while (findDeployedHSI(getDeployedHRIs((HBaseAdmin) admin), hriDupe) == null) { Thread.sleep(250); } LOG.debug("Finished assignment of dupe region"); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.DUPE_STARTKEYS, ERROR_CODE.DUPE_STARTKEYS}); assertEquals(2, hbck.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); doFsck(conf,true); HBaseFsck hbck2 = doFsck(conf,false); assertNoErrors(hbck2); assertEquals(0, hbck2.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { cleanupTable(table); } }
@Test (timeout=180000) public void testDegenerateRegions() throws Exception { TableName table = TableName.valueOf("tableDegenerateRegions"); try { setupTable(table); assertNoErrors(doFsck(conf,false)); assertEquals(ROWKEYS.length, countRows()); HRegionInfo hriDupe = createRegion(tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("B")); TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriDupe); TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() .waitForAssignment(hriDupe); ServerName server = regionStates.getRegionServerOfRegion(hriDupe); TEST_UTIL.assertRegionOnServer(hriDupe, server, REGION_ONLINE_TIMEOUT); HBaseFsck hbck = doFsck(conf,false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.DEGENERATE_REGION, ERROR_CODE.DUPE_STARTKEYS, ERROR_CODE.DUPE_STARTKEYS }); assertEquals(2, hbck.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); doFsck(conf,true); HBaseFsck hbck2 = doFsck(conf,false); assertNoErrors(hbck2); assertEquals(0, hbck2.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { cleanupTable(table); } }
@Test (timeout=180000) public void testContainedRegionOverlap() throws Exception { TableName table = TableName.valueOf("tableContainedRegionOverlap"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); HRegionInfo hriOverlap = createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B")); TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap); TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() .waitForAssignment(hriOverlap); ServerName server = regionStates.getRegionServerOfRegion(hriOverlap); TEST_UTIL.assertRegionOnServer(hriOverlap, server, REGION_ONLINE_TIMEOUT); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.OVERLAP_IN_REGION_CHAIN }); assertEquals(2, hbck.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); doFsck(conf, true); HBaseFsck hbck2 = doFsck(conf,false); assertNoErrors(hbck2); assertEquals(0, hbck2.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { cleanupTable(table); } }
@Test (timeout=180000) public void testOverlapAndOrphan() throws Exception { TableName table = TableName.valueOf("tableOverlapAndOrphan"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); admin.disableTable(table); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("B"), true, true, false, true, HRegionInfo.DEFAULT_REPLICA_ID); TEST_UTIL.getHBaseAdmin().enableTable(table); HRegionInfo hriOverlap = createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B")); TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap); TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() .waitForAssignment(hriOverlap); ServerName server = regionStates.getRegionServerOfRegion(hriOverlap); TEST_UTIL.assertRegionOnServer(hriOverlap, server, REGION_ONLINE_TIMEOUT); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.ORPHAN_HDFS_REGION, ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN}); doFsck(conf, true); HBaseFsck hbck2 = doFsck(conf,false); assertNoErrors(hbck2); assertEquals(0, hbck2.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { cleanupTable(table); } }
@Test (timeout=180000) public void testCoveredStartKey() throws Exception { TableName table = TableName.valueOf("tableCoveredStartKey"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); HRegionInfo hriOverlap = createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B2")); TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap); TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() .waitForAssignment(hriOverlap); ServerName server = regionStates.getRegionServerOfRegion(hriOverlap); TEST_UTIL.assertRegionOnServer(hriOverlap, server, REGION_ONLINE_TIMEOUT); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.OVERLAP_IN_REGION_CHAIN, ERROR_CODE.OVERLAP_IN_REGION_CHAIN }); assertEquals(3, hbck.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); doFsck(conf, true); HBaseFsck hbck2 = doFsck(conf, false); assertErrors(hbck2, new ERROR_CODE[0]); assertEquals(0, hbck2.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { cleanupTable(table); } }
@Test (timeout=180000) public void testRegionHole() throws Exception { TableName table = TableName.valueOf("tableRegionHole"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); admin.disableTable(table); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), true, true, true); admin.enableTable(table); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.HOLE_IN_REGION_CHAIN}); assertEquals(0, hbck.getOverlapGroups(table).size()); doFsck(conf, true); assertNoErrors(doFsck(conf,false)); assertEquals(ROWKEYS.length - 2 , countRows()); } finally { cleanupTable(table); } }
@Test (timeout=180000) public void testHDFSRegioninfoMissing() throws Exception { TableName table = TableName.valueOf("tableHDFSRegioninfoMissing"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); admin.disableTable(table); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), true, true, false, true, HRegionInfo.DEFAULT_REPLICA_ID); TEST_UTIL.getHBaseAdmin().enableTable(table); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.ORPHAN_HDFS_REGION, ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN}); assertEquals(0, hbck.getOverlapGroups(table).size()); doFsck(conf, true); assertNoErrors(doFsck(conf, false)); assertEquals(ROWKEYS.length, countRows()); } finally { cleanupTable(table); } }
@Test (timeout=180000) public void testNotInMetaOrDeployedHole() throws Exception { TableName table = TableName.valueOf("tableNotInMetaOrDeployedHole"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); admin.disableTable(table); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), true, true, false); admin.enableTable(table); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN}); assertEquals(0, hbck.getOverlapGroups(table).size()); assertErrors(doFsck(conf, true) , new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN}); assertNoErrors(doFsck(conf,false)); assertEquals(ROWKEYS.length, countRows()); } finally { cleanupTable(table); } }
@Test (timeout=180000) public void testNotInMetaHole() throws Exception { TableName table = TableName.valueOf("tableNotInMetaHole"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); admin.disableTable(table); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), false, true, false); admin.enableTable(table); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN}); assertEquals(0, hbck.getOverlapGroups(table).size()); assertErrors(doFsck(conf, true) , new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN}); assertNoErrors(doFsck(conf,false)); assertEquals(ROWKEYS.length, countRows()); } finally { cleanupTable(table); } }
@Test (timeout=180000) public void testNotInHdfs() throws Exception { TableName table = TableName.valueOf("tableNotInHdfs"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); admin.flush(table); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), false, false, true); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] {ERROR_CODE.NOT_IN_HDFS}); assertEquals(0, hbck.getOverlapGroups(table).size()); doFsck(conf, true); assertNoErrors(doFsck(conf,false)); assertEquals(ROWKEYS.length - 2, countRows()); } finally { cleanupTable(table); } }
@Test (timeout=180000) public void testNoHdfsTable() throws Exception { TableName table = TableName.valueOf("NoHdfsTable"); setupTable(table); assertEquals(ROWKEYS.length, countRows()); admin.flush(table); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes(""), Bytes.toBytes("A"), false, false, true); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("B"), false, false, true); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), false, false, true); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("C"), Bytes.toBytes(""), false, false, true); deleteTableDir(table); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] {ERROR_CODE.NOT_IN_HDFS, ERROR_CODE.NOT_IN_HDFS, ERROR_CODE.NOT_IN_HDFS, ERROR_CODE.NOT_IN_HDFS,}); assertEquals(0, hbck.getOverlapGroups(table).size()); doFsck(conf, true); assertNoErrors(doFsck(conf,false)); assertFalse("Table " + table + " should have been deleted", admin.tableExists(table)); } |
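Each scenario above pairs a deliberately broken table with the narrowest fixer that repairs it. Outside the doFsck() helper, an equivalent targeted repair pass looks roughly like this sketch (the table name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.HBaseFsck;

public class HbckRepairSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseFsck fsck = new HBaseFsck(conf);
    try {
      fsck.connect();
      fsck.includeTable(TableName.valueOf("tableRegionHole")); // scope the pass to one table
      fsck.setFixAssignments(true); // realign deployments with hbase:meta
      fsck.setFixMeta(true);        // realign hbase:meta with HDFS
      fsck.setFixHdfsHoles(true);   // plug region-chain holes with empty regions
      int ret = fsck.onlineHbck();
      System.out.println("repair pass returned " + ret);
    } finally {
      fsck.close();
    }
  }
}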
ChoreService implements ChoreServicer { public synchronized boolean scheduleChore(ScheduledChore chore) { if (chore == null) { return false; } try { chore.setChoreServicer(this); ScheduledFuture<?> future = scheduler.scheduleAtFixedRate(chore, chore.getInitialDelay(), chore.getPeriod(), chore.getTimeUnit()); scheduledChores.put(chore, future); return true; } catch (Exception exception) { if (LOG.isInfoEnabled()) { LOG.info("Could not successfully schedule chore: " + chore.getName()); } return false; } } @VisibleForTesting ChoreService(final String coreThreadPoolPrefix); ChoreService(final String coreThreadPoolPrefix, final boolean jitter); ChoreService(final String coreThreadPoolPrefix, int corePoolSize, boolean jitter); static ChoreService getInstance(final String coreThreadPoolPrefix); synchronized boolean scheduleChore(ScheduledChore chore); @Override synchronized void cancelChore(ScheduledChore chore); @Override synchronized void cancelChore(ScheduledChore chore, boolean mayInterruptIfRunning); @Override synchronized boolean isChoreScheduled(ScheduledChore chore); @Override synchronized boolean triggerNow(ScheduledChore chore); @Override synchronized void onChoreMissedStartTime(ScheduledChore chore); synchronized void shutdown(); boolean isShutdown(); boolean isTerminated(); final static int MIN_CORE_POOL_SIZE; } | @Test (timeout=20000) public void testShutdownCancelsScheduledChores() throws InterruptedException { final int period = 100; ChoreService service = new ChoreService("testShutdownCancelsScheduledChores"); ScheduledChore successChore1 = new DoNothingChore("sc1", period); ScheduledChore successChore2 = new DoNothingChore("sc2", period); ScheduledChore successChore3 = new DoNothingChore("sc3", period); try { assertTrue(service.scheduleChore(successChore1)); assertTrue(successChore1.isScheduled()); assertTrue(service.scheduleChore(successChore2)); assertTrue(successChore2.isScheduled()); assertTrue(service.scheduleChore(successChore3)); assertTrue(successChore3.isScheduled()); } finally { shutdownService(service); } assertFalse(successChore1.isScheduled()); assertFalse(successChore2.isScheduled()); assertFalse(successChore3.isScheduled()); }
@Test (timeout=20000) public void testShutdownRejectsNewSchedules() throws InterruptedException { final int period = 100; ChoreService service = new ChoreService("testShutdownRejectsNewSchedules"); ScheduledChore successChore1 = new DoNothingChore("sc1", period); ScheduledChore successChore2 = new DoNothingChore("sc2", period); ScheduledChore successChore3 = new DoNothingChore("sc3", period); ScheduledChore failChore1 = new DoNothingChore("fc1", period); ScheduledChore failChore2 = new DoNothingChore("fc2", period); ScheduledChore failChore3 = new DoNothingChore("fc3", period); try { assertTrue(service.scheduleChore(successChore1)); assertTrue(successChore1.isScheduled()); assertTrue(service.scheduleChore(successChore2)); assertTrue(successChore2.isScheduled()); assertTrue(service.scheduleChore(successChore3)); assertTrue(successChore3.isScheduled()); } finally { shutdownService(service); } assertFalse(service.scheduleChore(failChore1)); assertFalse(failChore1.isScheduled()); assertFalse(service.scheduleChore(failChore2)); assertFalse(failChore2.isScheduled()); assertFalse(service.scheduleChore(failChore3)); assertFalse(failChore3.isScheduled()); } |
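A chore is an abstract periodic task; the service owns the thread pool and, as the two tests show, shutdown() both cancels everything scheduled and rejects anything new. A minimal chore sketch, assuming ScheduledChore's (name, stopper, period) constructor:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    ChoreService service = new ChoreService("sketch");
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped = false;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ScheduledChore heartbeat = new ScheduledChore("heartbeat", stopper, 100) {
      @Override
      protected void chore() {
        System.out.println("tick"); // periodic work goes here
      }
    };
    service.scheduleChore(heartbeat); // runs every 100 ms until cancelled
    Thread.sleep(350);
    service.shutdown();              // cancels the chore and rejects new schedules
  }
}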
ZKConfig { public static Properties makeZKProps(Configuration conf) { Properties zkProperties = makeZKPropsFromZooCfg(conf); if (zkProperties == null) { zkProperties = makeZKPropsFromHbaseConfig(conf); } return zkProperties; } private ZKConfig(); static Properties makeZKProps(Configuration conf); @Deprecated static Properties parseZooCfg(Configuration conf,
InputStream inputStream); static String getZKQuorumServersString(Configuration conf); static String buildZKQuorumServerString(String[] serverHosts, String clientPort); static void validateClusterKey(String key); static ZKClusterKey transformClusterKey(String key); static String getZooKeeperClusterKey(Configuration conf); static String getZooKeeperClusterKey(Configuration conf, String name); @VisibleForTesting static String standardizeZKQuorumServerString(String quorumStringInput,
String clientPort); } | @Test public void testZKConfigLoading() throws Exception { Configuration conf = HBaseConfiguration.create(); conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181); Properties props = ZKConfig.makeZKProps(conf); assertEquals("Property client port should have been default from the HBase config", "2181", props.getProperty("clientPort")); } |
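makeZKProps prefers a zoo.cfg on the classpath and otherwise derives the ZooKeeper properties from the hbase.* keys, which is the path the test exercises. A sketch of that derivation plus the quorum-string helper:

import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;

public class ZkPropsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set(HConstants.ZOOKEEPER_QUORUM, "zk1,zk2,zk3");
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181);
    Properties props = ZKConfig.makeZKProps(conf);
    System.out.println(props.getProperty("clientPort")); // "2181"
    System.out.println(ZKConfig.buildZKQuorumServerString(
        new String[] {"zk1", "zk2", "zk3"}, "2181"));    // "zk1:2181,zk2:2181,zk3:2181"
  }
}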
ZKConfig { public static String getZooKeeperClusterKey(Configuration conf) { return getZooKeeperClusterKey(conf, null); } private ZKConfig(); static Properties makeZKProps(Configuration conf); @Deprecated static Properties parseZooCfg(Configuration conf,
InputStream inputStream); static String getZKQuorumServersString(Configuration conf); static String buildZKQuorumServerString(String[] serverHosts, String clientPort); static void validateClusterKey(String key); static ZKClusterKey transformClusterKey(String key); static String getZooKeeperClusterKey(Configuration conf); static String getZooKeeperClusterKey(Configuration conf, String name); @VisibleForTesting static String standardizeZKQuorumServerString(String quorumStringInput,
String clientPort); } | @Test public void testGetZooKeeperClusterKey() { Configuration conf = HBaseConfiguration.create(); conf.set(HConstants.ZOOKEEPER_QUORUM, "\tlocalhost\n"); conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, "3333"); conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "hbase"); String clusterKey = ZKConfig.getZooKeeperClusterKey(conf, "test"); assertTrue(!clusterKey.contains("\t") && !clusterKey.contains("\n")); assertEquals("localhost:3333:hbase,test", clusterKey); } |
ZKConfig { public static void validateClusterKey(String key) throws IOException { transformClusterKey(key); } private ZKConfig(); static Properties makeZKProps(Configuration conf); @Deprecated static Properties parseZooCfg(Configuration conf,
InputStream inputStream); static String getZKQuorumServersString(Configuration conf); static String buildZKQuorumServerString(String[] serverHosts, String clientPort); static void validateClusterKey(String key); static ZKClusterKey transformClusterKey(String key); static String getZooKeeperClusterKey(Configuration conf); static String getZooKeeperClusterKey(Configuration conf, String name); @VisibleForTesting static String standardizeZKQuorumServerString(String quorumStringInput,
String clientPort); } | @Test public void testClusterKey() throws Exception { testKey("server", 2181, "hbase"); testKey("server1,server2,server3", 2181, "hbase"); try { ZKConfig.validateClusterKey("2181:hbase"); } catch (IOException ex) { } } |
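A cluster key has the shape quorum:clientPort:znodeParent, and validateClusterKey is just transformClusterKey with the result discarded, so a malformed key such as the test's "2181:hbase" surfaces as an IOException. A sketch of both outcomes:

import java.io.IOException;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;

public class ClusterKeySketch {
  public static void main(String[] args) throws IOException {
    ZKConfig.validateClusterKey("zk1,zk2,zk3:2181:/hbase"); // well-formed: returns normally
    try {
      ZKConfig.validateClusterKey("2181:hbase");            // missing a component
    } catch (IOException expected) {
      System.out.println("rejected: " + expected.getMessage());
    }
  }
}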
CellUtil { public static CellScanner createCellScanner(final List<? extends CellScannable> cellScannerables) { return new CellScanner() { private final Iterator<? extends CellScannable> iterator = cellScannerables.iterator(); private CellScanner cellScanner = null; @Override public Cell current() { return this.cellScanner != null? this.cellScanner.current(): null; } @Override public boolean advance() throws IOException { while (true) { if (this.cellScanner == null) { if (!this.iterator.hasNext()) return false; this.cellScanner = this.iterator.next().cellScanner(); } if (this.cellScanner.advance()) return true; this.cellScanner = null; } } }; } static ByteRange fillRowRange(Cell cell, ByteRange range); static ByteRange fillFamilyRange(Cell cell, ByteRange range); static ByteRange fillQualifierRange(Cell cell, ByteRange range); static ByteRange fillValueRange(Cell cell, ByteRange range); static ByteRange fillTagRange(Cell cell, ByteRange range); static byte[] cloneRow(Cell cell); static byte[] cloneFamily(Cell cell); static byte[] cloneQualifier(Cell cell); static byte[] cloneValue(Cell cell); static byte[] getTagArray(Cell cell); static int copyRowTo(Cell cell, byte[] destination, int destinationOffset); static int copyFamilyTo(Cell cell, byte[] destination, int destinationOffset); static int copyQualifierTo(Cell cell, byte[] destination, int destinationOffset); static int copyValueTo(Cell cell, byte[] destination, int destinationOffset); static int copyTagTo(Cell cell, byte[] destination, int destinationOffset); static byte getRowByte(Cell cell, int index); static ByteBuffer getValueBufferShallowCopy(Cell cell); static ByteBuffer getQualifierBufferShallowCopy(Cell cell); static Cell createCell(final byte [] row, final byte [] family, final byte [] qualifier,
final long timestamp, final byte type, final byte [] value); static Cell createCell(final byte [] rowArray, final int rowOffset, final int rowLength,
final byte [] familyArray, final int familyOffset, final int familyLength,
final byte [] qualifierArray, final int qualifierOffset, final int qualifierLength); @InterfaceAudience.Private static Cell createCell(final byte[] row, final byte[] family, final byte[] qualifier,
final long timestamp, final byte type, final byte[] value, final long memstoreTS); @InterfaceAudience.Private static Cell createCell(final byte[] row, final byte[] family, final byte[] qualifier,
final long timestamp, final byte type, final byte[] value, byte[] tags, final long memstoreTS); @InterfaceAudience.Private static Cell createCell(final byte[] row, final byte[] family, final byte[] qualifier,
final long timestamp, Type type, final byte[] value, byte[] tags); static Cell createCell(final byte [] row); static Cell createCell(final byte [] row, final byte [] value); static Cell createCell(final byte [] row, final byte [] family, final byte [] qualifier); static CellScanner createCellScanner(final List<? extends CellScannable> cellScannerables); static CellScanner createCellScanner(final Iterable<Cell> cellIterable); static CellScanner createCellScanner(final Iterator<Cell> cells); static CellScanner createCellScanner(final Cell[] cellArray); static CellScanner createCellScanner(final NavigableMap<byte [], List<Cell>> map); static boolean matchingRow(final Cell left, final Cell right); static boolean matchingRow(final Cell left, final byte[] buf); static boolean matchingRow(final Cell left, final byte[] buf, final int offset,
final int length); static boolean matchingFamily(final Cell left, final Cell right); static boolean matchingFamily(final Cell left, final byte[] buf); static boolean matchingFamily(final Cell left, final byte[] buf, final int offset,
final int length); static boolean matchingQualifier(final Cell left, final Cell right); static boolean matchingQualifier(final Cell left, final byte[] buf); static boolean matchingQualifier(final Cell left, final byte[] buf, final int offset,
final int length); static boolean matchingColumn(final Cell left, final byte[] fam, final byte[] qual); static boolean matchingColumn(final Cell left, final byte[] fam, final int foffset,
final int flength, final byte[] qual, final int qoffset, final int qlength); static boolean matchingColumn(final Cell left, final Cell right); static boolean matchingValue(final Cell left, final Cell right); static boolean matchingValue(final Cell left, final byte[] buf); static boolean matchingTimestamp(Cell a, Cell b); static boolean isDelete(final Cell cell); static boolean isDelete(final byte type); static boolean isDeleteType(Cell cell); static boolean isDeleteFamily(final Cell cell); static boolean isDeleteFamilyVersion(final Cell cell); static boolean isDeleteColumns(final Cell cell); static boolean isDeleteColumnVersion(final Cell cell); static boolean isDeleteColumnOrFamily(Cell cell); @Deprecated static int estimatedSizeOf(final Cell cell); static int estimatedSerializedSizeOf(final Cell cell); static int estimatedSerializedSizeOfKey(final Cell cell); static long estimatedHeapSizeOf(final Cell cell); @Deprecated static long estimatedHeapSizeOfWithoutTags(final Cell cell); static Iterator<Tag> tagsIterator(final byte[] tags, final int offset, final int length); static boolean overlappingKeys(final byte[] start1, final byte[] end1,
final byte[] start2, final byte[] end2); @InterfaceAudience.Private static void setSequenceId(Cell cell, long seqId); static void setTimestamp(Cell cell, long ts); static void setTimestamp(Cell cell, byte[] ts, int tsOffset); static boolean updateLatestStamp(Cell cell, long ts); static boolean updateLatestStamp(Cell cell, byte[] ts, int tsOffset); static void writeFlatKey(Cell cell, DataOutputStream out); static String getCellKeyAsString(Cell cell); static byte [] getCellKeySerializedAsKeyValueKey(final Cell cell); static void writeRowKeyExcludingCommon(Cell cell, short rLen, int commonPrefix,
DataOutputStream out); static int findCommonPrefixInFlatKey(Cell c1, Cell c2, boolean bypassFamilyCheck,
boolean withTsType); static String toString(Cell cell, boolean verbose); static boolean equals(Cell a, Cell b); static boolean matchingType(Cell a, Cell b); } | @Test public void testCreateCellScannerOverflow() throws IOException { consume(doCreateCellScanner(1, 1), 1 * 1); consume(doCreateCellScanner(3, 0), 3 * 0); consume(doCreateCellScanner(3, 3), 3 * 3); consume(doCreateCellScanner(0, 1), 0 * 1); final int hundredK = 100000; consume(doCreateCellScanner(hundredK, 0), hundredK * 0); consume(doCreateCellArray(1), 1); consume(doCreateCellArray(0), 0); consume(doCreateCellArray(3), 3); List<CellScannable> cells = new ArrayList<CellScannable>(hundredK); for (int i = 0; i < hundredK; i++) { cells.add(new TestCellScannable(1)); } consume(CellUtil.createCellScanner(cells), hundredK * 1); NavigableMap<byte [], List<Cell>> m = new TreeMap<byte [], List<Cell>>(Bytes.BYTES_COMPARATOR); List<Cell> cellArray = new ArrayList<Cell>(hundredK); for (int i = 0; i < hundredK; i++) cellArray.add(new TestCell(i)); m.put(new byte [] {'f'}, cellArray); consume(CellUtil.createCellScanner(m), hundredK * 1); } |
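Because advance() chains into each scannable's own scanner and silently drops exhausted ones, callers only ever write one flat loop. A minimal usage sketch (the dumpCells helper name and the pre-built list are assumptions, not HBase API):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScannable;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;

public class ScannerSketch {
  // Walks every Cell across all scannables as one flat stream; empty or
  // exhausted inner scanners are skipped transparently by advance().
  static void dumpCells(List<? extends CellScannable> scannables) throws IOException {
    CellScanner scanner = CellUtil.createCellScanner(scannables);
    while (scanner.advance()) {
      Cell cell = scanner.current();
      System.out.println(CellUtil.toString(cell, false));
    }
  }
}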
CellUtil { public static boolean overlappingKeys(final byte[] start1, final byte[] end1, final byte[] start2, final byte[] end2) { return (end2.length == 0 || start1.length == 0 || Bytes.compareTo(start1, end2) < 0) && (end1.length == 0 || start2.length == 0 || Bytes.compareTo(start2, end1) < 0); } static ByteRange fillRowRange(Cell cell, ByteRange range); static ByteRange fillFamilyRange(Cell cell, ByteRange range); static ByteRange fillQualifierRange(Cell cell, ByteRange range); static ByteRange fillValueRange(Cell cell, ByteRange range); static ByteRange fillTagRange(Cell cell, ByteRange range); static byte[] cloneRow(Cell cell); static byte[] cloneFamily(Cell cell); static byte[] cloneQualifier(Cell cell); static byte[] cloneValue(Cell cell); static byte[] getTagArray(Cell cell); static int copyRowTo(Cell cell, byte[] destination, int destinationOffset); static int copyFamilyTo(Cell cell, byte[] destination, int destinationOffset); static int copyQualifierTo(Cell cell, byte[] destination, int destinationOffset); static int copyValueTo(Cell cell, byte[] destination, int destinationOffset); static int copyTagTo(Cell cell, byte[] destination, int destinationOffset); static byte getRowByte(Cell cell, int index); static ByteBuffer getValueBufferShallowCopy(Cell cell); static ByteBuffer getQualifierBufferShallowCopy(Cell cell); static Cell createCell(final byte [] row, final byte [] family, final byte [] qualifier,
final long timestamp, final byte type, final byte [] value); static Cell createCell(final byte [] rowArray, final int rowOffset, final int rowLength,
final byte [] familyArray, final int familyOffset, final int familyLength,
final byte [] qualifierArray, final int qualifierOffset, final int qualifierLength); @InterfaceAudience.Private static Cell createCell(final byte[] row, final byte[] family, final byte[] qualifier,
final long timestamp, final byte type, final byte[] value, final long memstoreTS); @InterfaceAudience.Private static Cell createCell(final byte[] row, final byte[] family, final byte[] qualifier,
final long timestamp, final byte type, final byte[] value, byte[] tags, final long memstoreTS); @InterfaceAudience.Private static Cell createCell(final byte[] row, final byte[] family, final byte[] qualifier,
final long timestamp, Type type, final byte[] value, byte[] tags); static Cell createCell(final byte [] row); static Cell createCell(final byte [] row, final byte [] value); static Cell createCell(final byte [] row, final byte [] family, final byte [] qualifier); static CellScanner createCellScanner(final List<? extends CellScannable> cellScannerables); static CellScanner createCellScanner(final Iterable<Cell> cellIterable); static CellScanner createCellScanner(final Iterator<Cell> cells); static CellScanner createCellScanner(final Cell[] cellArray); static CellScanner createCellScanner(final NavigableMap<byte [], List<Cell>> map); static boolean matchingRow(final Cell left, final Cell right); static boolean matchingRow(final Cell left, final byte[] buf); static boolean matchingRow(final Cell left, final byte[] buf, final int offset,
final int length); static boolean matchingFamily(final Cell left, final Cell right); static boolean matchingFamily(final Cell left, final byte[] buf); static boolean matchingFamily(final Cell left, final byte[] buf, final int offset,
final int length); static boolean matchingQualifier(final Cell left, final Cell right); static boolean matchingQualifier(final Cell left, final byte[] buf); static boolean matchingQualifier(final Cell left, final byte[] buf, final int offset,
final int length); static boolean matchingColumn(final Cell left, final byte[] fam, final byte[] qual); static boolean matchingColumn(final Cell left, final byte[] fam, final int foffset,
final int flength, final byte[] qual, final int qoffset, final int qlength); static boolean matchingColumn(final Cell left, final Cell right); static boolean matchingValue(final Cell left, final Cell right); static boolean matchingValue(final Cell left, final byte[] buf); static boolean matchingTimestamp(Cell a, Cell b); static boolean isDelete(final Cell cell); static boolean isDelete(final byte type); static boolean isDeleteType(Cell cell); static boolean isDeleteFamily(final Cell cell); static boolean isDeleteFamilyVersion(final Cell cell); static boolean isDeleteColumns(final Cell cell); static boolean isDeleteColumnVersion(final Cell cell); static boolean isDeleteColumnOrFamily(Cell cell); @Deprecated static int estimatedSizeOf(final Cell cell); static int estimatedSerializedSizeOf(final Cell cell); static int estimatedSerializedSizeOfKey(final Cell cell); static long estimatedHeapSizeOf(final Cell cell); @Deprecated static long estimatedHeapSizeOfWithoutTags(final Cell cell); static Iterator<Tag> tagsIterator(final byte[] tags, final int offset, final int length); static boolean overlappingKeys(final byte[] start1, final byte[] end1,
final byte[] start2, final byte[] end2); @InterfaceAudience.Private static void setSequenceId(Cell cell, long seqId); static void setTimestamp(Cell cell, long ts); static void setTimestamp(Cell cell, byte[] ts, int tsOffset); static boolean updateLatestStamp(Cell cell, long ts); static boolean updateLatestStamp(Cell cell, byte[] ts, int tsOffset); static void writeFlatKey(Cell cell, DataOutputStream out); static String getCellKeyAsString(Cell cell); static byte [] getCellKeySerializedAsKeyValueKey(final Cell cell); static void writeRowKeyExcludingCommon(Cell cell, short rLen, int commonPrefix,
DataOutputStream out); static int findCommonPrefixInFlatKey(Cell c1, Cell c2, boolean bypassFamilyCheck,
boolean withTsType); static String toString(Cell cell, boolean verbose); static boolean equals(Cell a, Cell b); static boolean matchingType(Cell a, Cell b); } | @Test public void testOverlappingKeys() { byte[] empty = HConstants.EMPTY_BYTE_ARRAY; byte[] a = Bytes.toBytes("a"); byte[] b = Bytes.toBytes("b"); byte[] c = Bytes.toBytes("c"); byte[] d = Bytes.toBytes("d"); Assert.assertTrue(CellUtil.overlappingKeys(a, b, a, b)); Assert.assertTrue(CellUtil.overlappingKeys(a, c, a, b)); Assert.assertTrue(CellUtil.overlappingKeys(a, b, a, c)); Assert.assertTrue(CellUtil.overlappingKeys(b, c, a, c)); Assert.assertTrue(CellUtil.overlappingKeys(a, c, b, c)); Assert.assertTrue(CellUtil.overlappingKeys(a, d, b, c)); Assert.assertTrue(CellUtil.overlappingKeys(b, c, a, d)); Assert.assertTrue(CellUtil.overlappingKeys(empty, b, a, b)); Assert.assertTrue(CellUtil.overlappingKeys(empty, b, a, c)); Assert.assertTrue(CellUtil.overlappingKeys(a, b, empty, b)); Assert.assertTrue(CellUtil.overlappingKeys(a, b, empty, c)); Assert.assertTrue(CellUtil.overlappingKeys(a, empty, a, b)); Assert.assertTrue(CellUtil.overlappingKeys(a, empty, a, c)); Assert.assertTrue(CellUtil.overlappingKeys(a, b, empty, empty)); Assert.assertTrue(CellUtil.overlappingKeys(empty, empty, a, b)); Assert.assertFalse(CellUtil.overlappingKeys(a, b, c, d)); Assert.assertFalse(CellUtil.overlappingKeys(c, d, a, b)); Assert.assertFalse(CellUtil.overlappingKeys(b, c, c, d)); Assert.assertFalse(CellUtil.overlappingKeys(b, c, c, empty)); Assert.assertFalse(CellUtil.overlappingKeys(b, c, d, empty)); Assert.assertFalse(CellUtil.overlappingKeys(c, d, b, c)); Assert.assertFalse(CellUtil.overlappingKeys(c, empty, b, c)); Assert.assertFalse(CellUtil.overlappingKeys(d, empty, b, c)); Assert.assertFalse(CellUtil.overlappingKeys(b, c, a, b)); Assert.assertFalse(CellUtil.overlappingKeys(b, c, empty, b)); Assert.assertFalse(CellUtil.overlappingKeys(b, c, empty, a)); Assert.assertFalse(CellUtil.overlappingKeys(a,b, b, c)); Assert.assertFalse(CellUtil.overlappingKeys(empty, b, b, c)); Assert.assertFalse(CellUtil.overlappingKeys(empty, a, b, c)); } |
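The predicate treats ranges as half-open [start, end) with an empty key meaning unbounded, so adjacent ranges such as [a,b) and [b,c) do not overlap. A standalone re-statement of the same logic over Strings (rangesOverlap is a hypothetical name, not part of CellUtil):

public class OverlapSketch {
  // Mirrors overlappingKeys: half-open [start, end) ranges; an empty string
  // stands in for the unbounded HConstants.EMPTY_BYTE_ARRAY.
  static boolean rangesOverlap(String start1, String end1, String start2, String end2) {
    return (end2.isEmpty() || start1.isEmpty() || start1.compareTo(end2) < 0)
        && (end1.isEmpty() || start2.isEmpty() || start2.compareTo(end1) < 0);
  }
  public static void main(String[] args) {
    System.out.println(rangesOverlap("a", "c", "b", "d")); // true: [a,c) and [b,d) share [b,c)
    System.out.println(rangesOverlap("a", "b", "b", "c")); // false: [a,b) and [b,c) only touch
    System.out.println(rangesOverlap("", "", "a", "b"));   // true: unbounded overlaps anything
  }
}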
CellUtil { public static int findCommonPrefixInFlatKey(Cell c1, Cell c2, boolean bypassFamilyCheck, boolean withTsType) { short rLen1 = c1.getRowLength(); short rLen2 = c2.getRowLength(); int commonPrefix = KeyValue.ROW_LENGTH_SIZE; if (rLen1 != rLen2) { return ByteBufferUtils.findCommonPrefix(Bytes.toBytes(rLen1), 0, KeyValue.ROW_LENGTH_SIZE, Bytes.toBytes(rLen2), 0, KeyValue.ROW_LENGTH_SIZE); } int rkCommonPrefix = ByteBufferUtils.findCommonPrefix(c1.getRowArray(), c1.getRowOffset(), rLen1, c2.getRowArray(), c2.getRowOffset(), rLen2); commonPrefix += rkCommonPrefix; if (rkCommonPrefix != rLen1) { return commonPrefix; } byte fLen1 = c1.getFamilyLength(); if (bypassFamilyCheck) { commonPrefix += KeyValue.FAMILY_LENGTH_SIZE + fLen1; } else { byte fLen2 = c2.getFamilyLength(); if (fLen1 != fLen2) { return commonPrefix; } commonPrefix += KeyValue.FAMILY_LENGTH_SIZE; int fCommonPrefix = ByteBufferUtils.findCommonPrefix(c1.getFamilyArray(), c1.getFamilyOffset(), fLen1, c2.getFamilyArray(), c2.getFamilyOffset(), fLen2); commonPrefix += fCommonPrefix; if (fCommonPrefix != fLen1) { return commonPrefix; } } int qLen1 = c1.getQualifierLength(); int qLen2 = c2.getQualifierLength(); int qCommon = ByteBufferUtils.findCommonPrefix(c1.getQualifierArray(), c1.getQualifierOffset(), qLen1, c2.getQualifierArray(), c2.getQualifierOffset(), qLen2); commonPrefix += qCommon; if (!withTsType || Math.max(qLen1, qLen2) != qCommon) { return commonPrefix; } int tsCommonPrefix = ByteBufferUtils.findCommonPrefix(Bytes.toBytes(c1.getTimestamp()), 0, KeyValue.TIMESTAMP_SIZE, Bytes.toBytes(c2.getTimestamp()), 0, KeyValue.TIMESTAMP_SIZE); commonPrefix += tsCommonPrefix; if (tsCommonPrefix != KeyValue.TIMESTAMP_SIZE) { return commonPrefix; } if (c1.getTypeByte() == c2.getTypeByte()) { commonPrefix += KeyValue.TYPE_SIZE; } return commonPrefix; } static ByteRange fillRowRange(Cell cell, ByteRange range); static ByteRange fillFamilyRange(Cell cell, ByteRange range); static ByteRange fillQualifierRange(Cell cell, ByteRange range); static ByteRange fillValueRange(Cell cell, ByteRange range); static ByteRange fillTagRange(Cell cell, ByteRange range); static byte[] cloneRow(Cell cell); static byte[] cloneFamily(Cell cell); static byte[] cloneQualifier(Cell cell); static byte[] cloneValue(Cell cell); static byte[] getTagArray(Cell cell); static int copyRowTo(Cell cell, byte[] destination, int destinationOffset); static int copyFamilyTo(Cell cell, byte[] destination, int destinationOffset); static int copyQualifierTo(Cell cell, byte[] destination, int destinationOffset); static int copyValueTo(Cell cell, byte[] destination, int destinationOffset); static int copyTagTo(Cell cell, byte[] destination, int destinationOffset); static byte getRowByte(Cell cell, int index); static ByteBuffer getValueBufferShallowCopy(Cell cell); static ByteBuffer getQualifierBufferShallowCopy(Cell cell); static Cell createCell(final byte [] row, final byte [] family, final byte [] qualifier,
final long timestamp, final byte type, final byte [] value); static Cell createCell(final byte [] rowArray, final int rowOffset, final int rowLength,
final byte [] familyArray, final int familyOffset, final int familyLength,
final byte [] qualifierArray, final int qualifierOffset, final int qualifierLength); @InterfaceAudience.Private static Cell createCell(final byte[] row, final byte[] family, final byte[] qualifier,
final long timestamp, final byte type, final byte[] value, final long memstoreTS); @InterfaceAudience.Private static Cell createCell(final byte[] row, final byte[] family, final byte[] qualifier,
final long timestamp, final byte type, final byte[] value, byte[] tags, final long memstoreTS); @InterfaceAudience.Private static Cell createCell(final byte[] row, final byte[] family, final byte[] qualifier,
final long timestamp, Type type, final byte[] value, byte[] tags); static Cell createCell(final byte [] row); static Cell createCell(final byte [] row, final byte [] value); static Cell createCell(final byte [] row, final byte [] family, final byte [] qualifier); static CellScanner createCellScanner(final List<? extends CellScannable> cellScannerables); static CellScanner createCellScanner(final Iterable<Cell> cellIterable); static CellScanner createCellScanner(final Iterator<Cell> cells); static CellScanner createCellScanner(final Cell[] cellArray); static CellScanner createCellScanner(final NavigableMap<byte [], List<Cell>> map); static boolean matchingRow(final Cell left, final Cell right); static boolean matchingRow(final Cell left, final byte[] buf); static boolean matchingRow(final Cell left, final byte[] buf, final int offset,
final int length); static boolean matchingFamily(final Cell left, final Cell right); static boolean matchingFamily(final Cell left, final byte[] buf); static boolean matchingFamily(final Cell left, final byte[] buf, final int offset,
final int length); static boolean matchingQualifier(final Cell left, final Cell right); static boolean matchingQualifier(final Cell left, final byte[] buf); static boolean matchingQualifier(final Cell left, final byte[] buf, final int offset,
final int length); static boolean matchingColumn(final Cell left, final byte[] fam, final byte[] qual); static boolean matchingColumn(final Cell left, final byte[] fam, final int foffset,
final int flength, final byte[] qual, final int qoffset, final int qlength); static boolean matchingColumn(final Cell left, final Cell right); static boolean matchingValue(final Cell left, final Cell right); static boolean matchingValue(final Cell left, final byte[] buf); static boolean matchingTimestamp(Cell a, Cell b); static boolean isDelete(final Cell cell); static boolean isDelete(final byte type); static boolean isDeleteType(Cell cell); static boolean isDeleteFamily(final Cell cell); static boolean isDeleteFamilyVersion(final Cell cell); static boolean isDeleteColumns(final Cell cell); static boolean isDeleteColumnVersion(final Cell cell); static boolean isDeleteColumnOrFamily(Cell cell); @Deprecated static int estimatedSizeOf(final Cell cell); static int estimatedSerializedSizeOf(final Cell cell); static int estimatedSerializedSizeOfKey(final Cell cell); static long estimatedHeapSizeOf(final Cell cell); @Deprecated static long estimatedHeapSizeOfWithoutTags(final Cell cell); static Iterator<Tag> tagsIterator(final byte[] tags, final int offset, final int length); static boolean overlappingKeys(final byte[] start1, final byte[] end1,
final byte[] start2, final byte[] end2); @InterfaceAudience.Private static void setSequenceId(Cell cell, long seqId); static void setTimestamp(Cell cell, long ts); static void setTimestamp(Cell cell, byte[] ts, int tsOffset); static boolean updateLatestStamp(Cell cell, long ts); static boolean updateLatestStamp(Cell cell, byte[] ts, int tsOffset); static void writeFlatKey(Cell cell, DataOutputStream out); static String getCellKeyAsString(Cell cell); static byte [] getCellKeySerializedAsKeyValueKey(final Cell cell); static void writeRowKeyExcludingCommon(Cell cell, short rLen, int commonPrefix,
DataOutputStream out); static int findCommonPrefixInFlatKey(Cell c1, Cell c2, boolean bypassFamilyCheck,
boolean withTsType); static String toString(Cell cell, boolean verbose); static boolean equals(Cell a, Cell b); static boolean matchingType(Cell a, Cell b); } | @Test public void testFindCommonPrefixInFlatKey() { KeyValue kv1 = new KeyValue("r1".getBytes(), "f1".getBytes(), "q1".getBytes(), null); Assert.assertEquals(kv1.getKeyLength(), CellUtil.findCommonPrefixInFlatKey(kv1, kv1, true, true)); Assert.assertEquals(kv1.getKeyLength(), CellUtil.findCommonPrefixInFlatKey(kv1, kv1, false, true)); Assert.assertEquals(kv1.getKeyLength() - KeyValue.TIMESTAMP_TYPE_SIZE, CellUtil.findCommonPrefixInFlatKey(kv1, kv1, true, false)); KeyValue kv2 = new KeyValue("r12".getBytes(), "f1".getBytes(), "q1".getBytes(), null); Assert.assertEquals(1, CellUtil.findCommonPrefixInFlatKey(kv1, kv2, true, true)); KeyValue kv3 = new KeyValue("r14".getBytes(), "f1".getBytes(), "q1".getBytes(), null); Assert.assertEquals(KeyValue.ROW_LENGTH_SIZE + "r1".getBytes().length, CellUtil.findCommonPrefixInFlatKey(kv2, kv3, true, true)); KeyValue kv4 = new KeyValue("r14".getBytes(), "f2".getBytes(), "q1".getBytes(), null); Assert.assertEquals(KeyValue.ROW_LENGTH_SIZE + kv3.getRowLength() + KeyValue.FAMILY_LENGTH_SIZE + "f".getBytes().length, CellUtil.findCommonPrefixInFlatKey(kv3, kv4, false, true)); KeyValue kv5 = new KeyValue("r14".getBytes(), "f2".getBytes(), "q123".getBytes(), null); Assert.assertEquals(KeyValue.ROW_LENGTH_SIZE + kv3.getRowLength() + KeyValue.FAMILY_LENGTH_SIZE + kv4.getFamilyLength() + kv4.getQualifierLength(), CellUtil.findCommonPrefixInFlatKey(kv4, kv5, true, true)); KeyValue kv6 = new KeyValue("rk".getBytes(), 1234L); KeyValue kv7 = new KeyValue("rk".getBytes(), 1235L); Assert.assertEquals(KeyValue.ROW_LENGTH_SIZE + kv6.getRowLength() + KeyValue.FAMILY_LENGTH_SIZE + kv6.getFamilyLength() + kv6.getQualifierLength() + 7, CellUtil.findCommonPrefixInFlatKey(kv6, kv7, true, true)); KeyValue kv8 = new KeyValue("rk".getBytes(), 1234L, Type.Delete); Assert.assertEquals(KeyValue.ROW_LENGTH_SIZE + kv6.getRowLength() + KeyValue.FAMILY_LENGTH_SIZE + kv6.getFamilyLength() + kv6.getQualifierLength() + KeyValue.TIMESTAMP_SIZE, CellUtil.findCommonPrefixInFlatKey(kv6, kv8, true, true)); Assert.assertEquals(KeyValue.ROW_LENGTH_SIZE + kv6.getRowLength() + KeyValue.FAMILY_LENGTH_SIZE + kv6.getFamilyLength() + kv6.getQualifierLength(), CellUtil.findCommonPrefixInFlatKey(kv6, kv8, true, false)); } |
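The flat key walked above is laid out as <2-byte rowLength><row><1-byte familyLength><family><qualifier><8-byte timestamp><1-byte type>, and each segment is compared with a plain byte-wise common-prefix scan. A standalone sketch of that scan (commonPrefix mirrors ByteBufferUtils.findCommonPrefix; the name and wrapper class are illustrative):

public class PrefixSketch {
  // Number of leading bytes shared by the two ranges.
  static int commonPrefix(byte[] a, int aOff, int aLen, byte[] b, int bOff, int bLen) {
    int n = Math.min(aLen, bLen);
    int i = 0;
    while (i < n && a[aOff + i] == b[bOff + i]) {
      i++;
    }
    return i;
  }
  public static void main(String[] args) {
    byte[] r1 = {'r', '1'};
    byte[] r2 = {'r', '1', '4'};
    System.out.println(commonPrefix(r1, 0, r1.length, r2, 0, r2.length)); // 2, as in the kv2/kv3 case above
  }
}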
CellUtil { public static String toString(Cell cell, boolean verbose) { if (cell == null) { return ""; } StringBuilder builder = new StringBuilder(); String keyStr = getCellKeyAsString(cell); String tag = null; String value = null; if (verbose) { tag = Bytes.toStringBinary(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength()); value = Bytes.toStringBinary(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); } builder .append(keyStr); if (tag != null && !tag.isEmpty()) { builder.append("/").append(tag); } if (value != null) { builder.append("/").append(value); } return builder.toString(); } static ByteRange fillRowRange(Cell cell, ByteRange range); static ByteRange fillFamilyRange(Cell cell, ByteRange range); static ByteRange fillQualifierRange(Cell cell, ByteRange range); static ByteRange fillValueRange(Cell cell, ByteRange range); static ByteRange fillTagRange(Cell cell, ByteRange range); static byte[] cloneRow(Cell cell); static byte[] cloneFamily(Cell cell); static byte[] cloneQualifier(Cell cell); static byte[] cloneValue(Cell cell); static byte[] getTagArray(Cell cell); static int copyRowTo(Cell cell, byte[] destination, int destinationOffset); static int copyFamilyTo(Cell cell, byte[] destination, int destinationOffset); static int copyQualifierTo(Cell cell, byte[] destination, int destinationOffset); static int copyValueTo(Cell cell, byte[] destination, int destinationOffset); static int copyTagTo(Cell cell, byte[] destination, int destinationOffset); static byte getRowByte(Cell cell, int index); static ByteBuffer getValueBufferShallowCopy(Cell cell); static ByteBuffer getQualifierBufferShallowCopy(Cell cell); static Cell createCell(final byte [] row, final byte [] family, final byte [] qualifier,
final long timestamp, final byte type, final byte [] value); static Cell createCell(final byte [] rowArray, final int rowOffset, final int rowLength,
final byte [] familyArray, final int familyOffset, final int familyLength,
final byte [] qualifierArray, final int qualifierOffset, final int qualifierLength); @InterfaceAudience.Private static Cell createCell(final byte[] row, final byte[] family, final byte[] qualifier,
final long timestamp, final byte type, final byte[] value, final long memstoreTS); @InterfaceAudience.Private static Cell createCell(final byte[] row, final byte[] family, final byte[] qualifier,
final long timestamp, final byte type, final byte[] value, byte[] tags, final long memstoreTS); @InterfaceAudience.Private static Cell createCell(final byte[] row, final byte[] family, final byte[] qualifier,
final long timestamp, Type type, final byte[] value, byte[] tags); static Cell createCell(final byte [] row); static Cell createCell(final byte [] row, final byte [] value); static Cell createCell(final byte [] row, final byte [] family, final byte [] qualifier); static CellScanner createCellScanner(final List<? extends CellScannable> cellScannerables); static CellScanner createCellScanner(final Iterable<Cell> cellIterable); static CellScanner createCellScanner(final Iterator<Cell> cells); static CellScanner createCellScanner(final Cell[] cellArray); static CellScanner createCellScanner(final NavigableMap<byte [], List<Cell>> map); static boolean matchingRow(final Cell left, final Cell right); static boolean matchingRow(final Cell left, final byte[] buf); static boolean matchingRow(final Cell left, final byte[] buf, final int offset,
final int length); static boolean matchingFamily(final Cell left, final Cell right); static boolean matchingFamily(final Cell left, final byte[] buf); static boolean matchingFamily(final Cell left, final byte[] buf, final int offset,
final int length); static boolean matchingQualifier(final Cell left, final Cell right); static boolean matchingQualifier(final Cell left, final byte[] buf); static boolean matchingQualifier(final Cell left, final byte[] buf, final int offset,
final int length); static boolean matchingColumn(final Cell left, final byte[] fam, final byte[] qual); static boolean matchingColumn(final Cell left, final byte[] fam, final int foffset,
final int flength, final byte[] qual, final int qoffset, final int qlength); static boolean matchingColumn(final Cell left, final Cell right); static boolean matchingValue(final Cell left, final Cell right); static boolean matchingValue(final Cell left, final byte[] buf); static boolean matchingTimestamp(Cell a, Cell b); static boolean isDelete(final Cell cell); static boolean isDelete(final byte type); static boolean isDeleteType(Cell cell); static boolean isDeleteFamily(final Cell cell); static boolean isDeleteFamilyVersion(final Cell cell); static boolean isDeleteColumns(final Cell cell); static boolean isDeleteColumnVersion(final Cell cell); static boolean isDeleteColumnOrFamily(Cell cell); @Deprecated static int estimatedSizeOf(final Cell cell); static int estimatedSerializedSizeOf(final Cell cell); static int estimatedSerializedSizeOfKey(final Cell cell); static long estimatedHeapSizeOf(final Cell cell); @Deprecated static long estimatedHeapSizeOfWithoutTags(final Cell cell); static Iterator<Tag> tagsIterator(final byte[] tags, final int offset, final int length); static boolean overlappingKeys(final byte[] start1, final byte[] end1,
final byte[] start2, final byte[] end2); @InterfaceAudience.Private static void setSequenceId(Cell cell, long seqId); static void setTimestamp(Cell cell, long ts); static void setTimestamp(Cell cell, byte[] ts, int tsOffset); static boolean updateLatestStamp(Cell cell, long ts); static boolean updateLatestStamp(Cell cell, byte[] ts, int tsOffset); static void writeFlatKey(Cell cell, DataOutputStream out); static String getCellKeyAsString(Cell cell); static byte [] getCellKeySerializedAsKeyValueKey(final Cell cell); static void writeRowKeyExcludingCommon(Cell cell, short rLen, int commonPrefix,
DataOutputStream out); static int findCommonPrefixInFlatKey(Cell c1, Cell c2, boolean bypassFamilyCheck,
boolean withTsType); static String toString(Cell cell, boolean verbose); static boolean equals(Cell a, Cell b); static boolean matchingType(Cell a, Cell b); } | @Test public void testToString() { byte [] row = Bytes.toBytes("row"); long ts = 123l; KeyValue kv = new KeyValue(row, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, ts, KeyValue.Type.Minimum, HConstants.EMPTY_BYTE_ARRAY); Cell cell = CellUtil.createCell(row, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, ts, KeyValue.Type.Minimum.getCode(), HConstants.EMPTY_BYTE_ARRAY); String cellToString = CellUtil.getCellKeyAsString(cell); assertEquals(kv.toString(), cellToString); byte [] f = new byte [] {'f'}; byte [] q = new byte [] {'q'}; kv = new KeyValue(row, f, q, ts, KeyValue.Type.Minimum, HConstants.EMPTY_BYTE_ARRAY); cell = CellUtil.createCell(row, f, q, ts, KeyValue.Type.Minimum.getCode(), HConstants.EMPTY_BYTE_ARRAY); cellToString = CellUtil.getCellKeyAsString(cell); assertEquals(kv.toString(), cellToString); } |
HBaseFsck extends Configured implements Closeable { public int getRetCode() { return retcode; } HBaseFsck(Configuration conf); HBaseFsck(Configuration conf, ExecutorService exec); void connect(); void offlineHdfsIntegrityRepair(); int onlineConsistencyRepair(); int onlineHbck(); static byte[] keyOnly(byte[] b); @Override void close(); void checkRegionBoundaries(); ErrorReporter getErrors(); void fixEmptyMetaCells(); void fixOrphanTables(); boolean rebuildMeta(boolean fix); void loadHdfsRegionDirs(); int mergeRegionDirs(Path targetRegionDir, HbckInfo contained); void dumpOverlapProblems(Multimap<byte[], HbckInfo> regions); void dumpSidelinedRegions(Map<Path, HbckInfo> regions); Multimap<byte[], HbckInfo> getOverlapGroups(
TableName table); static void setDisplayFullReport(); static void setForceExclusive(); boolean isExclusive(); static void setDisableBalancer(); boolean shouldDisableBalancer(); void setFixTableLocks(boolean shouldFix); void setFixTableZNodes(boolean shouldFix); void setFixAssignments(boolean shouldFix); void setFixMeta(boolean shouldFix); void setFixEmptyMetaCells(boolean shouldFix); void setCheckHdfs(boolean checking); void setFixHdfsHoles(boolean shouldFix); void setFixTableOrphans(boolean shouldFix); void setFixHdfsOverlaps(boolean shouldFix); void setFixHdfsOrphans(boolean shouldFix); void setFixVersionFile(boolean shouldFix); boolean shouldFixVersionFile(); void setSidelineBigOverlaps(boolean sbo); boolean shouldSidelineBigOverlaps(); void setFixSplitParents(boolean shouldFix); void setFixReferenceFiles(boolean shouldFix); boolean shouldIgnorePreCheckPermission(); void setIgnorePreCheckPermission(boolean ignorePreCheckPermission); void setMaxMerge(int mm); int getMaxMerge(); void setMaxOverlapsToSideline(int mo); int getMaxOverlapsToSideline(); void includeTable(TableName table); void setTimeLag(long seconds); void setSidelineDir(String sidelineDir); HFileCorruptionChecker getHFilecorruptionChecker(); void setHFileCorruptionChecker(HFileCorruptionChecker hfcc); void setRetCode(int code); int getRetCode(); static void main(String[] args); HBaseFsck exec(ExecutorService exec, String[] args); static void debugLsr(Configuration conf,
Path p); static void debugLsr(Configuration conf,
Path p, ErrorReporter errors); static final long DEFAULT_TIME_LAG; static final long DEFAULT_SLEEP_BEFORE_RERUN; } | @Test (timeout=180000) public void testParallelHbck() throws Exception { final ExecutorService service; final Future<HBaseFsck> hbck1,hbck2; class RunHbck implements Callable<HBaseFsck>{ boolean fail = true; @Override public HBaseFsck call(){ Configuration c = new Configuration(conf); c.setInt("hbase.hbck.lockfile.attempts", 1); c.setInt("hbase.hbck.lockfile.maxwaittime", 3); try{ return doFsck(c, true); } catch(Exception e){ if (e.getMessage().contains("Duplicate hbck")) { fail = false; } } if (fail) fail(); return null; } } service = Executors.newFixedThreadPool(2); hbck1 = service.submit(new RunHbck()); hbck2 = service.submit(new RunHbck()); service.shutdown(); service.awaitTermination(15, TimeUnit.SECONDS); HBaseFsck h1 = hbck1.get(); HBaseFsck h2 = hbck2.get(); assert(h1 == null || h2 == null); if (h1 != null) { assert(h1.getRetCode() >= 0); } if (h2 != null) { assert(h2.getRetCode() >= 0); } }
@Test (timeout=180000) public void testParallelWithRetriesHbck() throws Exception { final ExecutorService service; final Future<HBaseFsck> hbck1,hbck2; final int timeoutInSeconds = 80; final int sleepIntervalInMilliseconds = 200; final int maxSleepTimeInMilliseconds = 6000; final int maxRetryAttempts = 15; class RunHbck implements Callable<HBaseFsck>{ @Override public HBaseFsck call() throws Exception { Configuration c = new Configuration(conf); c.setInt("hbase.hbck.lockfile.maxwaittime", timeoutInSeconds); c.setInt("hbase.hbck.lockfile.attempt.sleep.interval", sleepIntervalInMilliseconds); c.setInt("hbase.hbck.lockfile.attempt.maxsleeptime", maxSleepTimeInMilliseconds); c.setInt("hbase.hbck.lockfile.attempts", maxRetryAttempts); return doFsck(c, false); } } service = Executors.newFixedThreadPool(2); hbck1 = service.submit(new RunHbck()); hbck2 = service.submit(new RunHbck()); service.shutdown(); service.awaitTermination(timeoutInSeconds * 2, TimeUnit.SECONDS); HBaseFsck h1 = hbck1.get(); HBaseFsck h2 = hbck2.get(); assertNotNull(h1); assertNotNull(h2); assert(h1.getRetCode() >= 0); assert(h2.getRetCode() >= 0); } |
FixedLengthWrapper implements DataType<T> { @Override public T decode(PositionedByteRange src) { if (src.getRemaining() < length) { throw new IllegalArgumentException("Not enough buffer remaining. src.offset: " + src.getOffset() + " src.length: " + src.getLength() + " src.position: " + src.getPosition() + " max length: " + length); } PositionedByteRange b = new SimplePositionedMutableByteRange(length); src.get(b.getBytes()); return base.decode(b); } FixedLengthWrapper(DataType<T> base, int length); int getLength(); @Override boolean isOrderPreserving(); @Override Order getOrder(); @Override boolean isNullable(); @Override boolean isSkippable(); @Override int encodedLength(T val); @Override Class<T> encodedClass(); @Override int skip(PositionedByteRange src); @Override T decode(PositionedByteRange src); @Override int encode(PositionedByteRange dst, T val); } | @Test(expected = IllegalArgumentException.class) public void testInsufficientRemainingRead() { PositionedByteRange buff = new SimplePositionedMutableByteRange(0); DataType<byte[]> type = new FixedLengthWrapper<byte[]>(new RawBytes(), 3); type.decode(buff); } |
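A usage sketch of the wrapper, assuming hbase-common is on the classpath: decode() refuses to read unless at least `length` bytes remain, then hands exactly that many bytes to the wrapped type.

import org.apache.hadoop.hbase.types.DataType;
import org.apache.hadoop.hbase.types.FixedLengthWrapper;
import org.apache.hadoop.hbase.types.RawBytes;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

public class FixedLengthSketch {
  public static void main(String[] args) {
    // A 3-byte fixed-width view over RawBytes.
    DataType<byte[]> fixed = new FixedLengthWrapper<byte[]>(new RawBytes(), 3);
    PositionedByteRange ok = new SimplePositionedMutableByteRange(new byte[] {'a', 'b', 'c'}, 0, 3);
    System.out.println(fixed.decode(ok).length); // 3: consumed exactly `length` bytes
    PositionedByteRange tooShort = new SimplePositionedMutableByteRange(2);
    try {
      fixed.decode(tooShort); // only 2 bytes remaining
    } catch (IllegalArgumentException expected) {
      System.out.println("rejected: not enough buffer remaining");
    }
  }
}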
HBaseConfiguration extends Configuration { public static Configuration subset(Configuration srcConf, String prefix) { Configuration newConf = new Configuration(false); for (Map.Entry<String, String> entry : srcConf) { if (entry.getKey().startsWith(prefix)) { String newKey = entry.getKey().substring(prefix.length()); if (!newKey.isEmpty()) { newConf.set(newKey, entry.getValue()); } } } return newConf; } @Deprecated HBaseConfiguration(); @Deprecated HBaseConfiguration(final Configuration c); static Configuration addHbaseResources(Configuration conf); static Configuration create(); static Configuration create(final Configuration that); static void merge(Configuration destConf, Configuration srcConf); static Configuration subset(Configuration srcConf, String prefix); static void setWithPrefix(Configuration conf, String prefix,
Iterable<Map.Entry<String, String>> properties); static boolean isShowConfInServlet(); static int getInt(Configuration conf, String name,
String deprecatedName, int defaultValue); static String getPassword(Configuration conf, String alias,
String defPass); static Configuration createClusterConf(Configuration baseConf, String clusterKey); static Configuration createClusterConf(Configuration baseConf, String clusterKey,
String overridePrefix); static void main(String[] args); } | @Test public void testSubset() throws Exception { Configuration conf = HBaseConfiguration.create(); String prefix = "hbase.mapred.output."; conf.set("hbase.security.authentication", "kerberos"); conf.set("hbase.regionserver.kerberos.principal", "hbasesource"); HBaseConfiguration.setWithPrefix(conf, prefix, ImmutableMap.of( "hbase.regionserver.kerberos.principal", "hbasedest", "", "shouldbemissing") .entrySet()); Configuration subsetConf = HBaseConfiguration.subset(conf, prefix); assertNull(subsetConf.get(prefix + "hbase.regionserver.kerberos.principal")); assertEquals("hbasedest", subsetConf.get("hbase.regionserver.kerberos.principal")); assertNull(subsetConf.get("hbase.security.authentication")); assertNull(subsetConf.get("")); Configuration mergedConf = HBaseConfiguration.create(conf); HBaseConfiguration.merge(mergedConf, subsetConf); assertEquals("hbasedest", mergedConf.get("hbase.regionserver.kerberos.principal")); assertEquals("kerberos", mergedConf.get("hbase.security.authentication")); assertEquals("shouldbemissing", mergedConf.get(prefix)); } |
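A minimal usage sketch of subset(): keys starting with the prefix are copied with the prefix stripped, while the bare prefix key itself (whose stripped name would be empty) and unrelated keys are dropped. The property values below are made up for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SubsetSketch {
  public static void main(String[] args) {
    Configuration src = new Configuration(false);
    src.set("hbase.mapred.output.hbase.zookeeper.quorum", "remote-zk");
    src.set("hbase.zookeeper.quorum", "local-zk"); // no prefix: not copied
    Configuration dest = HBaseConfiguration.subset(src, "hbase.mapred.output.");
    System.out.println(dest.get("hbase.zookeeper.quorum")); // remote-zk, prefix stripped
  }
}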
HBaseConfiguration extends Configuration { public static String getPassword(Configuration conf, String alias, String defPass) throws IOException { String passwd = null; try { Method m = Configuration.class.getMethod("getPassword", String.class); char[] p = (char[]) m.invoke(conf, alias); if (p != null) { LOG.debug(String.format("Config option \"%s\" was found through" + " the Configuration getPassword method.", alias)); passwd = new String(p); } else { LOG.debug(String.format( "Config option \"%s\" was not found. Using provided default value", alias)); passwd = defPass; } } catch (NoSuchMethodException e) { LOG.debug(String.format( "Credential.getPassword method is not available." + " Falling back to configuration.")); passwd = conf.get(alias, defPass); } catch (SecurityException e) { throw new IOException(e.getMessage(), e); } catch (IllegalAccessException e) { throw new IOException(e.getMessage(), e); } catch (IllegalArgumentException e) { throw new IOException(e.getMessage(), e); } catch (InvocationTargetException e) { throw new IOException(e.getMessage(), e); } return passwd; } @Deprecated HBaseConfiguration(); @Deprecated HBaseConfiguration(final Configuration c); static Configuration addHbaseResources(Configuration conf); static Configuration create(); static Configuration create(final Configuration that); static void merge(Configuration destConf, Configuration srcConf); static Configuration subset(Configuration srcConf, String prefix); static void setWithPrefix(Configuration conf, String prefix,
Iterable<Map.Entry<String, String>> properties); static boolean isShowConfInServlet(); static int getInt(Configuration conf, String name,
String deprecatedName, int defaultValue); static String getPassword(Configuration conf, String alias,
String defPass); static Configuration createClusterConf(Configuration baseConf, String clusterKey); static Configuration createClusterConf(Configuration baseConf, String clusterKey,
String overridePrefix); static void main(String[] args); } | @Test public void testGetPassword() throws Exception { Configuration conf = HBaseConfiguration.create(); conf.set(ReflectiveCredentialProviderClient.CREDENTIAL_PROVIDER_PATH, "jceks://file" + new File(UTIL.getDataTestDir().toUri().getPath(), "foo.jks").getCanonicalPath()); ReflectiveCredentialProviderClient client = new ReflectiveCredentialProviderClient(); if (client.isHadoopCredentialProviderAvailable()) { char[] keyPass = { 'k', 'e', 'y', 'p', 'a', 's', 's' }; char[] storePass = { 's', 't', 'o', 'r', 'e', 'p', 'a', 's', 's' }; client.createEntry(conf, "ssl.keypass.alias", keyPass); client.createEntry(conf, "ssl.storepass.alias", storePass); String keypass = HBaseConfiguration.getPassword(conf, "ssl.keypass.alias", null); assertEquals(keypass, new String(keyPass)); String storepass = HBaseConfiguration.getPassword(conf, "ssl.storepass.alias", null); assertEquals(storepass, new String(storePass)); } } |
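A standalone sketch of the capability probe getPassword() performs: invoke Configuration.getPassword reflectively when this Hadoop version provides it (so credential providers are consulted), otherwise fall back to a plain lookup (readSecret is a hypothetical name, not HBase API):

import java.lang.reflect.Method;
import org.apache.hadoop.conf.Configuration;

public class PasswordSketch {
  static String readSecret(Configuration conf, String alias, String defPass) throws Exception {
    try {
      // Present on Hadoop 2.6+; resolves the alias via credential providers.
      Method m = Configuration.class.getMethod("getPassword", String.class);
      char[] p = (char[]) m.invoke(conf, alias);
      return (p != null) ? new String(p) : defPass;
    } catch (NoSuchMethodException e) {
      // Older Hadoop: read the value straight from the configuration.
      return conf.get(alias, defPass);
    }
  }
}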
TimeoutBlockingQueue { public TimeoutBlockingQueue(TimeoutRetriever<? super E> timeoutRetriever) { this(32, timeoutRetriever); } TimeoutBlockingQueue(TimeoutRetriever<? super E> timeoutRetriever); @SuppressWarnings("unchecked") TimeoutBlockingQueue(int capacity, TimeoutRetriever<? super E> timeoutRetriever); void dump(); void clear(); void add(E e); @edu.umd.cs.findbugs.annotations.SuppressWarnings("WA_AWAIT_NOT_IN_LOOP") E poll(); int size(); boolean isEmpty(); void signalAll(); } | @Test public void testTimeoutBlockingQueue() { TimeoutBlockingQueue<TestObject> queue; int[][] testArray = new int[][] { {200, 400, 600}, {200, 400, 100}, {200, 400, 300}, }; for (int i = 0; i < testArray.length; ++i) { int[] sortedArray = Arrays.copyOf(testArray[i], testArray[i].length); Arrays.sort(sortedArray); queue = new TimeoutBlockingQueue<TestObject>(2, new TestObjectTimeoutRetriever()); for (int j = 0; j < testArray[i].length; ++j) { queue.add(new TestObject(j, testArray[i][j])); queue.dump(); } for (int j = 0; !queue.isEmpty(); ++j) { assertEquals(sortedArray[j], queue.poll().getTimeout()); } queue = new TimeoutBlockingQueue<TestObject>(2, new TestObjectTimeoutRetriever()); queue.add(new TestObject(0, 50)); assertEquals(50, queue.poll().getTimeout()); for (int j = 0; j < testArray[i].length; ++j) { queue.add(new TestObject(j, testArray[i][j])); queue.dump(); } for (int j = 0; !queue.isEmpty(); ++j) { assertEquals(sortedArray[j], queue.poll().getTimeout()); } } } |
ProcedureStoreTracker { public boolean isTracking(long minId, long maxId) { return map.floorEntry(minId) != null || map.floorEntry(maxId) != null; } void insert(long procId); void insert(final long procId, final long[] subProcIds); void update(long procId); void delete(long procId); long getUpdatedMinProcId(); long getUpdatedMaxProcId(); @InterfaceAudience.Private void setDeleted(final long procId, final boolean isDeleted); void reset(); DeleteState isDeleted(long procId); long getMinProcId(); void setKeepDeletes(boolean keepDeletes); void setPartialFlag(boolean isPartial); boolean isEmpty(); boolean isUpdated(); boolean isTracking(long minId, long maxId); void resetUpdates(); void undeleteAll(); void dump(); void writeTo(final OutputStream stream); void readFrom(final InputStream stream); } | @Test public void testIsTracking() { long[][] procIds = new long[][] {{4, 7}, {1024, 1027}, {8192, 8194}}; long[][] checkIds = new long[][] {{2, 8}, {1023, 1025}, {8193, 8191}}; ProcedureStoreTracker tracker = new ProcedureStoreTracker(); for (int i = 0; i < procIds.length; ++i) { long[] seq = procIds[i]; tracker.insert(seq[0]); tracker.insert(seq[1]); } for (int i = 0; i < procIds.length; ++i) { long[] check = checkIds[i]; long[] seq = procIds[i]; assertTrue(tracker.isTracking(seq[0], seq[1])); assertTrue(tracker.isTracking(check[0], check[1])); tracker.delete(seq[0]); tracker.delete(seq[1]); assertFalse(tracker.isTracking(seq[0], seq[1])); assertFalse(tracker.isTracking(check[0], check[1])); } assertTrue(tracker.isEmpty()); } |
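isTracking() is deliberately coarse: the tracker keys a TreeMap by each BitSetNode's starting id and only asks floorEntry() whether some node starts at or below the queried ids, which is why the test's neighboring check ranges report true until the ids are deleted. A standalone sketch of that lookup shape (names illustrative):

import java.util.TreeMap;

public class TrackingSketch {
  public static void main(String[] args) {
    TreeMap<Long, String> nodes = new TreeMap<>();
    nodes.put(0L, "node covering ids 0..1023");
    // Any id at or above the first node's start gets the same coarse "yes".
    System.out.println(nodes.floorEntry(8191L) != null); // true
    System.out.println(nodes.floorEntry(-1L) != null);   // false: below every node
  }
}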
ProcedureStoreTracker { public void delete(long procId) { Map.Entry<Long, BitSetNode> entry = map.floorEntry(procId); assert entry != null : "expected node to delete procId=" + procId; BitSetNode node = entry.getValue(); assert node.contains(procId) : "expected procId in the node"; node.delete(procId); if (!keepDeletes && node.isEmpty()) { map.remove(entry.getKey()); } trackProcIds(procId); } void insert(long procId); void insert(final long procId, final long[] subProcIds); void update(long procId); void delete(long procId); long getUpdatedMinProcId(); long getUpdatedMaxProcId(); @InterfaceAudience.Private void setDeleted(final long procId, final boolean isDeleted); void reset(); DeleteState isDeleted(long procId); long getMinProcId(); void setKeepDeletes(boolean keepDeletes); void setPartialFlag(boolean isPartial); boolean isEmpty(); boolean isUpdated(); boolean isTracking(long minId, long maxId); void resetUpdates(); void undeleteAll(); void dump(); void writeTo(final OutputStream stream); void readFrom(final InputStream stream); } | @Test public void testDelete() { final ProcedureStoreTracker tracker = new ProcedureStoreTracker(); long[] procIds = new long[] { 65, 1, 193 }; for (int i = 0; i < procIds.length; ++i) { tracker.insert(procIds[i]); tracker.dump(); } for (int i = 0; i < (64 * 4); ++i) { boolean hasProc = false; for (int j = 0; j < procIds.length; ++j) { if (procIds[j] == i) { hasProc = true; break; } } if (hasProc) { assertEquals(ProcedureStoreTracker.DeleteState.NO, tracker.isDeleted(i)); } else { assertEquals("procId=" + i, ProcedureStoreTracker.DeleteState.YES, tracker.isDeleted(i)); } } } |
WALProcedureStore extends ProcedureStoreBase { @VisibleForTesting protected void periodicRollForTesting() throws IOException { lock.lock(); try { periodicRoll(); } finally { lock.unlock(); } } WALProcedureStore(final Configuration conf, final FileSystem fs, final Path logDir,
final LeaseRecovery leaseRecovery); @Override void start(int numSlots); @Override void stop(boolean abort); @Override int getNumThreads(); ProcedureStoreTracker getStoreTracker(); ArrayList<ProcedureWALFile> getActiveLogs(); Set<ProcedureWALFile> getCorruptedLogs(); @Override void recoverLease(); @Override void load(final ProcedureLoader loader); @Override void insert(final Procedure proc, final Procedure[] subprocs); @Override void update(final Procedure proc); @Override void delete(final long procId); Path getLogDir(); FileSystem getFileSystem(); } | @Test public void testEmptyRoll() throws Exception { for (int i = 0; i < 10; ++i) { procStore.periodicRollForTesting(); } FileStatus[] status = fs.listStatus(logDir); assertEquals(1, status.length); } |
WALProcedureStore extends ProcedureStoreBase { @Override public void load(final ProcedureLoader loader) throws IOException { if (logs.isEmpty()) { throw new RuntimeException("recoverLease() must be called before loading data"); } if (logs.size() == 1) { if (LOG.isDebugEnabled()) { LOG.debug("No state logs to replay."); } loader.setMaxProcId(0); loading.set(false); return; } Iterator<ProcedureWALFile> it = logs.descendingIterator(); it.next(); try { ProcedureWALFormat.load(it, storeTracker, new ProcedureWALFormat.Loader() { @Override public void setMaxProcId(long maxProcId) { loader.setMaxProcId(maxProcId); } @Override public void load(ProcedureIterator procIter) throws IOException { loader.load(procIter); } @Override public void handleCorrupted(ProcedureIterator procIter) throws IOException { loader.handleCorrupted(procIter); } @Override public void markCorruptedWAL(ProcedureWALFile log, IOException e) { if (corruptedLogs == null) { corruptedLogs = new HashSet<ProcedureWALFile>(); } corruptedLogs.add(log); } }); } finally { loading.set(false); } } WALProcedureStore(final Configuration conf, final FileSystem fs, final Path logDir,
final LeaseRecovery leaseRecovery); @Override void start(int numSlots); @Override void stop(boolean abort); @Override int getNumThreads(); ProcedureStoreTracker getStoreTracker(); ArrayList<ProcedureWALFile> getActiveLogs(); Set<ProcedureWALFile> getCorruptedLogs(); @Override void recoverLease(); @Override void load(final ProcedureLoader loader); @Override void insert(final Procedure proc, final Procedure[] subprocs); @Override void update(final Procedure proc); @Override void delete(final long procId); Path getLogDir(); FileSystem getFileSystem(); } | @Test public void testLoad() throws Exception { Set<Long> procIds = new HashSet<>(); Procedure proc1 = new TestSequentialProcedure(); procIds.add(proc1.getProcId()); procStore.insert(proc1, null); Procedure proc2 = new TestSequentialProcedure(); Procedure[] child2 = new Procedure[2]; child2[0] = new TestSequentialProcedure(); child2[1] = new TestSequentialProcedure(); procIds.add(proc2.getProcId()); procIds.add(child2[0].getProcId()); procIds.add(child2[1].getProcId()); procStore.insert(proc2, child2); verifyProcIdsOnRestart(procIds); procStore.update(proc1); procStore.update(child2[1]); procStore.delete(child2[1].getProcId()); procIds.remove(child2[1].getProcId()); verifyProcIdsOnRestart(procIds); procStore.stop(false); FileStatus[] logs = fs.listStatus(logDir); assertEquals(3, logs.length); for (int i = 0; i < logs.length; ++i) { corruptLog(logs[i], 4); } verifyProcIdsOnRestart(procIds); } |
ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public boolean exists(ByteBuffer table, TGet get) throws TIOError, TException { Table htable = getTable(table); try { return htable.exists(getFromThrift(get)); } catch (IOException e) { throw getTIOError(e); } finally { closeTable(htable); } } ThriftHBaseServiceHandler(final Configuration conf,
final UserProvider userProvider); static THBaseService.Iface newInstance(
THBaseService.Iface handler, ThriftMetrics metrics); @Override boolean exists(ByteBuffer table, TGet get); @Override TResult get(ByteBuffer table, TGet get); @Override List<TResult> getMultiple(ByteBuffer table, List<TGet> gets); @Override void put(ByteBuffer table, TPut put); @Override boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family,
ByteBuffer qualifier, ByteBuffer value, TPut put); @Override void putMultiple(ByteBuffer table, List<TPut> puts); @Override void deleteSingle(ByteBuffer table, TDelete deleteSingle); @Override List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes); @Override boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family,
ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle); @Override TResult increment(ByteBuffer table, TIncrement increment); @Override TResult append(ByteBuffer table, TAppend append); @Override int openScanner(ByteBuffer table, TScan scan); @Override List<TResult> getScannerRows(int scannerId, int numRows); @Override List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows); @Override void closeScanner(int scannerId); @Override void mutateRow(ByteBuffer table, TRowMutations rowMutations); @Override List<THRegionLocation> getAllRegionLocations(ByteBuffer table); @Override THRegionLocation getRegionLocation(ByteBuffer table, ByteBuffer row, boolean reload); } | @Test public void testExists() throws TIOError, TException { ThriftHBaseServiceHandler handler = createHandler(); byte[] rowName = "testExists".getBytes(); ByteBuffer table = wrap(tableAname); TGet get = new TGet(wrap(rowName)); assertFalse(handler.exists(table, get)); List<TColumnValue> columnValues = new ArrayList<TColumnValue>(); columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname))); columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname))); TPut put = new TPut(wrap(rowName), columnValues); put.setColumnValues(columnValues); handler.put(table, put); assertTrue(handler.exists(table, get)); } |
ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes) throws TIOError, TException { Table htable = getTable(table); try { htable.delete(deletesFromThrift(deletes)); } catch (IOException e) { throw getTIOError(e); } finally { closeTable(htable); } return Collections.emptyList(); } ThriftHBaseServiceHandler(final Configuration conf,
final UserProvider userProvider); static THBaseService.Iface newInstance(
THBaseService.Iface handler, ThriftMetrics metrics); @Override boolean exists(ByteBuffer table, TGet get); @Override TResult get(ByteBuffer table, TGet get); @Override List<TResult> getMultiple(ByteBuffer table, List<TGet> gets); @Override void put(ByteBuffer table, TPut put); @Override boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family,
ByteBuffer qualifier, ByteBuffer value, TPut put); @Override void putMultiple(ByteBuffer table, List<TPut> puts); @Override void deleteSingle(ByteBuffer table, TDelete deleteSingle); @Override List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes); @Override boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family,
ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle); @Override TResult increment(ByteBuffer table, TIncrement increment); @Override TResult append(ByteBuffer table, TAppend append); @Override int openScanner(ByteBuffer table, TScan scan); @Override List<TResult> getScannerRows(int scannerId, int numRows); @Override List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows); @Override void closeScanner(int scannerId); @Override void mutateRow(ByteBuffer table, TRowMutations rowMutations); @Override List<THRegionLocation> getAllRegionLocations(ByteBuffer table); @Override THRegionLocation getRegionLocation(ByteBuffer table, ByteBuffer row, boolean reload); } | @Test public void testDeleteMultiple() throws Exception { ThriftHBaseServiceHandler handler = createHandler(); ByteBuffer table = wrap(tableAname); byte[] rowName1 = "testDeleteMultiple1".getBytes(); byte[] rowName2 = "testDeleteMultiple2".getBytes(); List<TColumnValue> columnValues = new ArrayList<TColumnValue>(); columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname))); columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname))); List<TPut> puts = new ArrayList<TPut>(); puts.add(new TPut(wrap(rowName1), columnValues)); puts.add(new TPut(wrap(rowName2), columnValues)); handler.putMultiple(table, puts); List<TDelete> deletes = new ArrayList<TDelete>(); deletes.add(new TDelete(wrap(rowName1))); deletes.add(new TDelete(wrap(rowName2))); List<TDelete> deleteResults = handler.deleteMultiple(table, deletes); assertEquals(0, deleteResults.size()); assertFalse(handler.exists(table, new TGet(wrap(rowName1)))); assertFalse(handler.exists(table, new TGet(wrap(rowName2)))); } |
ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public TResult increment(ByteBuffer table, TIncrement increment) throws TIOError, TException { Table htable = getTable(table); try { return resultFromHBase(htable.increment(incrementFromThrift(increment))); } catch (IOException e) { throw getTIOError(e); } finally { closeTable(htable); } } ThriftHBaseServiceHandler(final Configuration conf,
final UserProvider userProvider); static THBaseService.Iface newInstance(
THBaseService.Iface handler, ThriftMetrics metrics); @Override boolean exists(ByteBuffer table, TGet get); @Override TResult get(ByteBuffer table, TGet get); @Override List<TResult> getMultiple(ByteBuffer table, List<TGet> gets); @Override void put(ByteBuffer table, TPut put); @Override boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family,
ByteBuffer qualifier, ByteBuffer value, TPut put); @Override void putMultiple(ByteBuffer table, List<TPut> puts); @Override void deleteSingle(ByteBuffer table, TDelete deleteSingle); @Override List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes); @Override boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family,
ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle); @Override TResult increment(ByteBuffer table, TIncrement increment); @Override TResult append(ByteBuffer table, TAppend append); @Override int openScanner(ByteBuffer table, TScan scan); @Override List<TResult> getScannerRows(int scannerId, int numRows); @Override List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows); @Override void closeScanner(int scannerId); @Override void mutateRow(ByteBuffer table, TRowMutations rowMutations); @Override List<THRegionLocation> getAllRegionLocations(ByteBuffer table); @Override THRegionLocation getRegionLocation(ByteBuffer table, ByteBuffer row, boolean reload); } | @Test public void testIncrement() throws Exception { ThriftHBaseServiceHandler handler = createHandler(); byte[] rowName = "testIncrement".getBytes(); ByteBuffer table = wrap(tableAname); List<TColumnValue> columnValues = new ArrayList<TColumnValue>(); columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(Bytes.toBytes(1L)))); TPut put = new TPut(wrap(rowName), columnValues); put.setColumnValues(columnValues); handler.put(table, put); List<TColumnIncrement> incrementColumns = new ArrayList<TColumnIncrement>(); incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname))); TIncrement increment = new TIncrement(wrap(rowName), incrementColumns); handler.increment(table, increment); TGet get = new TGet(wrap(rowName)); TResult result = handler.get(table, get); assertArrayEquals(rowName, result.getRow()); assertEquals(1, result.getColumnValuesSize()); TColumnValue columnValue = result.getColumnValues().get(0); assertArrayEquals(Bytes.toBytes(2L), columnValue.getValue()); } |
ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public TResult append(ByteBuffer table, TAppend append) throws TIOError, TException { Table htable = getTable(table); try { return resultFromHBase(htable.append(appendFromThrift(append))); } catch (IOException e) { throw getTIOError(e); } finally { closeTable(htable); } } ThriftHBaseServiceHandler(final Configuration conf,
final UserProvider userProvider); static THBaseService.Iface newInstance(
THBaseService.Iface handler, ThriftMetrics metrics); @Override boolean exists(ByteBuffer table, TGet get); @Override TResult get(ByteBuffer table, TGet get); @Override List<TResult> getMultiple(ByteBuffer table, List<TGet> gets); @Override void put(ByteBuffer table, TPut put); @Override boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family,
ByteBuffer qualifier, ByteBuffer value, TPut put); @Override void putMultiple(ByteBuffer table, List<TPut> puts); @Override void deleteSingle(ByteBuffer table, TDelete deleteSingle); @Override List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes); @Override boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family,
ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle); @Override TResult increment(ByteBuffer table, TIncrement increment); @Override TResult append(ByteBuffer table, TAppend append); @Override int openScanner(ByteBuffer table, TScan scan); @Override List<TResult> getScannerRows(int scannerId, int numRows); @Override List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows); @Override void closeScanner(int scannerId); @Override void mutateRow(ByteBuffer table, TRowMutations rowMutations); @Override List<THRegionLocation> getAllRegionLocations(ByteBuffer table); @Override THRegionLocation getRegionLocation(ByteBuffer table, ByteBuffer row, boolean reload); } | @Test public void testAppend() throws Exception { ThriftHBaseServiceHandler handler = createHandler(); byte[] rowName = "testAppend".getBytes(); ByteBuffer table = wrap(tableAname); byte[] v1 = Bytes.toBytes("42"); byte[] v2 = Bytes.toBytes("23"); List<TColumnValue> columnValues = new ArrayList<TColumnValue>(); columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(v1))); TPut put = new TPut(wrap(rowName), columnValues); put.setColumnValues(columnValues); handler.put(table, put); List<TColumnValue> appendColumns = new ArrayList<TColumnValue>(); appendColumns.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(v2))); TAppend append = new TAppend(wrap(rowName), appendColumns); handler.append(table, append); TGet get = new TGet(wrap(rowName)); TResult result = handler.get(table, get); assertArrayEquals(rowName, result.getRow()); assertEquals(1, result.getColumnValuesSize()); TColumnValue columnValue = result.getColumnValues().get(0); assertArrayEquals(Bytes.add(v1, v2), columnValue.getValue()); } |
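Every handler method shown so far repeats the same shape: get a Table for the call, run one operation, translate IOException into the Thrift TIOError, and close the table in a finally block. A hedged sketch of that execute-around idiom, with invented names and a RuntimeException standing in for getTIOError:

import java.io.Closeable;
import java.io.IOException;

// Execute-around idiom mirroring the handler's getTable/try/finally-closeTable shape.
public class ExecuteAround {
  interface TableOp<T, R extends Closeable> {
    T apply(R resource) throws IOException;
  }

  static <T, R extends Closeable> T withResource(R resource, TableOp<T, R> op) {
    try {
      return op.apply(resource);
    } catch (IOException e) {
      throw new RuntimeException(e); // stands in for "throw getTIOError(e)"
    } finally {
      try {
        resource.close();            // stands in for closeTable(htable)
      } catch (IOException ignored) {
      }
    }
  }

  public static void main(String[] args) {
    java.io.ByteArrayInputStream in = new java.io.ByteArrayInputStream(new byte[]{42});
    int first = withResource(in, r -> r.read()); // stream is closed afterwards
    assert first == 42;
  }
}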
ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TPut put) throws TIOError, TException { Table htable = getTable(table); try { return htable.checkAndPut(byteBufferToByteArray(row), byteBufferToByteArray(family), byteBufferToByteArray(qualifier), (value == null) ? null : byteBufferToByteArray(value), putFromThrift(put)); } catch (IOException e) { throw getTIOError(e); } finally { closeTable(htable); } } ThriftHBaseServiceHandler(final Configuration conf,
final UserProvider userProvider); static THBaseService.Iface newInstance(
THBaseService.Iface handler, ThriftMetrics metrics); @Override boolean exists(ByteBuffer table, TGet get); @Override TResult get(ByteBuffer table, TGet get); @Override List<TResult> getMultiple(ByteBuffer table, List<TGet> gets); @Override void put(ByteBuffer table, TPut put); @Override boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family,
ByteBuffer qualifier, ByteBuffer value, TPut put); @Override void putMultiple(ByteBuffer table, List<TPut> puts); @Override void deleteSingle(ByteBuffer table, TDelete deleteSingle); @Override List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes); @Override boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family,
ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle); @Override TResult increment(ByteBuffer table, TIncrement increment); @Override TResult append(ByteBuffer table, TAppend append); @Override int openScanner(ByteBuffer table, TScan scan); @Override List<TResult> getScannerRows(int scannerId, int numRows); @Override List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows); @Override void closeScanner(int scannerId); @Override void mutateRow(ByteBuffer table, TRowMutations rowMutations); @Override List<THRegionLocation> getAllRegionLocations(ByteBuffer table); @Override THRegionLocation getRegionLocation(ByteBuffer table, ByteBuffer row, boolean reload); } | @Test public void testCheckAndPut() throws Exception { ThriftHBaseServiceHandler handler = createHandler(); byte[] rowName = "testCheckAndPut".getBytes(); ByteBuffer table = wrap(tableAname); List<TColumnValue> columnValuesA = new ArrayList<TColumnValue>(); TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)); columnValuesA.add(columnValueA); TPut putA = new TPut(wrap(rowName), columnValuesA); putA.setColumnValues(columnValuesA); List<TColumnValue> columnValuesB = new ArrayList<TColumnValue>(); TColumnValue columnValueB = new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname)); columnValuesB.add(columnValueB); TPut putB = new TPut(wrap(rowName), columnValuesB); putB.setColumnValues(columnValuesB); assertFalse(handler.checkAndPut(table, wrap(rowName), wrap(familyAname), wrap(qualifierAname), wrap(valueAname), putB)); TGet get = new TGet(wrap(rowName)); TResult result = handler.get(table, get); assertEquals(0, result.getColumnValuesSize()); handler.put(table, putA); assertTrue(handler.checkAndPut(table, wrap(rowName), wrap(familyAname), wrap(qualifierAname), wrap(valueAname), putB)); result = handler.get(table, get); assertArrayEquals(rowName, result.getRow()); List<TColumnValue> returnedColumnValues = result.getColumnValues(); List<TColumnValue> expectedColumnValues = new ArrayList<TColumnValue>(); expectedColumnValues.add(columnValueA); expectedColumnValues.add(columnValueB); assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues); } |
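The testCheckAndPut flow above hinges on the guard semantics: the put is applied only when the checked cell currently holds the expected value, and an absent cell fails a non-null check (which is why the first checkAndPut returns false before putA is written). A simplified in-memory sketch of those semantics, using a single-cell string map rather than the HBase data model:

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

// Simplified model of checkAndPut: write only if the guard cell matches.
// A null expected value models "the cell must not exist".
public class CheckAndPutSketch {
  private final Map<String, String> cells = new HashMap<>();

  synchronized boolean checkAndPut(String checkKey, String expected,
                                   String putKey, String putValue) {
    if (!Objects.equals(cells.get(checkKey), expected)) {
      return false;              // guard failed: nothing is written
    }
    cells.put(putKey, putValue); // guard held: mutation applied atomically
    return true;
  }

  public static void main(String[] args) {
    CheckAndPutSketch t = new CheckAndPutSketch();
    assert !t.checkAndPut("rowA", "v", "rowA", "w"); // cell absent: guard fails
    t.cells.put("rowA", "v");
    assert t.checkAndPut("rowA", "v", "rowB", "w");  // guard holds: put lands
  }
}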
ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle) throws TIOError, TException { Table htable = getTable(table); try { if (value == null) { return htable.checkAndDelete(byteBufferToByteArray(row), byteBufferToByteArray(family), byteBufferToByteArray(qualifier), null, deleteFromThrift(deleteSingle)); } else { return htable.checkAndDelete(byteBufferToByteArray(row), byteBufferToByteArray(family), byteBufferToByteArray(qualifier), byteBufferToByteArray(value), deleteFromThrift(deleteSingle)); } } catch (IOException e) { throw getTIOError(e); } finally { closeTable(htable); } } ThriftHBaseServiceHandler(final Configuration conf,
final UserProvider userProvider); static THBaseService.Iface newInstance(
THBaseService.Iface handler, ThriftMetrics metrics); @Override boolean exists(ByteBuffer table, TGet get); @Override TResult get(ByteBuffer table, TGet get); @Override List<TResult> getMultiple(ByteBuffer table, List<TGet> gets); @Override void put(ByteBuffer table, TPut put); @Override boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family,
ByteBuffer qualifier, ByteBuffer value, TPut put); @Override void putMultiple(ByteBuffer table, List<TPut> puts); @Override void deleteSingle(ByteBuffer table, TDelete deleteSingle); @Override List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes); @Override boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family,
ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle); @Override TResult increment(ByteBuffer table, TIncrement increment); @Override TResult append(ByteBuffer table, TAppend append); @Override int openScanner(ByteBuffer table, TScan scan); @Override List<TResult> getScannerRows(int scannerId, int numRows); @Override List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows); @Override void closeScanner(int scannerId); @Override void mutateRow(ByteBuffer table, TRowMutations rowMutations); @Override List<THRegionLocation> getAllRegionLocations(ByteBuffer table); @Override THRegionLocation getRegionLocation(ByteBuffer table, ByteBuffer row, boolean reload); } | @Test public void testCheckAndDelete() throws Exception { ThriftHBaseServiceHandler handler = createHandler(); byte[] rowName = "testCheckAndDelete".getBytes(); ByteBuffer table = wrap(tableAname); List<TColumnValue> columnValuesA = new ArrayList<TColumnValue>(); TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)); columnValuesA.add(columnValueA); TPut putA = new TPut(wrap(rowName), columnValuesA); putA.setColumnValues(columnValuesA); List<TColumnValue> columnValuesB = new ArrayList<TColumnValue>(); TColumnValue columnValueB = new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname)); columnValuesB.add(columnValueB); TPut putB = new TPut(wrap(rowName), columnValuesB); putB.setColumnValues(columnValuesB); handler.put(table, putB); TDelete delete = new TDelete(wrap(rowName)); assertFalse(handler.checkAndDelete(table, wrap(rowName), wrap(familyAname), wrap(qualifierAname), wrap(valueAname), delete)); TGet get = new TGet(wrap(rowName)); TResult result = handler.get(table, get); assertArrayEquals(rowName, result.getRow()); assertTColumnValuesEqual(columnValuesB, result.getColumnValues()); handler.put(table, putA); assertTrue(handler.checkAndDelete(table, wrap(rowName), wrap(familyAname), wrap(qualifierAname), wrap(valueAname), delete)); result = handler.get(table, get); assertFalse(result.isSetRow()); assertEquals(0, result.getColumnValuesSize()); } |
ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows) throws TIOError, TException { Table htable = getTable(table); List<TResult> results = null; ResultScanner scanner = null; try { scanner = htable.getScanner(scanFromThrift(scan)); results = resultsFromHBase(scanner.next(numRows)); } catch (IOException e) { throw getTIOError(e); } finally { if (scanner != null) { scanner.close(); } closeTable(htable); } return results; } ThriftHBaseServiceHandler(final Configuration conf,
final UserProvider userProvider); static THBaseService.Iface newInstance(
THBaseService.Iface handler, ThriftMetrics metrics); @Override boolean exists(ByteBuffer table, TGet get); @Override TResult get(ByteBuffer table, TGet get); @Override List<TResult> getMultiple(ByteBuffer table, List<TGet> gets); @Override void put(ByteBuffer table, TPut put); @Override boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family,
ByteBuffer qualifier, ByteBuffer value, TPut put); @Override void putMultiple(ByteBuffer table, List<TPut> puts); @Override void deleteSingle(ByteBuffer table, TDelete deleteSingle); @Override List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes); @Override boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family,
ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle); @Override TResult increment(ByteBuffer table, TIncrement increment); @Override TResult append(ByteBuffer table, TAppend append); @Override int openScanner(ByteBuffer table, TScan scan); @Override List<TResult> getScannerRows(int scannerId, int numRows); @Override List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows); @Override void closeScanner(int scannerId); @Override void mutateRow(ByteBuffer table, TRowMutations rowMutations); @Override List<THRegionLocation> getAllRegionLocations(ByteBuffer table); @Override THRegionLocation getRegionLocation(ByteBuffer table, ByteBuffer row, boolean reload); } | @Test public void testGetScannerResults() throws Exception { ThriftHBaseServiceHandler handler = createHandler(); ByteBuffer table = wrap(tableAname); TColumnValue columnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)); List<TColumnValue> columnValues = new ArrayList<TColumnValue>(); columnValues.add(columnValue); for (int i = 0; i < 20; i++) { TPut put = new TPut(wrap(("testGetScannerResults" + pad(i, (byte) 2)).getBytes()), columnValues); handler.put(table, put); } TScan scan = new TScan(); List<TColumn> columns = new ArrayList<TColumn>(); TColumn column = new TColumn(); column.setFamily(familyAname); column.setQualifier(qualifierAname); columns.add(column); scan.setColumns(columns); scan.setStartRow("testGetScannerResults".getBytes()); scan.setStopRow("testGetScannerResults05".getBytes()); List<TResult> results = handler.getScannerResults(table, scan, 5); assertEquals(5, results.size()); for (int i = 0; i < 5; i++) { assertArrayEquals(("testGetScannerResults" + pad(i, (byte) 2)).getBytes(), results.get(i) .getRow()); } scan.setStopRow("testGetScannerResults10".getBytes()); results = handler.getScannerResults(table, scan, 10); assertEquals(10, results.size()); for (int i = 0; i < 10; i++) { assertArrayEquals(("testGetScannerResults" + pad(i, (byte) 2)).getBytes(), results.get(i) .getRow()); } scan.setStopRow("testGetScannerResults20".getBytes()); results = handler.getScannerResults(table, scan, 20); assertEquals(20, results.size()); for (int i = 0; i < 20; i++) { assertArrayEquals(("testGetScannerResults" + pad(i, (byte) 2)).getBytes(), results.get(i) .getRow()); } scan = new TScan(); scan.setColumns(columns); scan.setReversed(true); scan.setStartRow("testGetScannerResults20".getBytes()); scan.setStopRow("testGetScannerResults".getBytes()); results = handler.getScannerResults(table, scan, 20); assertEquals(20, results.size()); for (int i = 0; i < 20; i++) { assertArrayEquals(("testGetScannerResults" + pad(19 - i, (byte) 2)).getBytes(), results.get(i) .getRow()); } } |
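testGetScannerResults depends on an undefined pad(i, (byte) 2) helper producing zero-padded row suffixes, which matters because HBase scans in lexicographic byte order: without padding, "...10" would sort before "...2" and the stop-row arithmetic would break. A plausible sketch of that helper (the real one lives in the test class and may differ):

// Plausible pad(num, width): zero-pad so lexicographic order equals numeric order.
public class Pad {
  static String pad(int num, byte pad) {
    StringBuilder retVal = new StringBuilder(Integer.toString(num));
    while (retVal.length() < pad) {
      retVal.insert(0, '0');
    }
    return retVal.toString();
  }

  public static void main(String[] args) {
    assert pad(3, (byte) 2).equals("03");
    assert pad(19, (byte) 2).equals("19");
    // "testGetScannerResults00".."testGetScannerResults19" now sort numerically.
  }
}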
ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public TResult get(ByteBuffer table, TGet get) throws TIOError, TException { Table htable = getTable(table); try { return resultFromHBase(htable.get(getFromThrift(get))); } catch (IOException e) { throw getTIOError(e); } finally { closeTable(htable); } } ThriftHBaseServiceHandler(final Configuration conf,
final UserProvider userProvider); static THBaseService.Iface newInstance(
THBaseService.Iface handler, ThriftMetrics metrics); @Override boolean exists(ByteBuffer table, TGet get); @Override TResult get(ByteBuffer table, TGet get); @Override List<TResult> getMultiple(ByteBuffer table, List<TGet> gets); @Override void put(ByteBuffer table, TPut put); @Override boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family,
ByteBuffer qualifier, ByteBuffer value, TPut put); @Override void putMultiple(ByteBuffer table, List<TPut> puts); @Override void deleteSingle(ByteBuffer table, TDelete deleteSingle); @Override List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes); @Override boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family,
ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle); @Override TResult increment(ByteBuffer table, TIncrement increment); @Override TResult append(ByteBuffer table, TAppend append); @Override int openScanner(ByteBuffer table, TScan scan); @Override List<TResult> getScannerRows(int scannerId, int numRows); @Override List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows); @Override void closeScanner(int scannerId); @Override void mutateRow(ByteBuffer table, TRowMutations rowMutations); @Override List<THRegionLocation> getAllRegionLocations(ByteBuffer table); @Override THRegionLocation getRegionLocation(ByteBuffer table, ByteBuffer row, boolean reload); } | @Test public void testFilterRegistration() throws Exception { Configuration conf = UTIL.getConfiguration(); conf.set("hbase.thrift.filters", "MyFilter:filterclass"); ThriftServer.registerFilters(conf); Map<String, String> registeredFilters = ParseFilter.getAllFilters(); assertEquals("filterclass", registeredFilters.get("MyFilter")); } |
ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public void put(ByteBuffer table, TPut put) throws TIOError, TException { Table htable = getTable(table); try { htable.put(putFromThrift(put)); } catch (IOException e) { throw getTIOError(e); } finally { closeTable(htable); } } ThriftHBaseServiceHandler(final Configuration conf,
final UserProvider userProvider); static THBaseService.Iface newInstance(
THBaseService.Iface handler, ThriftMetrics metrics); @Override boolean exists(ByteBuffer table, TGet get); @Override TResult get(ByteBuffer table, TGet get); @Override List<TResult> getMultiple(ByteBuffer table, List<TGet> gets); @Override void put(ByteBuffer table, TPut put); @Override boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family,
ByteBuffer qualifier, ByteBuffer value, TPut put); @Override void putMultiple(ByteBuffer table, List<TPut> puts); @Override void deleteSingle(ByteBuffer table, TDelete deleteSingle); @Override List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes); @Override boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family,
ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle); @Override TResult increment(ByteBuffer table, TIncrement increment); @Override TResult append(ByteBuffer table, TAppend append); @Override int openScanner(ByteBuffer table, TScan scan); @Override List<TResult> getScannerRows(int scannerId, int numRows); @Override List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows); @Override void closeScanner(int scannerId); @Override void mutateRow(ByteBuffer table, TRowMutations rowMutations); @Override List<THRegionLocation> getAllRegionLocations(ByteBuffer table); @Override THRegionLocation getRegionLocation(ByteBuffer table, ByteBuffer row, boolean reload); } | @Test public void testAttribute() throws Exception { byte[] rowName = "testAttribute".getBytes(); byte[] attributeKey = "attribute1".getBytes(); byte[] attributeValue = "value1".getBytes(); Map<ByteBuffer, ByteBuffer> attributes = new HashMap<ByteBuffer, ByteBuffer>(); attributes.put(wrap(attributeKey), wrap(attributeValue)); TGet tGet = new TGet(wrap(rowName)); tGet.setAttributes(attributes); Get get = getFromThrift(tGet); assertArrayEquals(get.getAttribute("attribute1"), attributeValue); List<TColumnValue> columnValues = new ArrayList<TColumnValue>(); columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname))); TPut tPut = new TPut(wrap(rowName) , columnValues); tPut.setAttributes(attributes); Put put = putFromThrift(tPut); assertArrayEquals(put.getAttribute("attribute1"), attributeValue); TScan tScan = new TScan(); tScan.setAttributes(attributes); Scan scan = scanFromThrift(tScan); assertArrayEquals(scan.getAttribute("attribute1"), attributeValue); List<TColumnIncrement> incrementColumns = new ArrayList<TColumnIncrement>(); incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname))); TIncrement tIncrement = new TIncrement(wrap(rowName), incrementColumns); tIncrement.setAttributes(attributes); Increment increment = incrementFromThrift(tIncrement); assertArrayEquals(increment.getAttribute("attribute1"), attributeValue); TDelete tDelete = new TDelete(wrap(rowName)); tDelete.setAttributes(attributes); Delete delete = deleteFromThrift(tDelete); assertArrayEquals(delete.getAttribute("attribute1"), attributeValue); } |
ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public void mutateRow(ByteBuffer table, TRowMutations rowMutations) throws TIOError, TException { Table htable = getTable(table); try { htable.mutateRow(rowMutationsFromThrift(rowMutations)); } catch (IOException e) { throw getTIOError(e); } finally { closeTable(htable); } } ThriftHBaseServiceHandler(final Configuration conf,
final UserProvider userProvider); static THBaseService.Iface newInstance(
THBaseService.Iface handler, ThriftMetrics metrics); @Override boolean exists(ByteBuffer table, TGet get); @Override TResult get(ByteBuffer table, TGet get); @Override List<TResult> getMultiple(ByteBuffer table, List<TGet> gets); @Override void put(ByteBuffer table, TPut put); @Override boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family,
ByteBuffer qualifier, ByteBuffer value, TPut put); @Override void putMultiple(ByteBuffer table, List<TPut> puts); @Override void deleteSingle(ByteBuffer table, TDelete deleteSingle); @Override List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes); @Override boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family,
ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle); @Override TResult increment(ByteBuffer table, TIncrement increment); @Override TResult append(ByteBuffer table, TAppend append); @Override int openScanner(ByteBuffer table, TScan scan); @Override List<TResult> getScannerRows(int scannerId, int numRows); @Override List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows); @Override void closeScanner(int scannerId); @Override void mutateRow(ByteBuffer table, TRowMutations rowMutations); @Override List<THRegionLocation> getAllRegionLocations(ByteBuffer table); @Override THRegionLocation getRegionLocation(ByteBuffer table, ByteBuffer row, boolean reload); } | @Test public void testMutateRow() throws Exception { ThriftHBaseServiceHandler handler = createHandler(); byte[] rowName = "testMutateRow".getBytes(); ByteBuffer table = wrap(tableAname); List<TColumnValue> columnValuesA = new ArrayList<TColumnValue>(); TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)); columnValuesA.add(columnValueA); TPut putA = new TPut(wrap(rowName), columnValuesA); putA.setColumnValues(columnValuesA); handler.put(table,putA); TGet get = new TGet(wrap(rowName)); TResult result = handler.get(table, get); assertArrayEquals(rowName, result.getRow()); List<TColumnValue> returnedColumnValues = result.getColumnValues(); List<TColumnValue> expectedColumnValues = new ArrayList<TColumnValue>(); expectedColumnValues.add(columnValueA); assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues); List<TColumnValue> columnValuesB = new ArrayList<TColumnValue>(); TColumnValue columnValueB = new TColumnValue(wrap(familyAname), wrap(qualifierBname), wrap(valueBname)); columnValuesB.add(columnValueB); TPut putB = new TPut(wrap(rowName), columnValuesB); putB.setColumnValues(columnValuesB); TDelete delete = new TDelete(wrap(rowName)); List<TColumn> deleteColumns = new ArrayList<TColumn>(); TColumn deleteColumn = new TColumn(wrap(familyAname)); deleteColumn.setQualifier(qualifierAname); deleteColumns.add(deleteColumn); delete.setColumns(deleteColumns); List<TMutation> mutations = new ArrayList<TMutation>(); TMutation mutationA = TMutation.put(putB); mutations.add(mutationA); TMutation mutationB = TMutation.deleteSingle(delete); mutations.add(mutationB); TRowMutations tRowMutations = new TRowMutations(wrap(rowName),mutations); handler.mutateRow(table,tRowMutations); result = handler.get(table, get); assertArrayEquals(rowName, result.getRow()); returnedColumnValues = result.getColumnValues(); expectedColumnValues = new ArrayList<TColumnValue>(); expectedColumnValues.add(columnValueB); assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues); } |
ClientExceptionsUtil { public static Throwable findException(Object exception) { if (exception == null || !(exception instanceof Throwable)) { return null; } Throwable cur = (Throwable) exception; while (cur != null) { if (isSpecialException(cur)) { return cur; } if (cur instanceof RemoteException) { RemoteException re = (RemoteException) cur; cur = re.unwrapRemoteException( RegionOpeningException.class, RegionMovedException.class, RegionTooBusyException.class); if (cur == null) { cur = re.unwrapRemoteException(); } if (cur == re) { return cur; } } else if (cur.getCause() != null) { cur = cur.getCause(); } else { return cur; } } return null; } private ClientExceptionsUtil(); static boolean isMetaClearingException(Throwable cur); static boolean isSpecialException(Throwable cur); static Throwable findException(Object exception); } | @Test public void testFindException() throws Exception { IOException ioe = new IOException("Tesst"); ServiceException se = new ServiceException(ioe); assertEquals(ioe, ClientExceptionsUtil.findException(se)); } |
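The findException test above checks that a ServiceException wrapping an IOException unwraps to the IOException itself. A simplified analogue of that cause-chain walk in plain Java (the real method additionally unwraps Hadoop RemoteExceptions and stops at "special" exception types):

import java.io.IOException;

// Simplified cause-chain walker: unwrap until an IOException is found.
public class CauseWalker {
  static Throwable findIOException(Throwable t) {
    Throwable cur = t;
    while (cur != null) {
      if (cur instanceof IOException) {
        return cur;         // found the cause of interest
      }
      cur = cur.getCause(); // otherwise keep unwrapping
    }
    return null;            // chain exhausted without a match
  }

  public static void main(String[] args) {
    IOException ioe = new IOException("Tesst");
    RuntimeException wrapped = new RuntimeException(ioe);
    assert findIOException(wrapped) == ioe; // mirrors the ServiceException test
  }
}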
LongComparator extends ByteArrayComparable { @Override public int compareTo(byte[] value, int offset, int length) { Long that = Bytes.toLong(value, offset, length); return this.longValue.compareTo(that); } LongComparator(long value); @Override int compareTo(byte[] value, int offset, int length); @Override byte [] toByteArray(); static LongComparator parseFrom(final byte [] pbBytes); } | @Test public void testSimple() { for (int i = 1; i < values.length ; i++) { for (int j = 0; j < i; j++) { LongComparator cp = new LongComparator(values[i]); assertEquals(1, cp.compareTo(Bytes.toBytes(values[j]))); } } } |
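A note on why LongComparator decodes to a Long before comparing, rather than comparing bytes: big-endian two's-complement longs do not sort correctly under unsigned byte-wise comparison once negatives are involved. A small demonstration:

import java.nio.ByteBuffer;
import java.util.Arrays;

// Raw byte order mis-ranks negative longs; numeric comparison does not.
public class LongCompareSketch {
  static byte[] toBytes(long v) {
    return ByteBuffer.allocate(8).putLong(v).array();
  }

  public static void main(String[] args) {
    byte[] minusOne = toBytes(-1L); // 0xFF...FF
    byte[] one = toBytes(1L);       // 0x00...01
    // Unsigned byte-wise order puts -1 after 1:
    assert Arrays.compareUnsigned(minusOne, one) > 0;
    // Numeric order, as LongComparator.compareTo computes it, does not:
    assert Long.compare(-1L, 1L) < 0;
  }
}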
RegionLocations { public RegionLocations removeByServer(ServerName serverName) { HRegionLocation[] newLocations = null; for (int i = 0; i < locations.length; i++) { if (locations[i] != null && serverName.equals(locations[i].getServerName())) { if (newLocations == null) { newLocations = new HRegionLocation[locations.length]; System.arraycopy(locations, 0, newLocations, 0, i); } newLocations[i] = null; } else if (newLocations != null) { newLocations[i] = locations[i]; } } return newLocations == null ? this : new RegionLocations(newLocations); } RegionLocations(HRegionLocation... locations); RegionLocations(Collection<HRegionLocation> locations); int size(); int numNonNullElements(); boolean isEmpty(); RegionLocations removeByServer(ServerName serverName); RegionLocations remove(HRegionLocation location); RegionLocations remove(int replicaId); RegionLocations mergeLocations(RegionLocations other); RegionLocations updateLocation(HRegionLocation location,
boolean checkForEquals, boolean force); HRegionLocation getRegionLocation(int replicaId); HRegionLocation getRegionLocationByRegionName(byte[] regionName); HRegionLocation[] getRegionLocations(); HRegionLocation getDefaultRegionLocation(); HRegionLocation getRegionLocation(); @Override String toString(); } | @Test public void testRemoveByServer() { RegionLocations list; list = new RegionLocations(); assertTrue(list == list.removeByServer(sn0)); list = hrll(hrl(info0, sn0)); assertTrue(list == list.removeByServer(sn1)); list = list.removeByServer(sn0); assertEquals(0, list.numNonNullElements()); list = hrll(hrl(info0, sn0), hrl(info1, sn1), hrl(info2, sn2), hrl(info9, sn2)); assertTrue(list == list.removeByServer(sn3)); list = list.removeByServer(sn0); assertNull(list.getRegionLocation(0)); assertEquals(sn1, list.getRegionLocation(1).getServerName()); assertEquals(sn2, list.getRegionLocation(2).getServerName()); assertNull(list.getRegionLocation(5)); assertEquals(sn2, list.getRegionLocation(9).getServerName()); list = hrll(hrl(info0, sn1), hrl(info1, sn1), hrl(info2, sn0), hrl(info9, sn0)); list = list.removeByServer(sn0); assertEquals(sn1, list.getRegionLocation(0).getServerName()); assertEquals(sn1, list.getRegionLocation(1).getServerName()); assertNull(list.getRegionLocation(2)); assertNull(list.getRegionLocation(5)); assertNull(list.getRegionLocation(9)); } |
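removeByServer above (and remove/updateLocation below) all follow the same copy-on-write discipline: the backing array is never mutated in place; the method returns `this` when nothing matched, or a fresh instance built from a lazily made copy, with removed slots nulled rather than compacted so replicaId positions stay stable. A generic stand-in sketch of that idiom (not the RegionLocations code itself):

import java.util.Arrays;

// Copy-on-write array update: return `this` unchanged, or a new copy with
// matching slots nulled in place (indices are preserved, never shifted).
public final class CowArray<T> {
  private final T[] items;

  CowArray(T[] items) { this.items = items; }

  CowArray<T> without(T victim) {
    T[] copy = null;
    for (int i = 0; i < items.length; i++) {
      if (items[i] != null && items[i].equals(victim)) {
        if (copy == null) {
          copy = Arrays.copyOf(items, items.length); // copy lazily, on first hit
        }
        copy[i] = null;
      }
    }
    return copy == null ? this : new CowArray<>(copy);
  }
}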
RegionLocations { public RegionLocations remove(HRegionLocation location) { if (location == null) return this; if (location.getRegionInfo() == null) return this; int replicaId = location.getRegionInfo().getReplicaId(); if (replicaId >= locations.length) return this; if (locations[replicaId] == null || !location.getRegionInfo().equals(locations[replicaId].getRegionInfo()) || !location.equals(locations[replicaId])) { return this; } HRegionLocation[] newLocations = new HRegionLocation[locations.length]; System.arraycopy(locations, 0, newLocations, 0, locations.length); newLocations[replicaId] = null; return new RegionLocations(newLocations); } RegionLocations(HRegionLocation... locations); RegionLocations(Collection<HRegionLocation> locations); int size(); int numNonNullElements(); boolean isEmpty(); RegionLocations removeByServer(ServerName serverName); RegionLocations remove(HRegionLocation location); RegionLocations remove(int replicaId); RegionLocations mergeLocations(RegionLocations other); RegionLocations updateLocation(HRegionLocation location,
boolean checkForEquals, boolean force); HRegionLocation getRegionLocation(int replicaId); HRegionLocation getRegionLocationByRegionName(byte[] regionName); HRegionLocation[] getRegionLocations(); HRegionLocation getDefaultRegionLocation(); HRegionLocation getRegionLocation(); @Override String toString(); } | @Test public void testRemove() { RegionLocations list; list = new RegionLocations(); assertTrue(list == list.remove(hrl(info0, sn0))); list = hrll(hrl(info0, sn0)); assertTrue(list == list.remove(hrl(info0, sn1))); list = list.remove(hrl(info0, sn0)); assertTrue(list.isEmpty()); list = hrll(hrl(info0, sn0), hrl(info1, sn1), hrl(info2, sn2), hrl(info9, sn2)); assertTrue(list == list.remove(hrl(info1, sn3))); list = list.remove(hrl(info0, sn0)); assertNull(list.getRegionLocation(0)); assertEquals(sn1, list.getRegionLocation(1).getServerName()); assertEquals(sn2, list.getRegionLocation(2).getServerName()); assertNull(list.getRegionLocation(5)); assertEquals(sn2, list.getRegionLocation(9).getServerName()); list = list.remove(hrl(info9, sn2)); assertNull(list.getRegionLocation(0)); assertEquals(sn1, list.getRegionLocation(1).getServerName()); assertEquals(sn2, list.getRegionLocation(2).getServerName()); assertNull(list.getRegionLocation(5)); assertNull(list.getRegionLocation(9)); list = hrll(hrl(info0, sn1), hrl(info1, sn1), hrl(info2, sn0), hrl(info9, sn0)); list = list.remove(hrl(info9, sn0)); assertEquals(sn1, list.getRegionLocation(0).getServerName()); assertEquals(sn1, list.getRegionLocation(1).getServerName()); assertEquals(sn0, list.getRegionLocation(2).getServerName()); assertNull(list.getRegionLocation(5)); assertNull(list.getRegionLocation(9)); } |
RegionLocations { public RegionLocations updateLocation(HRegionLocation location, boolean checkForEquals, boolean force) { assert location != null; int replicaId = location.getRegionInfo().getReplicaId(); HRegionLocation oldLoc = getRegionLocation(location.getRegionInfo().getReplicaId()); HRegionLocation selectedLoc = selectRegionLocation(oldLoc, location, checkForEquals, force); if (selectedLoc == oldLoc) { return this; } HRegionLocation[] newLocations = new HRegionLocation[Math.max(locations.length, replicaId +1)]; System.arraycopy(locations, 0, newLocations, 0, locations.length); newLocations[replicaId] = location; for (int i=0; i < newLocations.length; i++) { if (newLocations[i] != null) { if (!RegionReplicaUtil.isReplicasForSameRegion(location.getRegionInfo(), newLocations[i].getRegionInfo())) { newLocations[i] = null; } } } return new RegionLocations(newLocations); } RegionLocations(HRegionLocation... locations); RegionLocations(Collection<HRegionLocation> locations); int size(); int numNonNullElements(); boolean isEmpty(); RegionLocations removeByServer(ServerName serverName); RegionLocations remove(HRegionLocation location); RegionLocations remove(int replicaId); RegionLocations mergeLocations(RegionLocations other); RegionLocations updateLocation(HRegionLocation location,
boolean checkForEquals, boolean force); HRegionLocation getRegionLocation(int replicaId); HRegionLocation getRegionLocationByRegionName(byte[] regionName); HRegionLocation[] getRegionLocations(); HRegionLocation getDefaultRegionLocation(); HRegionLocation getRegionLocation(); @Override String toString(); } | @Test public void testUpdateLocation() { RegionLocations list; list = new RegionLocations(); list = list.updateLocation(hrl(info0, sn1), false, false); assertEquals(sn1, list.getRegionLocation(0).getServerName()); list = list.updateLocation(hrl(info9, sn3, 10), false, false); assertEquals(sn3, list.getRegionLocation(9).getServerName()); assertEquals(10, list.size()); list = list.updateLocation(hrl(info2, sn2, 10), false, false); assertEquals(sn2, list.getRegionLocation(2).getServerName()); assertEquals(10, list.size()); list = list.updateLocation(hrl(info2, sn3, 11), false, false); assertEquals(sn3, list.getRegionLocation(2).getServerName()); assertEquals(sn3, list.getRegionLocation(9).getServerName()); list = list.updateLocation(hrl(info2, sn1, 11), false, false); assertEquals(sn3, list.getRegionLocation(2).getServerName()); assertEquals(sn3, list.getRegionLocation(9).getServerName()); list = list.updateLocation(hrl(info2, sn1, 11), true, false); assertEquals(sn1, list.getRegionLocation(2).getServerName()); assertEquals(sn3, list.getRegionLocation(9).getServerName()); list = list.updateLocation(hrl(info2, sn2, 9), false, true); assertEquals(sn2, list.getRegionLocation(2).getServerName()); assertEquals(sn3, list.getRegionLocation(9).getServerName()); } |
HBaseFsck extends Configured implements Closeable { @Override public void close() throws IOException { IOUtils.closeQuietly(admin); IOUtils.closeQuietly(meta); IOUtils.closeQuietly(connection); } HBaseFsck(Configuration conf); HBaseFsck(Configuration conf, ExecutorService exec); void connect(); void offlineHdfsIntegrityRepair(); int onlineConsistencyRepair(); int onlineHbck(); static byte[] keyOnly(byte[] b); @Override void close(); void checkRegionBoundaries(); ErrorReporter getErrors(); void fixEmptyMetaCells(); void fixOrphanTables(); boolean rebuildMeta(boolean fix); void loadHdfsRegionDirs(); int mergeRegionDirs(Path targetRegionDir, HbckInfo contained); void dumpOverlapProblems(Multimap<byte[], HbckInfo> regions); void dumpSidelinedRegions(Map<Path, HbckInfo> regions); Multimap<byte[], HbckInfo> getOverlapGroups(
TableName table); static void setDisplayFullReport(); static void setForceExclusive(); boolean isExclusive(); static void setDisableBalancer(); boolean shouldDisableBalancer(); void setFixTableLocks(boolean shouldFix); void setFixTableZNodes(boolean shouldFix); void setFixAssignments(boolean shouldFix); void setFixMeta(boolean shouldFix); void setFixEmptyMetaCells(boolean shouldFix); void setCheckHdfs(boolean checking); void setFixHdfsHoles(boolean shouldFix); void setFixTableOrphans(boolean shouldFix); void setFixHdfsOverlaps(boolean shouldFix); void setFixHdfsOrphans(boolean shouldFix); void setFixVersionFile(boolean shouldFix); boolean shouldFixVersionFile(); void setSidelineBigOverlaps(boolean sbo); boolean shouldSidelineBigOverlaps(); void setFixSplitParents(boolean shouldFix); void setFixReferenceFiles(boolean shouldFix); boolean shouldIgnorePreCheckPermission(); void setIgnorePreCheckPermission(boolean ignorePreCheckPermission); void setMaxMerge(int mm); int getMaxMerge(); void setMaxOverlapsToSideline(int mo); int getMaxOverlapsToSideline(); void includeTable(TableName table); void setTimeLag(long seconds); void setSidelineDir(String sidelineDir); HFileCorruptionChecker getHFilecorruptionChecker(); void setHFileCorruptionChecker(HFileCorruptionChecker hfcc); void setRetCode(int code); int getRetCode(); static void main(String[] args); HBaseFsck exec(ExecutorService exec, String[] args); static void debugLsr(Configuration conf,
Path p); static void debugLsr(Configuration conf,
Path p, ErrorReporter errors); static final long DEFAULT_TIME_LAG; static final long DEFAULT_SLEEP_BEFORE_RERUN; } | @Test public void testHbckWithExcessReplica() throws Exception { TableName table = TableName.valueOf("testHbckWithExcessReplica"); try { setupTableWithRegionReplica(table, 2); TEST_UTIL.getHBaseAdmin().flush(table.getName()); assertNoErrors(doFsck(conf, false)); assertEquals(ROWKEYS.length, countRows()); HTable meta = new HTable(conf, TableName.META_TABLE_NAME); List<HRegionInfo> regions = TEST_UTIL.getHBaseAdmin().getTableRegions(table); byte[] startKey = Bytes.toBytes("B"); byte[] endKey = Bytes.toBytes("C"); byte[] metaKey = null; HRegionInfo newHri = null; for (HRegionInfo h : regions) { if (Bytes.compareTo(h.getStartKey(), startKey) == 0 && Bytes.compareTo(h.getEndKey(), endKey) == 0 && h.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) { metaKey = h.getRegionName(); newHri = RegionReplicaUtil.getRegionInfoForReplica(h, 2); break; } } Put put = new Put(metaKey); ServerName sn = TEST_UTIL.getHBaseAdmin().getClusterStatus().getServers() .toArray(new ServerName[0])[0]; MetaTableAccessor.addLocation(put, sn, sn.getStartcode(), -1, 2); meta.put(put); meta.flushCommits(); HBaseFsckRepair.fixUnassigned((HBaseAdmin)TEST_UTIL.getHBaseAdmin(), newHri); HBaseFsckRepair.waitUntilAssigned((HBaseAdmin)TEST_UTIL.getHBaseAdmin(), newHri); Delete delete = new Delete(metaKey); delete.deleteColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(2)); delete.deleteColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(2)); delete.deleteColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getSeqNumColumn(2)); meta.delete(delete); meta.flushCommits(); meta.close(); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[]{ERROR_CODE.NOT_IN_META}); hbck = doFsck(conf, true); hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[]{}); } finally { cleanupTable(table); } }
@Test (timeout=180000) public void testNotInHdfsWithReplicas() throws Exception { TableName table = TableName.valueOf("tableNotInHdfs"); HBaseAdmin admin = new HBaseAdmin(conf); try { HRegionInfo[] oldHris = new HRegionInfo[2]; setupTableWithRegionReplica(table, 2); assertEquals(ROWKEYS.length, countRows()); NavigableMap<HRegionInfo, ServerName> map = MetaScanner.allTableRegions(TEST_UTIL.getConnection(), tbl.getName()); int i = 0; for (Map.Entry<HRegionInfo, ServerName> m : map.entrySet()) { if (m.getKey().getStartKey().length > 0 && m.getKey().getStartKey()[0] == Bytes.toBytes("B")[0]) { LOG.debug("Initially server hosting " + m.getKey() + " is " + m.getValue()); oldHris[i++] = m.getKey(); } } TEST_UTIL.getHBaseAdmin().flush(table.getName()); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), false, false, true); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] {ERROR_CODE.NOT_IN_HDFS}); doFsck(conf, true); assertNoErrors(doFsck(conf,false)); assertEquals(ROWKEYS.length - 2, countRows()); i = 0; HRegionInfo[] newHris = new HRegionInfo[2]; map = MetaScanner.allTableRegions(TEST_UTIL.getConnection(), tbl.getName()); for (Map.Entry<HRegionInfo, ServerName> m : map.entrySet()) { if (m.getKey().getStartKey().length > 0 && m.getKey().getStartKey()[0] == Bytes.toBytes("B")[0]) { newHris[i++] = m.getKey(); } } Collection<ServerName> servers = admin.getClusterStatus().getServers(); Set<HRegionInfo> onlineRegions = new HashSet<HRegionInfo>(); for (ServerName s : servers) { List<HRegionInfo> list = admin.getOnlineRegions(s); onlineRegions.addAll(list); } assertTrue(onlineRegions.containsAll(Arrays.asList(newHris))); assertFalse(onlineRegions.removeAll(Arrays.asList(oldHris))); } finally { cleanupTable(table); admin.close(); } }
@Test(timeout=75000) public void testSplitDaughtersNotInMeta() throws Exception { TableName table = TableName.valueOf("testSplitdaughtersNotInMeta"); Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); admin.flush(table); HRegionLocation location = tbl.getRegionLocation(Bytes.toBytes("B")); HRegionInfo hri = location.getRegionInfo(); admin.enableCatalogJanitor(false); byte[] regionName = location.getRegionInfo().getRegionName(); admin.splitRegion(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM")); TestEndToEndSplitTransaction.blockUntilRegionSplit(conf, 60000, regionName, true); PairOfSameType<HRegionInfo> daughters = MetaTableAccessor.getDaughterRegions(meta.get(new Get(regionName))); Map<HRegionInfo, ServerName> hris = tbl.getRegionLocations(); undeployRegion(connection, hris.get(daughters.getFirst()), daughters.getFirst()); undeployRegion(connection, hris.get(daughters.getSecond()), daughters.getSecond()); List<Delete> deletes = new ArrayList<>(); deletes.add(new Delete(daughters.getFirst().getRegionName())); deletes.add(new Delete(daughters.getSecond().getRegionName())); meta.delete(deletes); RegionStates regionStates = TEST_UTIL.getMiniHBaseCluster().getMaster(). getAssignmentManager().getRegionStates(); regionStates.deleteRegion(daughters.getFirst()); regionStates.deleteRegion(daughters.getSecond()); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN }); hbck = doFsck( conf, true, true, false, false, false, false, false, false, false, false, false, null); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN }); Get get = new Get(hri.getRegionName()); Result result = meta.get(get); assertNotNull(result); assertNotNull(MetaTableAccessor.getHRegionInfo(result)); assertEquals(ROWKEYS.length, countRows()); assertEquals(tbl.getStartKeys().length, SPLITS.length + 1 + 1); assertNoErrors(doFsck(conf, false)); } finally { admin.enableCatalogJanitor(true); meta.close(); cleanupTable(table); } }
@Test(timeout=180000) public void testQuarantineMissingHFile() throws Exception { TableName table = TableName.valueOf(name.getMethodName()); final FileSystem fs = FileSystem.get(conf); HBaseFsck hbck = new HBaseFsck(conf, hbfsckExecutorService) { @Override public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) throws IOException { return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) { AtomicBoolean attemptedFirstHFile = new AtomicBoolean(false); @Override protected void checkHFile(Path p) throws IOException { if (attemptedFirstHFile.compareAndSet(false, true)) { assertTrue(fs.delete(p, true)); } super.checkHFile(p); } }; } }; doQuarantineTest(table, hbck, 4, 0, 0, 0, 1); hbck.close(); }
@Ignore @Test(timeout=180000) public void testQuarantineMissingFamdir() throws Exception { TableName table = TableName.valueOf(name.getMethodName()); final FileSystem fs = FileSystem.get(conf); HBaseFsck hbck = new HBaseFsck(conf, hbfsckExecutorService) { @Override public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) throws IOException { return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) { AtomicBoolean attemptedFirstHFile = new AtomicBoolean(false); @Override protected void checkColFamDir(Path p) throws IOException { if (attemptedFirstHFile.compareAndSet(false, true)) { assertTrue(fs.delete(p, true)); } super.checkColFamDir(p); } }; } }; doQuarantineTest(table, hbck, 3, 0, 0, 0, 1); hbck.close(); }
@Test(timeout=180000) public void testQuarantineMissingRegionDir() throws Exception { TableName table = TableName.valueOf(name.getMethodName()); final FileSystem fs = FileSystem.get(conf); HBaseFsck hbck = new HBaseFsck(conf, hbfsckExecutorService) { @Override public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) throws IOException { return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) { AtomicBoolean attemptedFirstHFile = new AtomicBoolean(false); @Override protected void checkRegionDir(Path p) throws IOException { if (attemptedFirstHFile.compareAndSet(false, true)) { assertTrue(fs.delete(p, true)); } super.checkRegionDir(p); } }; } }; doQuarantineTest(table, hbck, 3, 0, 0, 0, 1); hbck.close(); } |
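The three quarantine tests above share one fault-injection trick: an AtomicBoolean guarded by compareAndSet ensures the sabotage (deleting the first HFile, family dir, or region dir encountered) fires exactly once, even though the checker runs on an executor. A minimal sketch of that pattern with a counter standing in for fs.delete:

import java.util.concurrent.atomic.AtomicBoolean;

// compareAndSet guarantees the injected fault happens exactly once.
public class InjectOnce {
  private final AtomicBoolean attemptedFirst = new AtomicBoolean(false);
  int deletions = 0;

  void check(String path) {
    if (attemptedFirst.compareAndSet(false, true)) {
      deletions++; // first caller only: simulates fs.delete(p, true)
    }
    // ... then delegate to the real check, as super.checkHFile(p) does
  }

  public static void main(String[] args) {
    InjectOnce i = new InjectOnce();
    i.check("hfile-0");
    i.check("hfile-1");
    assert i.deletions == 1; // the fault fired exactly once
  }
}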
RegionLocations { public RegionLocations mergeLocations(RegionLocations other) { assert other != null; HRegionLocation[] newLocations = null; int max = other.locations.length; HRegionInfo regionInfo = null; for (int i = 0; i < max; i++) { HRegionLocation thisLoc = this.getRegionLocation(i); HRegionLocation otherLoc = other.getRegionLocation(i); if (regionInfo == null && otherLoc != null && otherLoc.getRegionInfo() != null) { regionInfo = otherLoc.getRegionInfo(); } HRegionLocation selectedLoc = selectRegionLocation(thisLoc, otherLoc, true, false); if (selectedLoc != thisLoc) { if (newLocations == null) { newLocations = new HRegionLocation[max]; System.arraycopy(locations, 0, newLocations, 0, i); } } if (newLocations != null) { newLocations[i] = selectedLoc; } } if (newLocations != null && regionInfo != null) { for (int i=0; i < newLocations.length; i++) { if (newLocations[i] != null) { if (!RegionReplicaUtil.isReplicasForSameRegion(regionInfo, newLocations[i].getRegionInfo())) { newLocations[i] = null; } } } } return newLocations == null ? this : new RegionLocations(newLocations); } RegionLocations(HRegionLocation... locations); RegionLocations(Collection<HRegionLocation> locations); int size(); int numNonNullElements(); boolean isEmpty(); RegionLocations removeByServer(ServerName serverName); RegionLocations remove(HRegionLocation location); RegionLocations remove(int replicaId); RegionLocations mergeLocations(RegionLocations other); RegionLocations updateLocation(HRegionLocation location,
boolean checkForEquals, boolean force); HRegionLocation getRegionLocation(int replicaId); HRegionLocation getRegionLocationByRegionName(byte[] regionName); HRegionLocation[] getRegionLocations(); HRegionLocation getDefaultRegionLocation(); HRegionLocation getRegionLocation(); @Override String toString(); } | @Test public void testMergeLocations() { RegionLocations list1, list2; list1 = new RegionLocations(); list2 = new RegionLocations(); assertTrue(list1 == list1.mergeLocations(list2)); list2 = hrll(hrl(info0, sn0)); list1 = list1.mergeLocations(list2); assertEquals(sn0, list1.getRegionLocation(0).getServerName()); list1 = hrll(); list1 = list2.mergeLocations(list1); assertEquals(sn0, list1.getRegionLocation(0).getServerName()); list1 = hrll(hrl(info0, sn0), hrl(info1, sn1)); list2 = hrll(hrl(info2, sn2)); list1 = list2.mergeLocations(list1); assertEquals(sn0, list1.getRegionLocation(0).getServerName()); assertEquals(sn1, list1.getRegionLocation(1).getServerName()); assertEquals(2, list1.size()); list1 = hrll(hrl(info0, sn0), hrl(info1, sn1)); list2 = hrll(hrl(info2, sn2)); list1 = list1.mergeLocations(list2); assertEquals(sn0, list1.getRegionLocation(0).getServerName()); assertEquals(sn1, list1.getRegionLocation(1).getServerName()); assertEquals(sn2, list1.getRegionLocation(2).getServerName()); list1 = hrll(hrl(info0, sn0), hrl(info1, sn1)); list2 = hrll(hrl(info0, sn2), hrl(info1, sn2), hrl(info9, sn3)); list1 = list2.mergeLocations(list1); assertEquals(2, list1.size()); assertEquals(sn0, list1.getRegionLocation(0).getServerName()); assertEquals(sn1, list1.getRegionLocation(1).getServerName()); list1 = hrll(hrl(info0, sn0), hrl(info1, sn1)); list2 = hrll(hrl(info0, sn2), hrl(info1, sn2), hrl(info9, sn3)); list1 = list1.mergeLocations(list2); assertEquals(10, list1.size()); assertEquals(sn2, list1.getRegionLocation(0).getServerName()); assertEquals(sn2, list1.getRegionLocation(1).getServerName()); assertEquals(sn3, list1.getRegionLocation(9).getServerName()); list1 = hrll(hrl(info0, sn0, 10), hrl(info1, sn1, 10)); list2 = hrll(hrl(info0, sn2, 11), hrl(info1, sn2, 11), hrl(info9, sn3, 11)); list1 = list1.mergeLocations(list2); assertEquals(10, list1.size()); assertEquals(sn2, list1.getRegionLocation(0).getServerName()); assertEquals(sn2, list1.getRegionLocation(1).getServerName()); assertEquals(sn3, list1.getRegionLocation(9).getServerName()); list1 = hrll(hrl(info0, sn0, 10), hrl(info1, sn1, 10)); list2 = hrll(hrl(info0, sn2, 11), hrl(info1, sn2, 11), hrl(info9, sn3, 11)); list1 = list1.mergeLocations(list2); assertEquals(10, list1.size()); assertEquals(sn2, list1.getRegionLocation(0).getServerName()); assertEquals(sn2, list1.getRegionLocation(1).getServerName()); assertEquals(sn3, list1.getRegionLocation(9).getServerName()); } |
IPCUtil { @SuppressWarnings("resource") public ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor, final CellScanner cellScanner) throws IOException { return buildCellBlock(codec, compressor, cellScanner, null); } IPCUtil(final Configuration conf); @SuppressWarnings("resource") ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor,
final CellScanner cellScanner); @SuppressWarnings("resource") ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor,
final CellScanner cellScanner, final BoundedByteBufferPool pool); CellScanner createCellScanner(final Codec codec, final CompressionCodec compressor,
final byte [] cellBlock); CellScanner createCellScanner(final Codec codec, final CompressionCodec compressor,
final byte [] cellBlock, final int offset, final int length); static ByteBuffer getDelimitedMessageAsByteBuffer(final Message m); static int write(final OutputStream dos, final Message header, final Message param,
final ByteBuffer cellBlock); static void readChunked(final DataInput in, byte[] dest, int offset, int len); static int getTotalSizeWhenWrittenDelimited(Message ... messages); static final Log LOG; } | @Test public void testBuildCellBlock() throws IOException { doBuildCellBlockUndoCellBlock(this.util, new KeyValueCodec(), null); doBuildCellBlockUndoCellBlock(this.util, new KeyValueCodec(), new DefaultCodec()); doBuildCellBlockUndoCellBlock(this.util, new KeyValueCodec(), new GzipCodec()); } |
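testBuildCellBlock exercises an encode-then-decode round trip through an optional compressor (null, Deflate, Gzip). The shape of that round trip, sketched with plain java.util.zip gzip in place of the Hadoop codec plumbing the real IPCUtil uses:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

// Lossless compress/decompress round trip, the invariant the test asserts.
public class RoundTrip {
  static byte[] compress(byte[] data) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (GZIPOutputStream gz = new GZIPOutputStream(bos)) {
      gz.write(data);
    }
    return bos.toByteArray();
  }

  static byte[] decompress(byte[] data) throws IOException {
    try (GZIPInputStream gz = new GZIPInputStream(new ByteArrayInputStream(data))) {
      return gz.readAllBytes();
    }
  }

  public static void main(String[] args) throws IOException {
    byte[] block = "cell-block-payload".getBytes(StandardCharsets.UTF_8);
    assert Arrays.equals(block, decompress(compress(block)));
  }
}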
PayloadCarryingRpcController extends TimeLimitedRpcController implements CellScannable { public CellScanner cellScanner() { return cellScanner; } PayloadCarryingRpcController(); PayloadCarryingRpcController(final CellScanner cellScanner); PayloadCarryingRpcController(final List<CellScannable> cellIterables); CellScanner cellScanner(); void setCellScanner(final CellScanner cellScanner); void setPriority(int priority); void setPriority(final TableName tn); int getPriority(); @Override void reset(); } | @Test public void testListOfCellScannerables() throws IOException { List<CellScannable> cells = new ArrayList<CellScannable>(); final int count = 10; for (int i = 0; i < count; i++) { cells.add(createCell(i)); } PayloadCarryingRpcController controller = new PayloadCarryingRpcController(cells); CellScanner cellScanner = controller.cellScanner(); int index = 0; for (; cellScanner.advance(); index++) { Cell cell = cellScanner.current(); byte [] indexBytes = Bytes.toBytes(index); assertTrue("" + index, Bytes.equals(indexBytes, 0, indexBytes.length, cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); } assertEquals(count, index); } |
AsyncProcess { public <CResult> AsyncRequestFuture submit(TableName tableName, List<? extends Row> rows, boolean atLeastOne, Batch.Callback<CResult> callback, boolean needResults) throws InterruptedIOException { return submit(null, tableName, rows, atLeastOne, callback, needResults); } AsyncProcess(ClusterConnection hc, Configuration conf, ExecutorService pool,
RpcRetryingCallerFactory rpcCaller, boolean useGlobalErrors, RpcControllerFactory rpcFactory); AsyncRequestFuture submit(TableName tableName, List<? extends Row> rows,
boolean atLeastOne, Batch.Callback<CResult> callback, boolean needResults); AsyncRequestFuture submit(ExecutorService pool, TableName tableName,
List<? extends Row> rows, boolean atLeastOne, Batch.Callback<CResult> callback,
boolean needResults); AsyncRequestFuture submitAll(TableName tableName,
List<? extends Row> rows, Batch.Callback<CResult> callback, Object[] results); AsyncRequestFuture submitAll(ExecutorService pool, TableName tableName,
List<? extends Row> rows, Batch.Callback<CResult> callback, Object[] results); boolean hasError(); RetriesExhaustedWithDetailsException waitForAllPreviousOpsAndReset(
List<Row> failedRows); static final String PRIMARY_CALL_TIMEOUT_KEY; static final String START_LOG_ERRORS_AFTER_COUNT_KEY; static final int DEFAULT_START_LOG_ERRORS_AFTER_COUNT; } | @Test public void testSubmit() throws Exception { ClusterConnection hc = createHConnection(); AsyncProcess ap = new MyAsyncProcess(hc, conf); List<Put> puts = new ArrayList<Put>(); puts.add(createPut(1, true)); ap.submit(DUMMY_TABLE, puts, false, null, false); Assert.assertTrue(puts.isEmpty()); }
@Test public void testSubmitBusyRegionServer() throws Exception { ClusterConnection hc = createHConnection(); AsyncProcess ap = new MyAsyncProcess(hc, conf); ap.taskCounterPerServer.put(sn2, new AtomicInteger(ap.maxConcurrentTasksPerServer)); List<Put> puts = new ArrayList<Put>(); puts.add(createPut(1, true)); puts.add(createPut(3, true)); puts.add(createPut(1, true)); puts.add(createPut(2, true)); ap.submit(DUMMY_TABLE, puts, false, null, false); Assert.assertEquals(" puts=" + puts, 1, puts.size()); ap.taskCounterPerServer.put(sn2, new AtomicInteger(ap.maxConcurrentTasksPerServer - 1)); ap.submit(DUMMY_TABLE, puts, false, null, false); Assert.assertTrue(puts.isEmpty()); }
@Test public void testSubmitTrue() throws IOException { final AsyncProcess ap = new MyAsyncProcess(createHConnection(), conf, false); ap.tasksInProgress.incrementAndGet(); final AtomicInteger ai = new AtomicInteger(1); ap.taskCounterPerRegion.put(hri1.getRegionName(), ai); final AtomicBoolean checkPoint = new AtomicBoolean(false); final AtomicBoolean checkPoint2 = new AtomicBoolean(false); Thread t = new Thread(){ @Override public void run(){ Threads.sleep(1000); Assert.assertFalse(checkPoint.get()); ai.decrementAndGet(); ap.tasksInProgress.decrementAndGet(); checkPoint2.set(true); } }; List<Put> puts = new ArrayList<Put>(); Put p = createPut(1, true); puts.add(p); ap.submit(DUMMY_TABLE, puts, false, null, false); Assert.assertFalse(puts.isEmpty()); t.start(); ap.submit(DUMMY_TABLE, puts, true, null, false); Assert.assertTrue(puts.isEmpty()); checkPoint.set(true); while (!checkPoint2.get()){ Threads.sleep(1); } }
@Test public void testUncheckedException() throws Exception { ClusterConnection hc = createHConnection(); MyThreadPoolExecutor myPool = new MyThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(200)); AsyncProcess ap = new AsyncProcessForThrowableCheck(hc, conf, myPool); List<Put> puts = new ArrayList<Put>(); puts.add(createPut(1, true)); ap.submit(DUMMY_TABLE, puts, false, null, false); Assert.assertTrue(puts.isEmpty()); } |
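testSubmitBusyRegionServer above pokes at a per-server admission check: rows destined for a server whose in-flight counter has reached the maximum stay in the input list, and are accepted once the counter drops. A sketch of that gate with illustrative names (not the AsyncProcess internals):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

// Per-server concurrency gate: accept work only below the configured ceiling.
public class PerServerGate {
  private final ConcurrentHashMap<String, AtomicInteger> tasksPerServer =
      new ConcurrentHashMap<>();
  private final int maxPerServer;

  PerServerGate(int maxPerServer) { this.maxPerServer = maxPerServer; }

  boolean canTakeNewOperation(String server) {
    AtomicInteger count = tasksPerServer.get(server);
    return count == null || count.get() < maxPerServer;
  }

  public static void main(String[] args) {
    PerServerGate gate = new PerServerGate(2);
    gate.tasksPerServer.put("sn2", new AtomicInteger(2));
    assert !gate.canTakeNewOperation("sn2"); // saturated: row stays queued
    gate.tasksPerServer.get("sn2").decrementAndGet();
    assert gate.canTakeNewOperation("sn2");  // freed: resubmit drains the list
  }
}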
AsyncProcess { @VisibleForTesting void waitUntilDone() throws InterruptedIOException { waitForMaximumCurrentTasks(0); } AsyncProcess(ClusterConnection hc, Configuration conf, ExecutorService pool,
RpcRetryingCallerFactory rpcCaller, boolean useGlobalErrors, RpcControllerFactory rpcFactory); AsyncRequestFuture submit(TableName tableName, List<? extends Row> rows,
boolean atLeastOne, Batch.Callback<CResult> callback, boolean needResults); AsyncRequestFuture submit(ExecutorService pool, TableName tableName,
List<? extends Row> rows, boolean atLeastOne, Batch.Callback<CResult> callback,
boolean needResults); AsyncRequestFuture submitAll(TableName tableName,
List<? extends Row> rows, Batch.Callback<CResult> callback, Object[] results); AsyncRequestFuture submitAll(ExecutorService pool, TableName tableName,
List<? extends Row> rows, Batch.Callback<CResult> callback, Object[] results); boolean hasError(); RetriesExhaustedWithDetailsException waitForAllPreviousOpsAndReset(
List<Row> failedRows); static final String PRIMARY_CALL_TIMEOUT_KEY; static final String START_LOG_ERRORS_AFTER_COUNT_KEY; static final int DEFAULT_START_LOG_ERRORS_AFTER_COUNT; } | @Test public void testHTableFailedPutAndNewPut() throws Exception { ClusterConnection conn = createHConnection(); BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, null, null, new BufferedMutatorParams(DUMMY_TABLE).writeBufferSize(0)); MyAsyncProcess ap = new MyAsyncProcess(conn, conf, true); mutator.ap = ap; Put p = createPut(1, false); mutator.mutate(p); ap.waitUntilDone(); p = createPut(1, true); Assert.assertEquals(0, mutator.getWriteBuffer().size()); try { mutator.mutate(p); Assert.fail(); } catch (RetriesExhaustedException expected) { } Assert.assertEquals("the put should not been inserted.", 0, mutator.getWriteBuffer().size()); } |
AsyncProcess { protected ConnectionManager.ServerErrorTracker createServerErrorTracker() { return new ConnectionManager.ServerErrorTracker( this.serverTrackerTimeout, this.numTries); } AsyncProcess(ClusterConnection hc, Configuration conf, ExecutorService pool,
RpcRetryingCallerFactory rpcCaller, boolean useGlobalErrors, RpcControllerFactory rpcFactory); AsyncRequestFuture submit(TableName tableName, List<? extends Row> rows,
boolean atLeastOne, Batch.Callback<CResult> callback, boolean needResults); AsyncRequestFuture submit(ExecutorService pool, TableName tableName,
List<? extends Row> rows, boolean atLeastOne, Batch.Callback<CResult> callback,
boolean needResults); AsyncRequestFuture submitAll(TableName tableName,
List<? extends Row> rows, Batch.Callback<CResult> callback, Object[] results); AsyncRequestFuture submitAll(ExecutorService pool, TableName tableName,
List<? extends Row> rows, Batch.Callback<CResult> callback, Object[] results); boolean hasError(); RetriesExhaustedWithDetailsException waitForAllPreviousOpsAndReset(
List<Row> failedRows); static final String PRIMARY_CALL_TIMEOUT_KEY; static final String START_LOG_ERRORS_AFTER_COUNT_KEY; static final int DEFAULT_START_LOG_ERRORS_AFTER_COUNT; } | @Test public void testErrorsServers() throws IOException { Configuration configuration = new Configuration(conf); ClusterConnection conn = new MyConnectionImpl(configuration); BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, null, null, new BufferedMutatorParams(DUMMY_TABLE)); configuration.setBoolean(ConnectionManager.RETRIES_BY_SERVER_KEY, true); MyAsyncProcess ap = new MyAsyncProcess(conn, configuration, true); mutator.ap = ap; Assert.assertNotNull(mutator.ap.createServerErrorTracker()); Assert.assertTrue(mutator.ap.serverTrackerTimeout > 200); mutator.ap.serverTrackerTimeout = 1; Put p = createPut(1, false); mutator.mutate(p); try { mutator.flush(); Assert.fail(); } catch (RetriesExhaustedWithDetailsException expected) { } Assert.assertEquals(NB_RETRIES + 1, ap.callsCt.get()); }
@Test public void testGlobalErrors() throws IOException { ClusterConnection conn = new MyConnectionImpl(conf); BufferedMutatorImpl mutator = (BufferedMutatorImpl) conn.getBufferedMutator(DUMMY_TABLE); AsyncProcessWithFailure ap = new AsyncProcessWithFailure(conn, conf, new IOException("test")); mutator.ap = ap; Assert.assertNotNull(mutator.ap.createServerErrorTracker()); Put p = createPut(1, true); mutator.mutate(p); try { mutator.flush(); Assert.fail(); } catch (RetriesExhaustedWithDetailsException expected) { } Assert.assertEquals(NB_RETRIES + 1, ap.callsCt.get()); }
@Test public void testCallQueueTooLarge() throws IOException { ClusterConnection conn = new MyConnectionImpl(conf); BufferedMutatorImpl mutator = (BufferedMutatorImpl) conn.getBufferedMutator(DUMMY_TABLE); AsyncProcessWithFailure ap = new AsyncProcessWithFailure(conn, conf, new CallQueueTooBigException()); mutator.ap = ap; Assert.assertNotNull(mutator.ap.createServerErrorTracker()); Put p = createPut(1, true); mutator.mutate(p); try { mutator.flush(); Assert.fail(); } catch (RetriesExhaustedWithDetailsException expected) { } Assert.assertEquals(NB_RETRIES + 1, ap.callsCt.get()); } |
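All three failure tests above assert the same call-count arithmetic: a flush against an always-failing endpoint performs the initial attempt plus NB_RETRIES retries before surfacing RetriesExhaustedWithDetailsException, so the failure is observed NB_RETRIES + 1 times. A trivial sketch of that accounting:

// Initial attempt + NB_RETRIES retries = NB_RETRIES + 1 observed calls.
public class RetryCount {
  static int attemptAlwaysFailing(int nbRetries) {
    int calls = 0;
    for (int attempt = 0; attempt <= nbRetries; attempt++) {
      calls++; // every attempt reaches the failing endpoint before retrying
    }
    return calls; // caller then throws RetriesExhaustedWithDetailsException
  }

  public static void main(String[] args) {
    assert attemptAlwaysFailing(3) == 3 + 1; // matches the ap.callsCt assertions
  }
}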