Columns: src_fm_fc_ms_ff (string, lengths 43 to 86.8k) and target (string, lengths 20 to 276k)
SplitTransaction { public boolean prepare() { if (!this.parent.isSplittable()) return false; if (this.splitrow == null) return false; HRegionInfo hri = this.parent.getRegionInfo(); parent.prepareToSplit(); byte [] startKey = hri.getStartKey(); byte [] endKey = hri.getEndKey(); if (Bytes.equals(startKey, splitrow) || !this.parent.getRegionInfo().containsRow(splitrow)) { LOG.info("Split row is not inside region key range or is equal to " + "startkey: " + Bytes.toStringBinary(this.splitrow)); return false; } long rid = getDaughterRegionIdTimestamp(hri); this.hri_a = new HRegionInfo(hri.getTable(), startKey, this.splitrow, false, rid); this.hri_b = new HRegionInfo(hri.getTable(), this.splitrow, endKey, false, rid); return true; } SplitTransaction(final HRegion r, final byte [] splitrow); boolean prepare(); PairOfSameType<HRegion> stepsBeforePONR(final Server server, final RegionServerServices services, boolean testing); PairOfSameType<HRegion> execute(final Server server, final RegionServerServices services); PairOfSameType<HRegion> stepsAfterPONR(final Server server, final RegionServerServices services, PairOfSameType<HRegion> regions); Put addLocation(final Put p, final ServerName sn, long openSeqNum); @SuppressWarnings("deprecation") boolean rollback(final Server server, final RegionServerServices services); public SplitTransactionCoordination.SplitTransactionDetails std; }
@Test public void testPrepare() throws IOException { prepareGOOD_SPLIT_ROW(); } @Test public void testPrepareWithRegionsWithReference() throws IOException { HStore storeMock = Mockito.mock(HStore.class); when(storeMock.hasReferences()).thenReturn(true); when(storeMock.getFamily()).thenReturn(new HColumnDescriptor("cf")); when(storeMock.close()).thenReturn(ImmutableList.<StoreFile>of()); this.parent.stores.put(Bytes.toBytes(""), storeMock); SplitTransaction st = new SplitTransaction(this.parent, GOOD_SPLIT_ROW); assertFalse("a region should not be splittable if it has instances of store file references", st.prepare()); } @Test public void testPrepareWithBadSplitRow() throws IOException { SplitTransaction st = new SplitTransaction(this.parent, STARTROW); assertFalse(st.prepare()); st = new SplitTransaction(this.parent, HConstants.EMPTY_BYTE_ARRAY); assertFalse(st.prepare()); st = new SplitTransaction(this.parent, new byte [] {'A', 'A', 'A'}); assertFalse(st.prepare()); st = new SplitTransaction(this.parent, ENDROW); assertFalse(st.prepare()); } @Test public void testPrepareWithClosedRegion() throws IOException { this.parent.close(); SplitTransaction st = new SplitTransaction(this.parent, GOOD_SPLIT_ROW); assertFalse(st.prepare()); }
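For orientation, the split-row checks that prepare() performs can be shown in isolation. The sketch below is hypothetical (class and method names are invented) and uses a plain unsigned lexicographic comparison in place of HBase's Bytes utility; an empty end key plays the role of HBase's open upper bound.

import java.util.Arrays;

// Hypothetical standalone sketch of SplitTransaction.prepare()'s split-row validation.
final class SplitRowCheckSketch {
  static boolean isValidSplitRow(byte[] startKey, byte[] endKey, byte[] splitRow) {
    if (splitRow == null) return false;
    // Splitting exactly at the start key would leave one daughter empty.
    if (Arrays.equals(startKey, splitRow)) return false;
    // splitRow must fall inside [startKey, endKey); an empty endKey means "no upper bound".
    if (compare(splitRow, startKey) < 0) return false;
    if (endKey.length > 0 && compare(splitRow, endKey) >= 0) return false;
    return true;
  }

  // Unsigned lexicographic byte[] comparison, like Bytes.compareTo.
  private static int compare(byte[] a, byte[] b) {
    int n = Math.min(a.length, b.length);
    for (int i = 0; i < n; i++) {
      int d = (a[i] & 0xff) - (b[i] & 0xff);
      if (d != 0) return d;
    }
    return a.length - b.length;
  }

  public static void main(String[] args) {
    byte[] start = "aaa".getBytes(), end = "zzz".getBytes();
    System.out.println(isValidSplitRow(start, end, "ccc".getBytes())); // true
    System.out.println(isValidSplitRow(start, end, "aaa".getBytes())); // false: equals start key
    System.out.println(isValidSplitRow(start, end, "zzz".getBytes())); // false: outside [start, end)
  }
}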
HtmlQuoting { public static boolean needsQuoting(byte[] data, int off, int len) { if (off+len > data.length) { throw new IllegalStateException("off+len=" + (off + len) + " must not exceed data length=" + data.length); } for(int i=off; i< off+len; ++i) { switch(data[i]) { case '&': case '<': case '>': case '\'': case '"': return true; default: break; } } return false; } static boolean needsQuoting(byte[] data, int off, int len); static boolean needsQuoting(String str); static void quoteHtmlChars(OutputStream output, byte[] buffer, int off, int len); static String quoteHtmlChars(String item); static OutputStream quoteOutputStream(final OutputStream out ); static String unquoteHtmlChars(String item); static void main(String[] args); }
@Test public void testNeedsQuoting() throws Exception { assertTrue(HtmlQuoting.needsQuoting("abcde>")); assertTrue(HtmlQuoting.needsQuoting("<abcde")); assertTrue(HtmlQuoting.needsQuoting("abc'de")); assertTrue(HtmlQuoting.needsQuoting("abcde\"")); assertTrue(HtmlQuoting.needsQuoting("&")); assertFalse(HtmlQuoting.needsQuoting("")); assertFalse(HtmlQuoting.needsQuoting("ab\ncdef")); assertFalse(HtmlQuoting.needsQuoting(null)); }
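The quoting logic under test is small enough to reproduce in isolation. This is a hypothetical JDK-only sketch, not the Hadoop implementation; it shows the scan-then-escape shape that makes needsQuoting a cheap fast path for quoteHtmlChars.

// Hypothetical standalone sketch of HtmlQuoting-style escaping.
final class HtmlEscapeSketch {
  static boolean needsQuoting(String s) {
    if (s == null) return false; // null-tolerant, as the test asserts
    for (int i = 0; i < s.length(); i++) {
      switch (s.charAt(i)) {
        case '&': case '<': case '>': case '\'': case '"': return true;
        default: break;
      }
    }
    return false;
  }

  static String quote(String s) {
    if (!needsQuoting(s)) return s; // common case: no allocation
    StringBuilder sb = new StringBuilder(s.length() + 8);
    for (int i = 0; i < s.length(); i++) {
      char c = s.charAt(i);
      switch (c) {
        case '&':  sb.append("&amp;");  break;
        case '<':  sb.append("&lt;");   break;
        case '>':  sb.append("&gt;");   break;
        case '\'': sb.append("&apos;"); break;
        case '"':  sb.append("&quot;"); break;
        default:   sb.append(c);
      }
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    System.out.println(needsQuoting("ab\ncdef")); // false: newline needs no HTML escape
    System.out.println(quote("a<b&c"));           // a&lt;b&amp;c
  }
}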
SplitTransaction { public PairOfSameType<HRegion> execute(final Server server, final RegionServerServices services) throws IOException { useZKForAssignment = server == null ? true : ConfigUtil.useZKForAssignment(server.getConfiguration()); if (useCoordinatedStateManager(server)) { std = ((BaseCoordinatedStateManager) server.getCoordinatedStateManager()) .getSplitTransactionCoordination().getDefaultDetails(); } PairOfSameType<HRegion> regions = createDaughters(server, services); if (this.parent.getCoprocessorHost() != null) { this.parent.getCoprocessorHost().preSplitAfterPONR(); } return stepsAfterPONR(server, services, regions); } SplitTransaction(final HRegion r, final byte [] splitrow); boolean prepare(); PairOfSameType<HRegion> stepsBeforePONR(final Server server, final RegionServerServices services, boolean testing); PairOfSameType<HRegion> execute(final Server server, final RegionServerServices services); PairOfSameType<HRegion> stepsAfterPONR(final Server server, final RegionServerServices services, PairOfSameType<HRegion> regions); Put addLocation(final Put p, final ServerName sn, long openSeqNum); @SuppressWarnings("deprecation") boolean rollback(final Server server, final RegionServerServices services); public SplitTransactionCoordination.SplitTransactionDetails std; }
@Test public void testWholesomeSplit() throws IOException { final int rowcount = TEST_UTIL.loadRegion(this.parent, CF, true); assertTrue(rowcount > 0); int parentRowCount = countRows(this.parent); assertEquals(rowcount, parentRowCount); CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration()); ((LruBlockCache) cacheConf.getBlockCache()).clearCache(); SplitTransaction st = prepareGOOD_SPLIT_ROW(); Server mockServer = Mockito.mock(Server.class); when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration()); PairOfSameType<HRegion> daughters = st.execute(mockServer, null); assertTrue(this.fs.exists(this.parent.getRegionFileSystem().getSplitsDir())); assertTrue(this.parent.isClosed()); assertEquals(0, this.fs.listStatus(this.parent.getRegionFileSystem().getSplitsDir()).length); assertTrue(Bytes.equals(this.parent.getStartKey(), daughters.getFirst().getStartKey())); assertTrue(Bytes.equals(GOOD_SPLIT_ROW, daughters.getFirst().getEndKey())); assertTrue(Bytes.equals(daughters.getSecond().getStartKey(), GOOD_SPLIT_ROW)); assertTrue(Bytes.equals(this.parent.getEndKey(), daughters.getSecond().getEndKey())); int daughtersRowCount = 0; for (HRegion openRegion: daughters) { try { int count = countRows(openRegion); assertTrue(count > 0 && count != rowcount); daughtersRowCount += count; } finally { HRegion.closeHRegion(openRegion); } } assertEquals(rowcount, daughtersRowCount); assertTrue(!this.parent.lock.writeLock().isHeldByCurrentThread()); }
SplitTransaction { @SuppressWarnings("deprecation") public boolean rollback(final Server server, final RegionServerServices services) throws IOException { if (this.parent.getCoprocessorHost() != null) { this.parent.getCoprocessorHost().preRollBackSplit(); } boolean result = true; ListIterator<JournalEntry> iterator = this.journal.listIterator(this.journal.size()); while (iterator.hasPrevious()) { JournalEntry je = iterator.previous(); switch(je) { case SET_SPLITTING: if (useCoordinatedStateManager(server) && server instanceof HRegionServer) { ((BaseCoordinatedStateManager) server.getCoordinatedStateManager()) .getSplitTransactionCoordination().clean(this.parent.getRegionInfo()); } else if (services != null && !useZKForAssignment && !services.reportRegionTransition(TransitionCode.SPLIT_REVERTED, parent.getRegionInfo(), hri_a, hri_b)) { return false; } break; case CREATE_SPLIT_DIR: this.parent.writestate.writesEnabled = true; this.parent.getRegionFileSystem().cleanupSplitsDir(); break; case CLOSED_PARENT_REGION: try { this.parent.initialize(); } catch (IOException e) { LOG.error("Failed rolling back CLOSED_PARENT_REGION of region " + this.parent.getRegionNameAsString(), e); throw new RuntimeException(e); } break; case STARTED_REGION_A_CREATION: this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_a); break; case STARTED_REGION_B_CREATION: this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_b); break; case OFFLINED_PARENT: if (services != null) services.addToOnlineRegions(this.parent); break; case PONR: return false; default: throw new RuntimeException("Unhandled journal entry: " + je); } } if (this.parent.getCoprocessorHost() != null) { this.parent.getCoprocessorHost().postRollBackSplit(); } return result; } SplitTransaction(final HRegion r, final byte [] splitrow); boolean prepare(); PairOfSameType<HRegion> stepsBeforePONR(final Server server, final RegionServerServices services, boolean testing); PairOfSameType<HRegion> execute(final Server server, final RegionServerServices services); PairOfSameType<HRegion> stepsAfterPONR(final Server server, final RegionServerServices services, PairOfSameType<HRegion> regions); Put addLocation(final Put p, final ServerName sn, long openSeqNum); @SuppressWarnings("deprecation") boolean rollback(final Server server, final RegionServerServices services); public SplitTransactionCoordination.SplitTransactionDetails std; }
@Test public void testRollback() throws IOException { final int rowcount = TEST_UTIL.loadRegion(this.parent, CF); assertTrue(rowcount > 0); int parentRowCount = countRows(this.parent); assertEquals(rowcount, parentRowCount); HRegion spiedRegion = spy(this.parent); SplitTransaction st = prepareGOOD_SPLIT_ROW(spiedRegion); SplitTransaction spiedUponSt = spy(st); when(spiedRegion.createDaughterRegionFromSplits(spiedUponSt.getSecondDaughter())). thenThrow(new MockedFailedDaughterCreation()); boolean expectedException = false; Server mockServer = Mockito.mock(Server.class); when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration()); try { spiedUponSt.execute(mockServer, null); } catch (MockedFailedDaughterCreation e) { expectedException = true; } assertTrue(expectedException); assertTrue(spiedUponSt.rollback(null, null)); int parentRowCount2 = countRows(this.parent); assertEquals(parentRowCount, parentRowCount2); assertTrue(!this.fs.exists(HRegion.getRegionDir(this.testdir, st.getFirstDaughter()))); assertTrue(!this.fs.exists(HRegion.getRegionDir(this.testdir, st.getSecondDaughter()))); assertTrue(!this.parent.lock.writeLock().isHeldByCurrentThread()); assertTrue(st.prepare()); PairOfSameType<HRegion> daughters = st.execute(mockServer, null); int daughtersRowCount = 0; for (HRegion openRegion: daughters) { try { int count = countRows(openRegion); assertTrue(count > 0 && count != rowcount); daughtersRowCount += count; } finally { HRegion.closeHRegion(openRegion); } } assertEquals(rowcount, daughtersRowCount); assertTrue(!this.parent.lock.writeLock().isHeldByCurrentThread()); assertTrue("Rollback hooks should be called.", wasRollBackHookCalled()); }
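What testRollback exercises is a journal-then-undo pattern: every completed step is recorded, and on failure the journal is replayed in reverse. A minimal generic sketch of that pattern, with hypothetical step names, is:

import java.util.ArrayDeque;
import java.util.Deque;

// Hypothetical sketch of the journal/rollback pattern SplitTransaction uses.
final class JournaledTransactionSketch {
  enum Step { SET_SPLITTING, CREATE_SPLIT_DIR, CLOSED_PARENT_REGION }

  private final Deque<Step> journal = new ArrayDeque<>();

  void execute() {
    journal.push(Step.SET_SPLITTING);
    journal.push(Step.CREATE_SPLIT_DIR);
    throw new RuntimeException("simulated failure before the point of no return");
  }

  boolean rollback() {
    while (!journal.isEmpty()) {
      Step s = journal.pop(); // undo most recent step first
      switch (s) {
        case SET_SPLITTING:        System.out.println("undo: clear splitting state"); break;
        case CREATE_SPLIT_DIR:     System.out.println("undo: remove split dir");      break;
        case CLOSED_PARENT_REGION: System.out.println("undo: reopen parent region");  break;
      }
    }
    return true; // the real code instead returns false once PONR is in the journal
  }

  public static void main(String[] args) {
    JournaledTransactionSketch t = new JournaledTransactionSketch();
    try { t.execute(); } catch (RuntimeException e) { t.rollback(); }
  }
}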
RegionMergeTransaction { public boolean prepare(final RegionServerServices services) { if (!region_a.getTableDesc().getTableName() .equals(region_b.getTableDesc().getTableName())) { LOG.info("Can't merge regions " + region_a + "," + region_b + " because they do not belong to the same table"); return false; } if (region_a.getRegionInfo().equals(region_b.getRegionInfo())) { LOG.info("Can't merge the same region " + region_a); return false; } if (!forcible && !HRegionInfo.areAdjacent(region_a.getRegionInfo(), region_b.getRegionInfo())) { String msg = "Skip merging " + this.region_a.getRegionNameAsString() + " and " + this.region_b.getRegionNameAsString() + ", because they are not adjacent."; LOG.info(msg); return false; } if (!this.region_a.isMergeable() || !this.region_b.isMergeable()) { return false; } try { boolean regionAHasMergeQualifier = hasMergeQualifierInMeta(services, region_a.getRegionName()); if (regionAHasMergeQualifier || hasMergeQualifierInMeta(services, region_b.getRegionName())) { LOG.debug("Region " + (regionAHasMergeQualifier ? region_a.getRegionNameAsString() : region_b.getRegionNameAsString()) + " is not mergeable because it has a merge qualifier in META"); return false; } } catch (IOException e) { LOG.warn("Failed to determine whether a merge is possible for " + region_a.getRegionNameAsString() + " and " + region_b.getRegionNameAsString(), e); return false; } this.mergedRegionInfo = getMergedRegionInfo(region_a.getRegionInfo(), region_b.getRegionInfo()); return true; } RegionMergeTransaction(final HRegion a, final HRegion b, final boolean forcible); boolean prepare(final RegionServerServices services); HRegion execute(final Server server, final RegionServerServices services); HRegion stepsAfterPONR(final Server server, final RegionServerServices services, HRegion mergedRegion); void prepareMutationsForMerge(HRegionInfo mergedRegion, HRegionInfo regionA, HRegionInfo regionB, ServerName serverName, List<Mutation> mutations); @SuppressWarnings("deprecation") Put addLocation(final Put p, final ServerName sn, long openSeqNum); HRegion stepsBeforePONR(final Server server, final RegionServerServices services, boolean testing); static HRegionInfo getMergedRegionInfo(final HRegionInfo a, final HRegionInfo b); @SuppressWarnings("deprecation") boolean rollback(final Server server, final RegionServerServices services); static void createNodeMerging(final ZooKeeperWatcher zkw, final HRegionInfo region, final ServerName serverName, final HRegionInfo a, final HRegionInfo b); static int transitionMergingNode(ZooKeeperWatcher zkw, HRegionInfo merged, HRegionInfo a, HRegionInfo b, ServerName serverName, final int znodeVersion, final EventType beginState, final EventType endState); }
@Test public void testPrepare() throws IOException { prepareOnGoodRegions(); } @Test public void testPrepareWithSameRegion() throws IOException { RegionMergeTransaction mt = new RegionMergeTransaction(this.region_a, this.region_a, true); assertFalse("should not merge the same region even if forcible", mt.prepare(null)); } @Test public void testPrepareWithRegionsNotAdjacent() throws IOException { RegionMergeTransaction mt = new RegionMergeTransaction(this.region_a, this.region_c, false); assertFalse("should not merge two regions that are not adjacent unless the merge is forcible", mt.prepare(null)); } @Test public void testPrepareWithRegionsWithReference() throws IOException { HStore storeMock = Mockito.mock(HStore.class); when(storeMock.hasReferences()).thenReturn(true); when(storeMock.getFamily()).thenReturn(new HColumnDescriptor("cf")); when(storeMock.close()).thenReturn(ImmutableList.<StoreFile>of()); this.region_a.stores.put(Bytes.toBytes(""), storeMock); RegionMergeTransaction mt = new RegionMergeTransaction(this.region_a, this.region_b, false); assertFalse( "a region should not be mergeable if it has instances of store file references", mt.prepare(null)); } @Test public void testPrepareWithClosedRegion() throws IOException { this.region_a.close(); RegionMergeTransaction mt = new RegionMergeTransaction(this.region_a, this.region_b, false); assertFalse(mt.prepare(null)); }
RegionMergeTransaction { @SuppressWarnings("deprecation") public boolean rollback(final Server server, final RegionServerServices services) throws IOException { assert this.mergedRegionInfo != null; if (rsCoprocessorHost != null) { rsCoprocessorHost.preRollBackMerge(this.region_a, this.region_b); } boolean result = true; ListIterator<JournalEntry> iterator = this.journal .listIterator(this.journal.size()); while (iterator.hasPrevious()) { JournalEntry je = iterator.previous(); switch (je) { case SET_MERGING_IN_ZK: if (useZKAndZKIsSet(server)) { cleanZK(server, this.mergedRegionInfo); } else if (services != null && !useZKForAssignment && !services.reportRegionTransition(TransitionCode.MERGE_REVERTED, mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) { return false; } break; case CREATED_MERGE_DIR: this.region_a.writestate.writesEnabled = true; this.region_b.writestate.writesEnabled = true; this.region_a.getRegionFileSystem().cleanupMergesDir(); break; case CLOSED_REGION_A: try { this.region_a.initialize(); } catch (IOException e) { LOG.error("Failed rolling back CLOSED_REGION_A of region " + this.region_a.getRegionNameAsString(), e); throw new RuntimeException(e); } break; case OFFLINED_REGION_A: if (services != null) services.addToOnlineRegions(this.region_a); break; case CLOSED_REGION_B: try { this.region_b.initialize(); } catch (IOException e) { LOG.error("Failed rolling back CLOSED_REGION_B of region " + this.region_b.getRegionNameAsString(), e); throw new RuntimeException(e); } break; case OFFLINED_REGION_B: if (services != null) services.addToOnlineRegions(this.region_b); break; case STARTED_MERGED_REGION_CREATION: this.region_a.getRegionFileSystem().cleanupMergedRegion( this.mergedRegionInfo); break; case PONR: return false; default: throw new RuntimeException("Unhandled journal entry: " + je); } } if (rsCoprocessorHost != null) { rsCoprocessorHost.postRollBackMerge(this.region_a, this.region_b); } return result; } RegionMergeTransaction(final HRegion a, final HRegion b, final boolean forcible); boolean prepare(final RegionServerServices services); HRegion execute(final Server server, final RegionServerServices services); HRegion stepsAfterPONR(final Server server, final RegionServerServices services, HRegion mergedRegion); void prepareMutationsForMerge(HRegionInfo mergedRegion, HRegionInfo regionA, HRegionInfo regionB, ServerName serverName, List<Mutation> mutations); @SuppressWarnings("deprecation") Put addLocation(final Put p, final ServerName sn, long openSeqNum); HRegion stepsBeforePONR(final Server server, final RegionServerServices services, boolean testing); static HRegionInfo getMergedRegionInfo(final HRegionInfo a, final HRegionInfo b); @SuppressWarnings("deprecation") boolean rollback(final Server server, final RegionServerServices services); static void createNodeMerging(final ZooKeeperWatcher zkw, final HRegionInfo region, final ServerName serverName, final HRegionInfo a, final HRegionInfo b); static int transitionMergingNode(ZooKeeperWatcher zkw, HRegionInfo merged, HRegionInfo a, HRegionInfo b, ServerName serverName, final int znodeVersion, final EventType beginState, final EventType endState); }
@Test public void testRollback() throws IOException, InterruptedException { final int rowCountOfRegionA = loadRegion(this.region_a, CF, true); final int rowCountOfRegionB = loadRegion(this.region_b, CF, true); assertTrue(rowCountOfRegionA > 0 && rowCountOfRegionB > 0); assertEquals(rowCountOfRegionA, countRows(this.region_a)); assertEquals(rowCountOfRegionB, countRows(this.region_b)); RegionMergeTransaction mt = prepareOnGoodRegions(); when(mt.createMergedRegionFromMerges(region_a, region_b, mt.getMergedRegionInfo())).thenThrow( new MockedFailedMergedRegionCreation()); boolean expectedException = false; TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_PORT, 0); CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager( TEST_UTIL.getConfiguration()); Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration(), cp); try { mt.execute(mockServer, null); } catch (MockedFailedMergedRegionCreation e) { expectedException = true; } assertTrue(expectedException); assertTrue(mt.rollback(null, null)); int rowCountOfRegionA2 = countRows(this.region_a); assertEquals(rowCountOfRegionA, rowCountOfRegionA2); int rowCountOfRegionB2 = countRows(this.region_b); assertEquals(rowCountOfRegionB, rowCountOfRegionB2); assertTrue(!this.fs.exists(HRegion.getRegionDir(this.testdir, mt.getMergedRegionInfo()))); assertTrue(!this.region_a.lock.writeLock().isHeldByCurrentThread()); assertTrue(!this.region_b.lock.writeLock().isHeldByCurrentThread()); assertTrue(mt.prepare(null)); HRegion mergedRegion = mt.execute(mockServer, null); try { int mergedRegionRowCount = countRows(mergedRegion); assertEquals((rowCountOfRegionA + rowCountOfRegionB), mergedRegionRowCount); } finally { HRegion.closeHRegion(mergedRegion); } assertTrue(!this.region_a.lock.writeLock().isHeldByCurrentThread()); assertTrue(!this.region_b.lock.writeLock().isHeldByCurrentThread()); }
RegionMergeTransaction { public static HRegionInfo getMergedRegionInfo(final HRegionInfo a, final HRegionInfo b) { long rid = EnvironmentEdgeManager.currentTimeMillis(); if (rid < a.getRegionId() || rid < b.getRegionId()) { LOG.warn("Clock skew; merging regions id are " + a.getRegionId() + " and " + b.getRegionId() + ", but current time here is " + rid); rid = Math.max(a.getRegionId(), b.getRegionId()) + 1; } byte[] startKey = null; byte[] endKey = null; if (a.compareTo(b) <= 0) { startKey = a.getStartKey(); } else { startKey = b.getStartKey(); } if (Bytes.equals(a.getEndKey(), HConstants.EMPTY_BYTE_ARRAY) || (!Bytes.equals(b.getEndKey(), HConstants.EMPTY_BYTE_ARRAY) && Bytes.compareTo(a.getEndKey(), b.getEndKey()) > 0)) { endKey = a.getEndKey(); } else { endKey = b.getEndKey(); } HRegionInfo mergedRegionInfo = new HRegionInfo(a.getTable(), startKey, endKey, false, rid); return mergedRegionInfo; } RegionMergeTransaction(final HRegion a, final HRegion b, final boolean forcible); boolean prepare(final RegionServerServices services); HRegion execute(final Server server, final RegionServerServices services); HRegion stepsAfterPONR(final Server server, final RegionServerServices services, HRegion mergedRegion); void prepareMutationsForMerge(HRegionInfo mergedRegion, HRegionInfo regionA, HRegionInfo regionB, ServerName serverName, List<Mutation> mutations); @SuppressWarnings("deprecation") Put addLocation(final Put p, final ServerName sn, long openSeqNum); HRegion stepsBeforePONR(final Server server, final RegionServerServices services, boolean testing); static HRegionInfo getMergedRegionInfo(final HRegionInfo a, final HRegionInfo b); @SuppressWarnings("deprecation") boolean rollback(final Server server, final RegionServerServices services); static void createNodeMerging(final ZooKeeperWatcher zkw, final HRegionInfo region, final ServerName serverName, final HRegionInfo a, final HRegionInfo b); static int transitionMergingNode(ZooKeeperWatcher zkw, HRegionInfo merged, HRegionInfo a, HRegionInfo b, ServerName serverName, final int znodeVersion, final EventType beginState, final EventType endState); }
@Test public void testMergedRegionBoundary() { TableName tableName = TableName.valueOf("testMergedRegionBoundary"); byte[] a = Bytes.toBytes("a"); byte[] b = Bytes.toBytes("b"); byte[] z = Bytes.toBytes("z"); HRegionInfo r1 = new HRegionInfo(tableName); HRegionInfo r2 = new HRegionInfo(tableName, a, z); HRegionInfo m = RegionMergeTransaction.getMergedRegionInfo(r1, r2); assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey()) && Bytes.equals(m.getEndKey(), r1.getEndKey())); r1 = new HRegionInfo(tableName, null, a); r2 = new HRegionInfo(tableName, a, z); m = RegionMergeTransaction.getMergedRegionInfo(r1, r2); assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey()) && Bytes.equals(m.getEndKey(), r2.getEndKey())); r1 = new HRegionInfo(tableName, null, a); r2 = new HRegionInfo(tableName, z, null); m = RegionMergeTransaction.getMergedRegionInfo(r1, r2); assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey()) && Bytes.equals(m.getEndKey(), r2.getEndKey())); r1 = new HRegionInfo(tableName, a, z); r2 = new HRegionInfo(tableName, z, null); m = RegionMergeTransaction.getMergedRegionInfo(r1, r2); assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey()) && Bytes.equals(m.getEndKey(), r2.getEndKey())); r1 = new HRegionInfo(tableName, a, b); r2 = new HRegionInfo(tableName, b, z); m = RegionMergeTransaction.getMergedRegionInfo(r1, r2); assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey()) && Bytes.equals(m.getEndKey(), r2.getEndKey())); }
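The boundary rule getMergedRegionInfo applies is: take the smaller start key and the larger end key, where an empty byte[] is the open key. A hypothetical standalone sketch (names invented):

// Hypothetical sketch of merged-region boundary selection.
final class MergedBoundarySketch {
  static byte[][] mergedRange(byte[] startA, byte[] endA, byte[] startB, byte[] endB) {
    // Start keys: a plain lexicographic minimum works, because the empty open key
    // already sorts before every non-empty key.
    byte[] start = compare(startA, startB) <= 0 ? startA : startB;
    // End keys: the empty open key means "no upper bound", so it must win explicitly.
    byte[] end = (endA.length == 0 || (endB.length != 0 && compare(endA, endB) > 0)) ? endA : endB;
    return new byte[][] { start, end };
  }

  private static int compare(byte[] a, byte[] b) {
    int n = Math.min(a.length, b.length);
    for (int i = 0; i < n; i++) {
      int d = (a[i] & 0xff) - (b[i] & 0xff);
      if (d != 0) return d;
    }
    return a.length - b.length;
  }

  private static String show(byte[] k) { return k.length == 0 ? "<open>" : new String(k); }

  public static void main(String[] args) {
    byte[] open = new byte[0], a = "a".getBytes(), z = "z".getBytes();
    byte[][] r = mergedRange(open, a, a, z); // [open,a) merged with [a,z)
    System.out.println(show(r[0]) + " .. " + show(r[1])); // <open> .. z
  }
}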
StripeStoreFileManager implements StoreFileManager, StripeCompactionPolicy.StripeInformationProvider { @Override public ImmutableCollection<StoreFile> clearFiles() { ImmutableCollection<StoreFile> result = state.allFilesCached; this.state = new State(); this.fileStarts.clear(); this.fileEnds.clear(); return result; } StripeStoreFileManager( KVComparator kvComparator, Configuration conf, StripeStoreConfig config); @Override void loadFiles(List<StoreFile> storeFiles); @Override Collection<StoreFile> getStorefiles(); @Override void insertNewFiles(Collection<StoreFile> sfs); @Override ImmutableCollection<StoreFile> clearFiles(); @Override int getStorefileCount(); @Override Iterator<StoreFile> getCandidateFilesForRowKeyBefore(final KeyValue targetKey); @Override Iterator<StoreFile> updateCandidateFilesForRowKeyBefore( Iterator<StoreFile> candidateFiles, final KeyValue targetKey, final KeyValue candidate); @Override /** * Override of getSplitPoint that determines the split point as the boundary between two * stripes, unless it causes significant imbalance between split sides' sizes. In that * case, the split boundary will be chosen from the middle of one of the stripes to * minimize imbalance. * @return The split point, or null if no split is possible. */ byte[] getSplitPoint(); @Override Collection<StoreFile> getFilesForScanOrGet( boolean isGet, byte[] startRow, byte[] stopRow); @Override void addCompactionResults( Collection<StoreFile> compactedFiles, Collection<StoreFile> results); @Override int getStoreCompactionPriority(); @Override final byte[] getStartRow(int stripeIndex); @Override final byte[] getEndRow(int stripeIndex); @Override List<StoreFile> getLevel0Files(); @Override List<byte[]> getStripeBoundaries(); @Override ArrayList<ImmutableList<StoreFile>> getStripes(); @Override int getStripeCount(); @Override Collection<StoreFile> getUnneededFiles(long maxTs, List<StoreFile> filesCompacting); static final byte[] STRIPE_START_KEY; static final byte[] STRIPE_END_KEY; final static byte[] OPEN_KEY; }
@Test public void testClearFiles() throws Exception { StripeStoreFileManager manager = createManager(); manager.insertNewFiles(al(createFile())); manager.insertNewFiles(al(createFile())); manager.addCompactionResults(al(), al(createFile(OPEN_KEY, KEY_B), createFile(KEY_B, OPEN_KEY))); assertEquals(4, manager.getStorefileCount()); Collection<StoreFile> allFiles = manager.clearFiles(); assertEquals(4, allFiles.size()); assertEquals(0, manager.getStorefileCount()); assertEquals(0, manager.getStorefiles().size()); }
StripeStoreFileManager implements StoreFileManager, StripeCompactionPolicy.StripeInformationProvider { @Override public List<StoreFile> getLevel0Files() { return this.state.level0Files; } StripeStoreFileManager( KVComparator kvComparator, Configuration conf, StripeStoreConfig config); @Override void loadFiles(List<StoreFile> storeFiles); @Override Collection<StoreFile> getStorefiles(); @Override void insertNewFiles(Collection<StoreFile> sfs); @Override ImmutableCollection<StoreFile> clearFiles(); @Override int getStorefileCount(); @Override Iterator<StoreFile> getCandidateFilesForRowKeyBefore(final KeyValue targetKey); @Override Iterator<StoreFile> updateCandidateFilesForRowKeyBefore( Iterator<StoreFile> candidateFiles, final KeyValue targetKey, final KeyValue candidate); @Override /** * Override of getSplitPoint that determines the split point as the boundary between two * stripes, unless it causes significant imbalance between split sides' sizes. In that * case, the split boundary will be chosen from the middle of one of the stripes to * minimize imbalance. * @return The split point, or null if no split is possible. */ byte[] getSplitPoint(); @Override Collection<StoreFile> getFilesForScanOrGet( boolean isGet, byte[] startRow, byte[] stopRow); @Override void addCompactionResults( Collection<StoreFile> compactedFiles, Collection<StoreFile> results); @Override int getStoreCompactionPriority(); @Override final byte[] getStartRow(int stripeIndex); @Override final byte[] getEndRow(int stripeIndex); @Override List<StoreFile> getLevel0Files(); @Override List<byte[]> getStripeBoundaries(); @Override ArrayList<ImmutableList<StoreFile>> getStripes(); @Override int getStripeCount(); @Override Collection<StoreFile> getUnneededFiles(long maxTs, List<StoreFile> filesCompacting); static final byte[] STRIPE_START_KEY; static final byte[] STRIPE_END_KEY; final static byte[] OPEN_KEY; }
@Test @SuppressWarnings("unchecked") public void testLoadFilesWithRecoverableBadFiles() throws Exception { ArrayList<StoreFile> validStripeFiles = al(createFile(OPEN_KEY, KEY_B), createFile(KEY_B, KEY_C), createFile(KEY_C, OPEN_KEY), createFile(KEY_C, OPEN_KEY)); ArrayList<StoreFile> filesToGoToL0 = al(createFile(), createFile(null, KEY_A), createFile(KEY_D, null), createFile(KEY_D, KEY_A), createFile(keyAfter(KEY_A), KEY_C), createFile(OPEN_KEY, KEY_D), createFile(KEY_D, keyAfter(KEY_D))); ArrayList<StoreFile> allFilesToGo = flattenLists(validStripeFiles, filesToGoToL0); Collections.shuffle(allFilesToGo); StripeStoreFileManager manager = createManager(allFilesToGo); List<StoreFile> l0Files = manager.getLevel0Files(); assertEquals(filesToGoToL0.size(), l0Files.size()); for (StoreFile sf : filesToGoToL0) { assertTrue(l0Files.contains(sf)); } verifyAllFiles(manager, allFilesToGo); } @Test public void testLoadFilesWithBadStripe() throws Exception { ArrayList<StoreFile> allFilesToGo = al(createFile(OPEN_KEY, KEY_B), createFile(KEY_B, KEY_C), createFile(KEY_C, OPEN_KEY), createFile(KEY_B, keyAfter(KEY_B))); Collections.shuffle(allFilesToGo); StripeStoreFileManager manager = createManager(allFilesToGo); assertEquals(allFilesToGo.size(), manager.getLevel0Files().size()); }
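These two tests pin down the loader's routing rule: a file joins a stripe only when its key range lines up exactly with consecutive stripe boundaries, and anything malformed or misaligned drops to level 0. A hypothetical sketch of that rule, with strings standing in for row keys and "" for OPEN_KEY:

import java.util.Arrays;
import java.util.List;

// Hypothetical sketch of stripe-vs-level-0 routing.
final class StripeRoutingSketch {
  // boundaries lists every stripe edge in order, including the open keys at both ends.
  static boolean fitsSomeStripe(List<String> boundaries, String start, String end) {
    if (start == null || end == null) return false; // missing metadata -> level 0
    int i = boundaries.indexOf(start);
    return i >= 0 && i + 1 < boundaries.size() && boundaries.get(i + 1).equals(end);
  }

  public static void main(String[] args) {
    List<String> bounds = Arrays.asList("", "b", "c", ""); // stripes [open,b) [b,c) [c,open)
    System.out.println(fitsSomeStripe(bounds, "b", "c"));  // true: matches a stripe exactly
    System.out.println(fitsSomeStripe(bounds, "b", "bb")); // false: misaligned -> level 0
    System.out.println(fitsSomeStripe(bounds, null, "b")); // false: no metadata -> level 0
  }
}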
ServerNonceManager { public void endOperation(long group, long nonce, boolean success) { if (nonce == HConstants.NO_NONCE) return; NonceKey nk = new NonceKey(group, nonce); OperationContext newResult = nonces.get(nk); assert newResult != null; synchronized (newResult) { assert newResult.getState() == OperationContext.WAIT; newResult.setState(success ? OperationContext.DONT_PROCEED : OperationContext.PROCEED); if (success) { newResult.reportActivity(); } else { OperationContext val = nonces.remove(nk); assert val == newResult; } if (newResult.hasWait()) { LOG.debug("Conflict with running op ended: " + nk + ", " + newResult); newResult.notifyAll(); } } } ServerNonceManager(Configuration conf); @VisibleForTesting void setConflictWaitIterationMs(int conflictWaitIterationMs); boolean startOperation(long group, long nonce, Stoppable stoppable); void endOperation(long group, long nonce, boolean success); void reportOperationFromWal(long group, long nonce, long writeTime); Chore createCleanupChore(Stoppable stoppable); static final String HASH_NONCE_GRACE_PERIOD_KEY; }
@Test public void testNoEndWithoutStart() { ServerNonceManager nm = createManager(); try { nm.endOperation(NO_NONCE, 1, true); fail("Should have thrown"); } catch (AssertionError err) {} }
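The AssertionError this test expects comes straight from the nonce table's lifecycle: a successful operation stays recorded so a duplicate never re-runs, a failed one is removed so a client retry with the same nonce may proceed, and ending an operation that was never started is a programming error. A hypothetical standalone sketch:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Hypothetical sketch of the nonce lifecycle behind start/endOperation.
final class NonceTableSketch {
  enum State { WAIT, PROCEED, DONT_PROCEED }

  private final ConcurrentMap<Long, State> nonces = new ConcurrentHashMap<>();

  void start(long nonce) {
    if (nonces.putIfAbsent(nonce, State.WAIT) != null) {
      throw new IllegalStateException("duplicate in-flight nonce " + nonce);
    }
  }

  void end(long nonce, boolean success) {
    State s = nonces.get(nonce);
    if (s == null) throw new AssertionError("end without start"); // what testNoEndWithoutStart hits
    if (success) {
      nonces.put(nonce, State.DONT_PROCEED); // remember forever: never re-run this nonce
    } else {
      nonces.remove(nonce); // forget: a retry with the same nonce may run
    }
  }

  public static void main(String[] args) {
    NonceTableSketch t = new NonceTableSketch();
    t.start(42L);
    t.end(42L, false); // failure forgets the nonce...
    t.start(42L);      // ...so the retry is accepted
    t.end(42L, true);
    try { t.end(7L, true); } catch (AssertionError e) { System.out.println("caught: " + e.getMessage()); }
  }
}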
HttpServer implements FilterContainer { @Override public String toString() { if (listeners.size() == 0) { return "Inactive HttpServer"; } else { StringBuilder sb = new StringBuilder("HttpServer (") .append(isAlive() ? STATE_DESCRIPTION_ALIVE : STATE_DESCRIPTION_NOT_LIVE).append("), listening at:"); for (ListenerInfo li : listeners) { Connector l = li.listener; sb.append(l.getHost()).append(":").append(l.getPort()).append("/,"); } return sb.toString(); } } @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort ); @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, Connector connector); @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, String[] pathSpecs); @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf); @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, AccessControlList adminsAcl); @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, AccessControlList adminsAcl, Connector connector); @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, AccessControlList adminsAcl, Connector connector, String[] pathSpecs); private HttpServer(final Builder b); Connector createBaseListener(Configuration conf); @InterfaceAudience.Private static Connector createDefaultChannelConnector(); void addContext(Context ctxt, boolean isFiltered); void setAttribute(String name, Object value); void addJerseyResourcePackage(final String packageName, final String pathSpec); void addServlet(String name, String pathSpec, Class<? extends HttpServlet> clazz); void addInternalServlet(String name, String pathSpec, Class<? extends HttpServlet> clazz); void addInternalServlet(String name, String pathSpec, Class<? extends HttpServlet> clazz, boolean requireAuth); @Override void addFilter(String name, String classname, Map<String, String> parameters); @Override void addGlobalFilter(String name, String classname, Map<String, String> parameters); static void defineFilter(Context ctx, String name, String classname, Map<String,String> parameters, String[] urls); Object getAttribute(String name); WebAppContext getWebAppContext(); String getWebAppsPath(String appName); @Deprecated int getPort(); InetSocketAddress getConnectorAddress(int index); void setThreads(int min, int max); void start(); void stop(); void join(); boolean isAlive(); @Override String toString(); static boolean isInstrumentationAccessAllowed( ServletContext servletContext, HttpServletRequest request, HttpServletResponse response); static boolean hasAdministratorAccess( ServletContext servletContext, HttpServletRequest request, HttpServletResponse response); static boolean userHasAdministratorAccess(ServletContext servletContext, String remoteUser); static final Log LOG; static final String CONF_CONTEXT_ATTRIBUTE; static final String ADMINS_ACL; static final String BIND_ADDRESS; static final String SPNEGO_FILTER; static final String NO_CACHE_FILTER; static final String APP_DIR; }
@Test public void testLongHeader() throws Exception { URL url = new URL(baseUrl, "/longheader"); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); StringBuilder sb = new StringBuilder(); for (int i = 0 ; i < 63 * 1024; i++) { sb.append("a"); } conn.setRequestProperty("longheader", sb.toString()); assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); }
OpenRegionHandler extends EventHandler { @Override public void process() throws IOException { boolean openSuccessful = false; boolean transitionedToOpening = false; final String regionName = regionInfo.getRegionNameAsString(); HRegion region = null; try { if (this.server.isStopped() || this.rsServices.isStopping()) { return; } final String encodedName = regionInfo.getEncodedName(); if (this.rsServices.getFromOnlineRegions(encodedName) != null) { LOG.error("Region " + encodedName + " was already online when we started processing the opening. " + "Marking this new attempt as failed"); return; } if (!isRegionStillOpening()) { LOG.error("Region " + encodedName + " opening cancelled"); return; } if (useZKForAssignment && !coordination.transitionFromOfflineToOpening(regionInfo, ord)) { LOG.warn("Region was hijacked? Opening cancelled for encodedName=" + encodedName); return; } transitionedToOpening = true; region = openRegion(); if (region == null) { return; } boolean failed = true; if (!useZKForAssignment || coordination.tickleOpening(ord, regionInfo, rsServices, "post_region_open")) { if (updateMeta(region)) { failed = false; } } if (failed || this.server.isStopped() || this.rsServices.isStopping()) { return; } if (!isRegionStillOpening() || (useZKForAssignment && !coordination.transitionToOpened(region, ord))) { return; } this.rsServices.addToOnlineRegions(region); openSuccessful = true; LOG.debug("Opened " + regionName + " on " + this.server.getServerName()); } finally { if (!openSuccessful) { doCleanUpOnFailedOpen(region, transitionedToOpening, ord); } final Boolean current = this.rsServices.getRegionsInTransitionInRS(). remove(this.regionInfo.getEncodedNameAsBytes()); if (openSuccessful) { if (current == null) { LOG.error("Bad state: we've just opened a region that was NOT in transition. Region=" + regionName); } else if (Boolean.FALSE.equals(current)) { LOG.error("Race condition: we've finished opening a region while a close was requested" + " on region=" + regionName + ". It can be a critical error, as a region that" + " should be closed is now open. Closing it now"); cleanupFailedOpen(region); } } } } OpenRegionHandler(final Server server, final RegionServerServices rsServices, HRegionInfo regionInfo, HTableDescriptor htd, OpenRegionCoordination coordination, OpenRegionCoordination.OpenRegionDetails ord); protected OpenRegionHandler(final Server server, final RegionServerServices rsServices, final HRegionInfo regionInfo, final HTableDescriptor htd, EventType eventType, OpenRegionCoordination coordination, OpenRegionCoordination.OpenRegionDetails ord); HRegionInfo getRegionInfo(); @Override void process(); }
@Test public void testRegionServerAbortionDueToFailureTransitioningToOpened() throws IOException, NodeExistsException, KeeperException { final Server server = new MockServer(HTU); final RegionServerServices rss = HTU.createMockRegionServerService(); HTableDescriptor htd = TEST_HTD; final HRegionInfo hri = TEST_HRI; HRegion region = HRegion.createHRegion(hri, HTU.getDataTestDir(), HTU .getConfiguration(), htd); assertNotNull(region); try { ZkCoordinatedStateManager csm = new ZkCoordinatedStateManager(); csm.initialize(server); csm.start(); ZkOpenRegionCoordination.ZkOpenRegionDetails zkCrd = new ZkOpenRegionCoordination.ZkOpenRegionDetails(); zkCrd.setServerName(server.getServerName()); ZkOpenRegionCoordination openRegionCoordination = new ZkOpenRegionCoordination(csm, server.getZooKeeper()) { @Override public boolean transitionToOpened(final HRegion r, OpenRegionDetails ord) throws IOException { ZooKeeperWatcher zkw = server.getZooKeeper(); String node = ZKAssign.getNodeName(zkw, hri.getEncodedName()); try { ZKUtil.deleteNodeFailSilent(zkw, node); } catch (KeeperException e) { throw new RuntimeException("Ugh failed delete of " + node, e); } return super.transitionToOpened(r, ord); } }; OpenRegionHandler handler = new OpenRegionHandler(server, rss, hri, htd, openRegionCoordination, zkCrd); rss.getRegionsInTransitionInRS().put( hri.getEncodedNameAsBytes(), Boolean.TRUE); handler.process(); rss.getRegionsInTransitionInRS().put( hri.getEncodedNameAsBytes(), Boolean.TRUE); ZKAssign.createNodeOffline(server.getZooKeeper(), hri, server.getServerName()); handler.process(); } catch (IOException ioe) { } finally { HRegion.closeHRegion(region); } assertTrue("region server should have aborted", server.isAborted()); } @Test public void testFailedOpenRegion() throws Exception { Server server = new MockServer(HTU); RegionServerServices rsServices = HTU.createMockRegionServerService(); ZKAssign.createNodeOffline(server.getZooKeeper(), TEST_HRI, server.getServerName()); ZkCoordinatedStateManager csm = new ZkCoordinatedStateManager(); csm.initialize(server); csm.start(); ZkOpenRegionCoordination.ZkOpenRegionDetails zkCrd = new ZkOpenRegionCoordination.ZkOpenRegionDetails(); zkCrd.setServerName(server.getServerName()); OpenRegionHandler handler = new OpenRegionHandler(server, rsServices, TEST_HRI, TEST_HTD, csm.getOpenRegionCoordination(), zkCrd) { @Override HRegion openRegion() { return null; } }; rsServices.getRegionsInTransitionInRS().put( TEST_HRI.getEncodedNameAsBytes(), Boolean.TRUE); handler.process(); RegionTransition rt = RegionTransition.parseFrom( ZKAssign.getData(server.getZooKeeper(), TEST_HRI.getEncodedName())); assertEquals(EventType.RS_ZK_REGION_FAILED_OPEN, rt.getEventType()); } @Test public void testFailedUpdateMeta() throws Exception { Server server = new MockServer(HTU); RegionServerServices rsServices = HTU.createMockRegionServerService(); ZKAssign.createNodeOffline(server.getZooKeeper(), TEST_HRI, server.getServerName()); ZkCoordinatedStateManager csm = new ZkCoordinatedStateManager(); csm.initialize(server); csm.start(); ZkOpenRegionCoordination.ZkOpenRegionDetails zkCrd = new ZkOpenRegionCoordination.ZkOpenRegionDetails(); zkCrd.setServerName(server.getServerName()); OpenRegionHandler handler = new OpenRegionHandler(server, rsServices, TEST_HRI, TEST_HTD, csm.getOpenRegionCoordination(), zkCrd) { @Override boolean updateMeta(final HRegion r) { return false; } }; rsServices.getRegionsInTransitionInRS().put( TEST_HRI.getEncodedNameAsBytes(), Boolean.TRUE); handler.process(); 
RegionTransition rt = RegionTransition.parseFrom( ZKAssign.getData(server.getZooKeeper(), TEST_HRI.getEncodedName())); assertEquals(EventType.RS_ZK_REGION_FAILED_OPEN, rt.getEventType()); } @Test public void testTransitionToFailedOpenEvenIfCleanupFails() throws Exception { Server server = new MockServer(HTU); RegionServerServices rsServices = HTU.createMockRegionServerService(); ZKAssign.createNodeOffline(server.getZooKeeper(), TEST_HRI, server.getServerName()); ZkCoordinatedStateManager csm = new ZkCoordinatedStateManager(); csm.initialize(server); csm.start(); ZkOpenRegionCoordination.ZkOpenRegionDetails zkCrd = new ZkOpenRegionCoordination.ZkOpenRegionDetails(); zkCrd.setServerName(server.getServerName()); OpenRegionHandler handler = new OpenRegionHandler(server, rsServices, TEST_HRI, TEST_HTD, csm.getOpenRegionCoordination(), zkCrd) { @Override boolean updateMeta(HRegion r) { return false; }; @Override void cleanupFailedOpen(HRegion region) throws IOException { throw new IOException("FileSystem got closed."); } }; rsServices.getRegionsInTransitionInRS().put(TEST_HRI.getEncodedNameAsBytes(), Boolean.TRUE); try { handler.process(); } catch (Exception e) { } RegionTransition rt = RegionTransition.parseFrom(ZKAssign.getData(server.getZooKeeper(), TEST_HRI.getEncodedName())); assertEquals(EventType.RS_ZK_REGION_FAILED_OPEN, rt.getEventType()); } @Test public void testTransitionToFailedOpenFromOffline() throws Exception { Server server = new MockServer(HTU); RegionServerServices rsServices = HTU.createMockRegionServerService(server.getServerName()); ZKAssign.createNodeOffline(server.getZooKeeper(), TEST_HRI, server.getServerName()); ZkCoordinatedStateManager csm = new ZkCoordinatedStateManager(); csm.initialize(server); csm.start(); ZkOpenRegionCoordination.ZkOpenRegionDetails zkCrd = new ZkOpenRegionCoordination.ZkOpenRegionDetails(); zkCrd.setServerName(server.getServerName()); ZkOpenRegionCoordination openRegionCoordination = new ZkOpenRegionCoordination(csm, server.getZooKeeper()) { @Override public boolean transitionFromOfflineToOpening(HRegionInfo regionInfo, OpenRegionDetails ord) { return false; } }; OpenRegionHandler handler = new OpenRegionHandler(server, rsServices, TEST_HRI, TEST_HTD, openRegionCoordination, zkCrd); rsServices.getRegionsInTransitionInRS().put(TEST_HRI.getEncodedNameAsBytes(), Boolean.TRUE); handler.process(); RegionTransition rt = RegionTransition.parseFrom(ZKAssign.getData(server.getZooKeeper(), TEST_HRI.getEncodedName())); assertEquals(EventType.RS_ZK_REGION_FAILED_OPEN, rt.getEventType()); }
CloseRegionHandler extends EventHandler { @Override public void process() { try { String name = regionInfo.getRegionNameAsString(); LOG.debug("Processing close of " + name); String encodedRegionName = regionInfo.getEncodedName(); HRegion region = this.rsServices.getFromOnlineRegions(encodedRegionName); if (region == null) { LOG.warn("Received CLOSE for region " + name + " but currently not serving - ignoring"); return; } try { if (useZKForAssignment && closeRegionCoordination.checkClosingState( regionInfo, closeRegionDetails)) { return; } if (region.close(abort) == null) { LOG.warn("Can't close region: was already closed during close(): " + regionInfo.getRegionNameAsString()); return; } } catch (IOException ioe) { server.abort("Unrecoverable exception while closing region " + regionInfo.getRegionNameAsString() + ", still finishing close", ioe); throw new RuntimeException(ioe); } this.rsServices.removeFromOnlineRegions(region, destination); if (!useZKForAssignment) { rsServices.reportRegionTransition(TransitionCode.CLOSED, regionInfo); } else { closeRegionCoordination.setClosedState(region, this.server.getServerName(), closeRegionDetails); } LOG.debug("Closed " + region.getRegionNameAsString()); } finally { this.rsServices.getRegionsInTransitionInRS(). remove(this.regionInfo.getEncodedNameAsBytes()); } } CloseRegionHandler(final Server server, final RegionServerServices rsServices, final HRegionInfo regionInfo, final boolean abort, CloseRegionCoordination closeRegionCoordination, CloseRegionCoordination.CloseRegionDetails crd); CloseRegionHandler(final Server server, final RegionServerServices rsServices, final HRegionInfo regionInfo, final boolean abort, CloseRegionCoordination closeRegionCoordination, CloseRegionCoordination.CloseRegionDetails crd, ServerName destination); CloseRegionHandler(final Server server, final RegionServerServices rsServices, HRegionInfo regionInfo, boolean abort, CloseRegionCoordination closeRegionCoordination, CloseRegionCoordination.CloseRegionDetails crd, EventType eventType); protected CloseRegionHandler(final Server server, final RegionServerServices rsServices, HRegionInfo regionInfo, boolean abort, CloseRegionCoordination closeRegionCoordination, CloseRegionCoordination.CloseRegionDetails crd, EventType eventType, ServerName destination); HRegionInfo getRegionInfo(); @Override void process(); }
@Test public void testFailedFlushAborts() throws IOException, NodeExistsException, KeeperException { final Server server = new MockServer(HTU, false); final RegionServerServices rss = HTU.createMockRegionServerService(); HTableDescriptor htd = TEST_HTD; final HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_END_ROW, HConstants.EMPTY_END_ROW); HRegion region = HTU.createLocalHRegion(hri, htd); try { assertNotNull(region); HRegion spy = Mockito.spy(region); final boolean abort = false; Mockito.when(spy.close(abort)). thenThrow(new IOException("Mocked failed close!")); rss.addToOnlineRegions(spy); assertFalse(server.isStopped()); ZkCoordinatedStateManager consensusProvider = new ZkCoordinatedStateManager(); consensusProvider.initialize(server); consensusProvider.start(); ZkCloseRegionCoordination.ZkCloseRegionDetails zkCrd = new ZkCloseRegionCoordination.ZkCloseRegionDetails(); zkCrd.setPublishStatusInZk(false); zkCrd.setExpectedVersion(-1); CloseRegionHandler handler = new CloseRegionHandler(server, rss, hri, false, consensusProvider.getCloseRegionCoordination(), zkCrd); boolean throwable = false; try { handler.process(); } catch (Throwable t) { throwable = true; } finally { assertTrue(throwable); assertTrue(server.isStopped()); } } finally { HRegion.closeHRegion(region); } } @Test public void testZKClosingNodeVersionMismatch() throws IOException, NodeExistsException, KeeperException, DeserializationException { final Server server = new MockServer(HTU); final RegionServerServices rss = HTU.createMockRegionServerService(); HTableDescriptor htd = TEST_HTD; final HRegionInfo hri = TEST_HRI; ZkCoordinatedStateManager coordinationProvider = new ZkCoordinatedStateManager(); coordinationProvider.initialize(server); coordinationProvider.start(); OpenRegion(server, rss, htd, hri, coordinationProvider.getOpenRegionCoordination()); int versionOfClosingNode = ZKAssign.createNodeClosing(server.getZooKeeper(), hri, server.getServerName()); ZkCloseRegionCoordination.ZkCloseRegionDetails zkCrd = new ZkCloseRegionCoordination.ZkCloseRegionDetails(); zkCrd.setPublishStatusInZk(true); zkCrd.setExpectedVersion(versionOfClosingNode+1); CloseRegionHandler handler = new CloseRegionHandler(server, rss, hri, false, coordinationProvider.getCloseRegionCoordination(), zkCrd); handler.process(); RegionTransition rt = RegionTransition.parseFrom(ZKAssign.getData(server.getZooKeeper(), hri.getEncodedName())); assertTrue(rt.getEventType().equals(EventType.M_ZK_REGION_CLOSING )); } @Test public void testCloseRegion() throws IOException, NodeExistsException, KeeperException, DeserializationException { final Server server = new MockServer(HTU); final RegionServerServices rss = HTU.createMockRegionServerService(); HTableDescriptor htd = TEST_HTD; HRegionInfo hri = TEST_HRI; ZkCoordinatedStateManager coordinationProvider = new ZkCoordinatedStateManager(); coordinationProvider.initialize(server); coordinationProvider.start(); OpenRegion(server, rss, htd, hri, coordinationProvider.getOpenRegionCoordination()); int versionOfClosingNode = ZKAssign.createNodeClosing(server.getZooKeeper(), hri, server.getServerName()); ZkCloseRegionCoordination.ZkCloseRegionDetails zkCrd = new ZkCloseRegionCoordination.ZkCloseRegionDetails(); zkCrd.setPublishStatusInZk(true); zkCrd.setExpectedVersion(versionOfClosingNode); CloseRegionHandler handler = new CloseRegionHandler(server, rss, hri, false, coordinationProvider.getCloseRegionCoordination(), zkCrd); handler.process(); RegionTransition rt = RegionTransition.parseFrom( 
ZKAssign.getData(server.getZooKeeper(), hri.getEncodedName())); assertTrue(rt.getEventType().equals(EventType.RS_ZK_REGION_CLOSED)); }
RegionSplitPolicy extends Configured { public static RegionSplitPolicy create(HRegion region, Configuration conf) throws IOException { Class<? extends RegionSplitPolicy> clazz = getSplitPolicyClass( region.getTableDesc(), conf); RegionSplitPolicy policy = ReflectionUtils.newInstance(clazz, conf); policy.configureForRegion(region); return policy; } static RegionSplitPolicy create(HRegion region, Configuration conf); static Class<? extends RegionSplitPolicy> getSplitPolicyClass( HTableDescriptor htd, Configuration conf); }
@Test public void testCreateDefault() throws IOException { conf.setLong(HConstants.HREGION_MAX_FILESIZE, 1234L); ConstantSizeRegionSplitPolicy policy = (ConstantSizeRegionSplitPolicy)RegionSplitPolicy.create( mockRegion, conf); assertWithinJitter(1234L, policy.getDesiredMaxFileSize()); htd.setMaxFileSize(9999L); policy = (ConstantSizeRegionSplitPolicy)RegionSplitPolicy.create( mockRegion, conf); assertWithinJitter(9999L, policy.getDesiredMaxFileSize()); } @Test public void testConstantSizePolicy() throws IOException { htd.setMaxFileSize(1024L); ConstantSizeRegionSplitPolicy policy = (ConstantSizeRegionSplitPolicy)RegionSplitPolicy.create(mockRegion, conf); doConstantSizePolicyTests(policy); }
HttpServer implements FilterContainer { public static boolean hasAdministratorAccess( ServletContext servletContext, HttpServletRequest request, HttpServletResponse response) throws IOException { Configuration conf = (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); if (!conf.getBoolean( CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) { return true; } String remoteUser = request.getRemoteUser(); if (remoteUser == null) { response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "Unauthenticated users are not " + "authorized to access this page."); return false; } if (servletContext.getAttribute(ADMINS_ACL) != null && !userHasAdministratorAccess(servletContext, remoteUser)) { response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "User " + remoteUser + " is unauthorized to access this page."); return false; } return true; } @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort ); @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, Connector connector); @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, String[] pathSpecs); @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf); @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, AccessControlList adminsAcl); @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, AccessControlList adminsAcl, Connector connector); @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, AccessControlList adminsAcl, Connector connector, String[] pathSpecs); private HttpServer(final Builder b); Connector createBaseListener(Configuration conf); @InterfaceAudience.Private static Connector createDefaultChannelConnector(); void addContext(Context ctxt, boolean isFiltered); void setAttribute(String name, Object value); void addJerseyResourcePackage(final String packageName, final String pathSpec); void addServlet(String name, String pathSpec, Class<? extends HttpServlet> clazz); void addInternalServlet(String name, String pathSpec, Class<? extends HttpServlet> clazz); void addInternalServlet(String name, String pathSpec, Class<? 
extends HttpServlet> clazz, boolean requireAuth); @Override void addFilter(String name, String classname, Map<String, String> parameters); @Override void addGlobalFilter(String name, String classname, Map<String, String> parameters); static void defineFilter(Context ctx, String name, String classname, Map<String,String> parameters, String[] urls); Object getAttribute(String name); WebAppContext getWebAppContext(); String getWebAppsPath(String appName); @Deprecated int getPort(); InetSocketAddress getConnectorAddress(int index); void setThreads(int min, int max); void start(); void stop(); void join(); boolean isAlive(); @Override String toString(); static boolean isInstrumentationAccessAllowed( ServletContext servletContext, HttpServletRequest request, HttpServletResponse response); static boolean hasAdministratorAccess( ServletContext servletContext, HttpServletRequest request, HttpServletResponse response); static boolean userHasAdministratorAccess(ServletContext servletContext, String remoteUser); static final Log LOG; static final String CONF_CONTEXT_ATTRIBUTE; static final String ADMINS_ACL; static final String BIND_ADDRESS; static final String SPNEGO_FILTER; static final String NO_CACHE_FILTER; static final String APP_DIR; }
@Test public void testHasAdministratorAccess() throws Exception { Configuration conf = new Configuration(); conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false); ServletContext context = Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(null); HttpServletRequest request = Mockito.mock(HttpServletRequest.class); Mockito.when(request.getRemoteUser()).thenReturn(null); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response)); response = Mockito.mock(HttpServletResponse.class); conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true); Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response)); Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), Mockito.anyString()); response = Mockito.mock(HttpServletResponse.class); Mockito.when(request.getRemoteUser()).thenReturn("foo"); Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response)); response = Mockito.mock(HttpServletResponse.class); AccessControlList acls = Mockito.mock(AccessControlList.class); Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls); Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response)); Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), Mockito.anyString()); response = Mockito.mock(HttpServletResponse.class); Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(true); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls); Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response)); }
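The test walks hasAdministratorAccess through its three gates: authorization disabled, unauthenticated user, and ACL membership. Reduced to plain values so it runs without the servlet API, the decision is the hypothetical predicate below (aclAllows == null models "no ACL configured").

// Hypothetical sketch of the admin-access decision.
final class AdminAccessSketch {
  static boolean hasAdminAccess(boolean authorizationEnabled, String remoteUser, Boolean aclAllows) {
    if (!authorizationEnabled) return true;            // security off: everyone passes
    if (remoteUser == null) return false;              // 401: unauthenticated
    if (aclAllows != null && !aclAllows) return false; // 401: authenticated but not in the ACL
    return true;
  }

  public static void main(String[] args) {
    System.out.println(hasAdminAccess(false, null, null));          // true
    System.out.println(hasAdminAccess(true, null, null));           // false
    System.out.println(hasAdminAccess(true, "foo", null));          // true: no ACL configured
    System.out.println(hasAdminAccess(true, "foo", Boolean.FALSE)); // false
    System.out.println(hasAdminAccess(true, "foo", Boolean.TRUE));  // true
  }
}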
RegionSplitPolicy extends Configured { protected byte[] getSplitPoint() { byte[] explicitSplitPoint = this.region.getExplicitSplitPoint(); if (explicitSplitPoint != null) { return explicitSplitPoint; } Map<byte[], Store> stores = region.getStores(); byte[] splitPointFromLargestStore = null; long largestStoreSize = 0; for (Store s : stores.values()) { byte[] splitPoint = s.getSplitPoint(); long storeSize = s.getSize(); if (splitPoint != null && largestStoreSize < storeSize) { splitPointFromLargestStore = splitPoint; largestStoreSize = storeSize; } } return splitPointFromLargestStore; } static RegionSplitPolicy create(HRegion region, Configuration conf); static Class<? extends RegionSplitPolicy> getSplitPolicyClass( HTableDescriptor htd, Configuration conf); }
@Test public void testGetSplitPoint() throws IOException { ConstantSizeRegionSplitPolicy policy = (ConstantSizeRegionSplitPolicy)RegionSplitPolicy.create(mockRegion, conf); assertFalse(policy.shouldSplit()); assertNull(policy.getSplitPoint()); HStore mockStore = Mockito.mock(HStore.class); Mockito.doReturn(2000L).when(mockStore).getSize(); Mockito.doReturn(true).when(mockStore).canSplit(); Mockito.doReturn(Bytes.toBytes("store 1 split")) .when(mockStore).getSplitPoint(); stores.put(new byte[]{1}, mockStore); assertEquals("store 1 split", Bytes.toString(policy.getSplitPoint())); HStore mockStore2 = Mockito.mock(HStore.class); Mockito.doReturn(4000L).when(mockStore2).getSize(); Mockito.doReturn(true).when(mockStore2).canSplit(); Mockito.doReturn(Bytes.toBytes("store 2 split")) .when(mockStore2).getSplitPoint(); stores.put(new byte[]{2}, mockStore2); assertEquals("store 2 split", Bytes.toString(policy.getSplitPoint())); }
HeapMemoryManager { public static HeapMemoryManager create(Configuration conf, FlushRequester memStoreFlusher, Server server) { BlockCache blockCache = CacheConfig.instantiateBlockCache(conf); if (blockCache instanceof ResizableBlockCache) { return new HeapMemoryManager((ResizableBlockCache) blockCache, memStoreFlusher, server); } return null; } @VisibleForTesting HeapMemoryManager(ResizableBlockCache blockCache, FlushRequester memStoreFlusher, Server server); static HeapMemoryManager create(Configuration conf, FlushRequester memStoreFlusher, Server server); void start(); void stop(); static final String BLOCK_CACHE_SIZE_MAX_RANGE_KEY; static final String BLOCK_CACHE_SIZE_MIN_RANGE_KEY; static final String MEMSTORE_SIZE_MAX_RANGE_KEY; static final String MEMSTORE_SIZE_MIN_RANGE_KEY; static final String HBASE_RS_HEAP_MEMORY_TUNER_PERIOD; static final int HBASE_RS_HEAP_MEMORY_TUNER_DEFAULT_PERIOD; static final String HBASE_RS_HEAP_MEMORY_TUNER_CLASS; }
@Test public void testWhenMemstoreAndBlockCacheMaxMinChecksFails() throws Exception { BlockCacheStub blockCache = new BlockCacheStub(0); MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub(0); Configuration conf = HBaseConfiguration.create(); conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY, 0.75f); conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.06f); try { new HeapMemoryManager(blockCache, memStoreFlusher, new RegionServerStub(conf)); fail(); } catch (RuntimeException e) { /* expected: invalid range configuration */ } conf = HBaseConfiguration.create(); conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY, 0.2f); conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.7f); try { new HeapMemoryManager(blockCache, memStoreFlusher, new RegionServerStub(conf)); fail(); } catch (RuntimeException e) { /* expected: invalid range configuration */ } }
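A minimal standalone sketch of the constructor-time range validation this test relies on (the check below is illustrative; HBase's actual bounds and messages live inside HeapMemoryManager):

public class RangeCheckSketch {
  // Reject inverted or out-of-bounds ranges up front, as the constructor under test does.
  static void validateRange(String name, float min, float max) {
    if (min < 0f || max > 1f || min > max) {
      throw new RuntimeException(name + " range [" + min + ", " + max + "] is invalid");
    }
  }

  public static void main(String[] args) {
    validateRange("block cache", 0.05f, 0.4f); // passes
    validateRange("memstore", 0.75f, 0.4f);    // throws: min exceeds max
  }
}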
StripeStoreEngine extends StoreEngine<StripeStoreFlusher, StripeCompactionPolicy, StripeCompactor, StripeStoreFileManager> { @Override public CompactionContext createCompaction() { return new StripeCompaction(); } @Override boolean needsCompaction(List<StoreFile> filesCompacting); @Override CompactionContext createCompaction(); }
@Test public void testCompactionContextForceSelect() throws Exception { Configuration conf = HBaseConfiguration.create(); int targetCount = 2; conf.setInt(StripeStoreConfig.INITIAL_STRIPE_COUNT_KEY, targetCount); conf.setInt(StripeStoreConfig.MIN_FILES_L0_KEY, 2); conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, TestStoreEngine.class.getName()); TestStoreEngine se = createEngine(conf); StripeCompactor mockCompactor = mock(StripeCompactor.class); se.setCompactorOverride(mockCompactor); when(mockCompactor.compact(any(CompactionRequest.class), anyInt(), anyLong(), any(byte[].class), any(byte[].class), any(byte[].class), any(byte[].class))) .thenReturn(new ArrayList<Path>()); StoreFile sf = createFile(); ArrayList<StoreFile> compactUs = al(sf, createFile(), createFile()); se.getStoreFileManager().loadFiles(compactUs); CompactionContext compaction = se.createCompaction(); compaction.select(al(), false, false, false); assertEquals(3, compaction.getRequest().getFiles().size()); compactUs.remove(sf); CompactionRequest req = new CompactionRequest(compactUs); compaction.forceSelect(req); assertEquals(2, compaction.getRequest().getFiles().size()); assertFalse(compaction.getRequest().getFiles().contains(sf)); compaction.compact(); verify(mockCompactor, times(1)).compact(compaction.getRequest(), targetCount, 0L, StripeStoreFileManager.OPEN_KEY, StripeStoreFileManager.OPEN_KEY, null, null); } @Test public void testCompactionContextForceSelectWithThroughputController() throws Exception { Configuration conf = HBaseConfiguration.create(); int targetCount = 2; conf.setInt(StripeStoreConfig.INITIAL_STRIPE_COUNT_KEY, targetCount); conf.setInt(StripeStoreConfig.MIN_FILES_L0_KEY, 2); conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, TestStoreEngine.class.getName()); TestStoreEngine se = createEngine(conf); StripeCompactor mockCompactor = mock(StripeCompactor.class); se.setCompactorOverride(mockCompactor); when( mockCompactor.compact(any(CompactionRequest.class), anyInt(), anyLong(), any(byte[].class), any(byte[].class), any(byte[].class), any(byte[].class), any(CompactionThroughputController.class), any(User.class))) .thenReturn(new ArrayList<Path>()); StoreFile sf = createFile(); ArrayList<StoreFile> compactUs = al(sf, createFile(), createFile()); se.getStoreFileManager().loadFiles(compactUs); CompactionContext compaction = se.createCompaction(); compaction.select(al(), false, false, false); assertEquals(3, compaction.getRequest().getFiles().size()); compactUs.remove(sf); CompactionRequest req = new CompactionRequest(compactUs); compaction.forceSelect(req); assertEquals(2, compaction.getRequest().getFiles().size()); assertFalse(compaction.getRequest().getFiles().contains(sf)); compaction.compact(NoLimitCompactionThroughputController.INSTANCE); verify(mockCompactor, times(1)).compact(compaction.getRequest(), targetCount, 0L, StripeStoreFileManager.OPEN_KEY, StripeStoreFileManager.OPEN_KEY, null, null, NoLimitCompactionThroughputController.INSTANCE, null); }
Compressor { static short toShort(byte hi, byte lo) { short s = (short) (((hi & 0xFF) << 8) | (lo & 0xFF)); Preconditions.checkArgument(s >= 0); return s; } static void main(String[] args); }
@Test public void testToShort() { short s = 1; assertEquals(s, Compressor.toShort((byte)0, (byte)1)); s <<= 8; assertEquals(s, Compressor.toShort((byte)1, (byte)0)); } @Test (expected = IllegalArgumentException.class) public void testNegativeToShort() { Compressor.toShort((byte)0xff, (byte)0xff); }
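The masking in Compressor.toShort is worth seeing in isolation; this standalone sketch (not the HBase class itself) shows why the precondition rejects the 0xff/0xff input:

public class ToShortSketch {
  static short toShort(byte hi, byte lo) {
    // & 0xFF prevents sign extension: (byte) 0xff alone would widen to 0xffffffff.
    return (short) (((hi & 0xFF) << 8) | (lo & 0xFF));
  }

  public static void main(String[] args) {
    System.out.println(toShort((byte) 0, (byte) 1));        // 1
    System.out.println(toShort((byte) 1, (byte) 0));        // 256
    System.out.println(toShort((byte) 0xff, (byte) 0xff));  // -1, which Preconditions.checkArgument(s >= 0) rejects
  }
}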
HttpServer implements FilterContainer { public void stop() throws Exception { MultiException exception = null; for (ListenerInfo li : listeners) { if (!li.isManaged) { continue; } try { li.listener.close(); } catch (Exception e) { LOG.error( "Error while stopping listener for webapp" + webAppContext.getDisplayName(), e); exception = addMultiException(exception, e); } } try { webAppContext.clearAttributes(); webAppContext.stop(); } catch (Exception e) { LOG.error("Error while stopping web app context for webapp " + webAppContext.getDisplayName(), e); exception = addMultiException(exception, e); } try { webServer.stop(); } catch (Exception e) { LOG.error("Error while stopping web server for webapp " + webAppContext.getDisplayName(), e); exception = addMultiException(exception, e); } if (exception != null) { exception.ifExceptionThrow(); } } @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort ); @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, Connector connector); @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, String[] pathSpecs); @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf); @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, AccessControlList adminsAcl); @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, AccessControlList adminsAcl, Connector connector); @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, AccessControlList adminsAcl, Connector connector, String[] pathSpecs); private HttpServer(final Builder b); Connector createBaseListener(Configuration conf); @InterfaceAudience.Private static Connector createDefaultChannelConnector(); void addContext(Context ctxt, boolean isFiltered); void setAttribute(String name, Object value); void addJerseyResourcePackage(final String packageName, final String pathSpec); void addServlet(String name, String pathSpec, Class<? extends HttpServlet> clazz); void addInternalServlet(String name, String pathSpec, Class<? extends HttpServlet> clazz); void addInternalServlet(String name, String pathSpec, Class<? 
extends HttpServlet> clazz, boolean requireAuth); @Override void addFilter(String name, String classname, Map<String, String> parameters); @Override void addGlobalFilter(String name, String classname, Map<String, String> parameters); static void defineFilter(Context ctx, String name, String classname, Map<String,String> parameters, String[] urls); Object getAttribute(String name); WebAppContext getWebAppContext(); String getWebAppsPath(String appName); @Deprecated int getPort(); InetSocketAddress getConnectorAddress(int index); void setThreads(int min, int max); void start(); void stop(); void join(); boolean isAlive(); @Override String toString(); static boolean isInstrumentationAccessAllowed( ServletContext servletContext, HttpServletRequest request, HttpServletResponse response); static boolean hasAdministratorAccess( ServletContext servletContext, HttpServletRequest request, HttpServletResponse response); static boolean userHasAdministratorAccess(ServletContext servletContext, String remoteUser); static final Log LOG; static final String CONF_CONTEXT_ATTRIBUTE; static final String ADMINS_ACL; static final String BIND_ADDRESS; static final String SPNEGO_FILTER; static final String NO_CACHE_FILTER; static final String APP_DIR; }
@Test public void testHttpServerBuilderWithExternalConnector() throws Exception { Connector c = mock(Connector.class); doReturn("localhost").when(c).getHost(); HttpServer s = new HttpServer.Builder().setName("test").setConnector(c) .build(); s.stop(); }
HRegion implements HeapSize { public static void closeHRegion(final HRegion r) throws IOException { if (r == null) return; r.close(); if (r.getLog() == null) return; r.getLog().closeAndDelete(); } @Deprecated HRegion(final Path tableDir, final HLog log, final FileSystem fs, final Configuration confParam, final HRegionInfo regionInfo, final HTableDescriptor htd, final RegionServerServices rsServices); HRegion(final HRegionFileSystem fs, final HLog log, final Configuration confParam, final HTableDescriptor htd, final RegionServerServices rsServices); long getSmallestReadPoint(); @Deprecated long initialize(); boolean hasReferences(); HDFSBlocksDistribution getHDFSBlocksDistribution(); static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf, final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo); static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf, final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo, Path tablePath); AtomicLong getMemstoreSize(); long addAndGetGlobalMemstoreSize(long memStoreSize); HRegionInfo getRegionInfo(); boolean isClosed(); boolean isClosing(); void setRecovering(boolean newState); boolean isRecovering(); boolean isAvailable(); boolean isSplittable(); boolean isMergeable(); boolean areWritesEnabled(); MultiVersionConsistencyControl getMVCC(); long getReadpoint(IsolationLevel isolationLevel); boolean isLoadingCfsOnDemandDefault(); Map<byte[], List<StoreFile>> close(); Map<byte[], List<StoreFile>> close(final boolean abort); void waitForFlushesAndCompactions(); byte [] getStartKey(); byte [] getEndKey(); long getRegionId(); byte [] getRegionName(); String getRegionNameAsString(); HTableDescriptor getTableDesc(); HLog getLog(); FileSystem getFilesystem(); HRegionFileSystem getRegionFileSystem(); long getLastFlushTime(); long getLargestHStoreSize(); KeyValue.KVComparator getComparator(); void compactStores(final boolean majorCompaction); void compactStores(); boolean compact(CompactionContext compaction, Store store); FlushResult flushcache(); Result getClosestRowBefore(final byte [] row, final byte [] family); RegionScanner getScanner(Scan scan); void delete(Delete delete); void put(Put put); OperationStatus[] batchMutate( Mutation[] mutations, long nonceGroup, long nonce); OperationStatus[] batchMutate(Mutation[] mutations); OperationStatus[] batchReplay(HLogSplitter.MutationReplay[] mutations); boolean checkAndMutate(byte [] row, byte [] family, byte [] qualifier, CompareOp compareOp, ByteArrayComparable comparator, Mutation w, boolean writeToWAL); void addRegionToSnapshot(SnapshotDescription desc, ForeignExceptionSnare exnSnare); Store getStore(final byte[] column); Map<byte[], Store> getStores(); List<String> getStoreFileList(final byte [][] columns); RowLock getRowLock(byte[] row, boolean waitForLock); RowLock getRowLock(byte[] row); void releaseRowLocks(List<RowLock> rowLocks); boolean bulkLoadHFiles(List<Pair<byte[], String>> familyPaths, boolean assignSeqId); boolean bulkLoadHFiles(List<Pair<byte[], String>> familyPaths, boolean assignSeqId, BulkLoadListener bulkLoadListener); @Override boolean equals(Object o); @Override int hashCode(); @Override String toString(); static HRegion createHRegion(final HRegionInfo info, final Path rootDir, final Configuration conf, final HTableDescriptor hTableDescriptor); static void closeHRegion(final HRegion r); static HRegion createHRegion(final HRegionInfo info, final Path rootDir, final Configuration conf, final HTableDescriptor 
hTableDescriptor, final HLog hlog, final boolean initialize); static HRegion createHRegion(final HRegionInfo info, final Path rootDir, final Configuration conf, final HTableDescriptor hTableDescriptor, final HLog hlog, final boolean initialize, final boolean ignoreHLog); static HRegion createHRegion(final HRegionInfo info, final Path rootDir, final Path tableDir, final Configuration conf, final HTableDescriptor hTableDescriptor, final HLog hlog, final boolean initialize, final boolean ignoreHLog); static HRegion createHRegion(final HRegionInfo info, final Path rootDir, final Configuration conf, final HTableDescriptor hTableDescriptor, final HLog hlog); static HRegion openHRegion(final HRegionInfo info, final HTableDescriptor htd, final HLog wal, final Configuration conf); static HRegion openHRegion(final HRegionInfo info, final HTableDescriptor htd, final HLog wal, final Configuration conf, final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final HLog wal, final Configuration conf); static HRegion openHRegion(final Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final HLog wal, final Configuration conf, final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(final Configuration conf, final FileSystem fs, final Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final HLog wal); static HRegion openHRegion(final Configuration conf, final FileSystem fs, final Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final HLog wal, final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(final Configuration conf, final FileSystem fs, final Path rootDir, final Path tableDir, final HRegionInfo info, final HTableDescriptor htd, final HLog wal, final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(final HRegion other, final CancelableProgressable reporter); static void addRegionToMETA(final HRegion meta, final HRegion r); @Deprecated static Path getRegionDir(final Path tabledir, final String name); @Deprecated static Path getRegionDir(final Path rootdir, final HRegionInfo info); static boolean rowIsInRange(HRegionInfo info, final byte [] row); static HRegion mergeAdjacent(final HRegion srcA, final HRegion srcB); static HRegion merge(final HRegion a, final HRegion b); Result get(final Get get); void mutateRow(RowMutations rm); void mutateRowsWithLocks(Collection<Mutation> mutations, Collection<byte[]> rowsToLock); void mutateRowsWithLocks(Collection<Mutation> mutations, Collection<byte[]> rowsToLock, long nonceGroup, long nonce); void processRowsWithLocks(RowProcessor<?,?> processor, long nonceGroup, long nonce); void processRowsWithLocks(RowProcessor<?,?> processor, long timeout, long nonceGroup, long nonce); Result append(Append append); Result append(Append append, long nonceGroup, long nonce); Result increment(Increment increment); Result increment(Increment increment, long nonceGroup, long nonce); @Override long heapSize(); boolean registerService(Service instance); Message execService(RpcController controller, CoprocessorServiceCall call); byte[] checkSplit(); int getCompactPriority(); RegionCoprocessorHost getCoprocessorHost(); void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost); void startRegionOperation(); void closeRegionOperation(); void 
closeRegionOperation(Operation operation); static void main(String[] args); long getOpenSeqNum(); Map<byte[], Long> getMaxStoreSeqIdForLogReplay(); CompactionState getCompactionState(); void reportCompactionRequestStart(boolean isMajor); void reportCompactionRequestEnd(boolean isMajor, int numFiles, long filesSizeCompacted); @VisibleForTesting AtomicLong getSequenceId(); static final Log LOG; static final String LOAD_CFS_ON_DEMAND_CONFIG_KEY; final AtomicLong memstoreSize; static final String MEMSTORE_PERIODIC_FLUSH_INTERVAL; static final int DEFAULT_CACHE_FLUSH_INTERVAL; static final String MEMSTORE_FLUSH_PER_CHANGES; static final long DEFAULT_FLUSH_PER_CHANGES; static final long MAX_FLUSH_PER_CHANGES; static final long FIXED_OVERHEAD; static final long DEEP_OVERHEAD; }
@Test public void testGetWhileRegionClose() throws IOException { TableName tableName = TableName.valueOf(name.getMethodName()); Configuration hc = initSplit(); int numRows = 100; byte[][] families = { fam1, fam2, fam3 }; String method = name.getMethodName(); this.region = initHRegion(tableName, method, hc, families); try { final int startRow = 100; putData(startRow, numRows, qual1, families); putData(startRow, numRows, qual2, families); putData(startRow, numRows, qual3, families); final AtomicBoolean done = new AtomicBoolean(false); final AtomicInteger gets = new AtomicInteger(0); GetTillDoneOrException[] threads = new GetTillDoneOrException[10]; try { for (int i = 0; i < threads.length / 2; i++) { threads[i] = new GetTillDoneOrException(i, Bytes.toBytes("" + startRow), done, gets); threads[i].setDaemon(true); threads[i].start(); } this.region.closing.set(true); for (int i = threads.length / 2; i < threads.length; i++) { threads[i] = new GetTillDoneOrException(i, Bytes.toBytes("" + startRow), done, gets); threads[i].setDaemon(true); threads[i].start(); } } finally { if (this.region != null) { HRegion.closeHRegion(this.region); } } done.set(true); for (GetTillDoneOrException t : threads) { try { t.join(); } catch (InterruptedException e) { e.printStackTrace(); } if (t.e != null) { LOG.info("Exception=" + t.e); assertFalse("Found a NPE in " + t.getName(), t.e instanceof NullPointerException); } } } finally { HRegion.closeHRegion(this.region); this.region = null; } } @Test public void testWeirdCacheBehaviour() throws Exception { byte[] TABLE = Bytes.toBytes("testWeirdCacheBehaviour"); byte[][] FAMILIES = new byte[][] { Bytes.toBytes("trans-blob"), Bytes.toBytes("trans-type"), Bytes.toBytes("trans-date"), Bytes.toBytes("trans-tags"), Bytes.toBytes("trans-group") }; this.region = initHRegion(TABLE, getName(), CONF, FAMILIES); try { String value = "this is the value"; String value2 = "this is some other value"; String keyPrefix1 = "prefix1"; String keyPrefix2 = "prefix2"; String keyPrefix3 = "prefix3"; putRows(this.region, 3, value, keyPrefix1); putRows(this.region, 3, value, keyPrefix2); putRows(this.region, 3, value, keyPrefix3); putRows(this.region, 3, value2, keyPrefix1); putRows(this.region, 3, value2, keyPrefix2); putRows(this.region, 3, value2, keyPrefix3); System.out.println("Checking values for key: " + keyPrefix1); assertEquals("Got back incorrect number of rows from scan", 3, getNumberOfRows(keyPrefix1, value2, this.region)); System.out.println("Checking values for key: " + keyPrefix2); assertEquals("Got back incorrect number of rows from scan", 3, getNumberOfRows(keyPrefix2, value2, this.region)); System.out.println("Checking values for key: " + keyPrefix3); assertEquals("Got back incorrect number of rows from scan", 3, getNumberOfRows(keyPrefix3, value2, this.region)); deleteColumns(this.region, value2, keyPrefix1); deleteColumns(this.region, value2, keyPrefix2); deleteColumns(this.region, value2, keyPrefix3); System.out.println("Starting important checks....."); assertEquals("Got back incorrect number of rows from scan: " + keyPrefix1, 0, getNumberOfRows(keyPrefix1, value2, this.region)); assertEquals("Got back incorrect number of rows from scan: " + keyPrefix2, 0, getNumberOfRows(keyPrefix2, value2, this.region)); assertEquals("Got back incorrect number of rows from scan: " + keyPrefix3, 0, getNumberOfRows(keyPrefix3, value2, this.region)); } finally { HRegion.closeHRegion(this.region); this.region = null; } }
HRegion implements HeapSize { public static HRegion merge(final HRegion a, final HRegion b) throws IOException { if (!a.getRegionInfo().getTable().equals(b.getRegionInfo().getTable())) { throw new IOException("Regions do not belong to the same table"); } FileSystem fs = a.getRegionFileSystem().getFileSystem(); a.flushcache(); b.flushcache(); a.compactStores(true); if (LOG.isDebugEnabled()) { LOG.debug("Files for region: " + a); a.getRegionFileSystem().logFileSystemState(LOG); } b.compactStores(true); if (LOG.isDebugEnabled()) { LOG.debug("Files for region: " + b); b.getRegionFileSystem().logFileSystemState(LOG); } RegionMergeTransaction rmt = new RegionMergeTransaction(a, b, true); if (!rmt.prepare(null)) { throw new IOException("Unable to merge regions " + a + " and " + b); } HRegionInfo mergedRegionInfo = rmt.getMergedRegionInfo(); LOG.info("starting merge of regions: " + a + " and " + b + " into new region " + mergedRegionInfo.getRegionNameAsString() + " with start key <" + Bytes.toStringBinary(mergedRegionInfo.getStartKey()) + "> and end key <" + Bytes.toStringBinary(mergedRegionInfo.getEndKey()) + ">"); HRegion dstRegion; try { dstRegion = rmt.execute(null, null); } catch (IOException ioe) { rmt.rollback(null, null); throw new IOException("Failed merging region " + a + " and " + b + ", and successfully rolled back"); } dstRegion.compactStores(true); if (LOG.isDebugEnabled()) { LOG.debug("Files for new region"); dstRegion.getRegionFileSystem().logFileSystemState(LOG); } if (dstRegion.getRegionFileSystem().hasReferences(dstRegion.getTableDesc())) { throw new IOException("Merged region " + dstRegion + " still has references after the compaction, is compaction canceled?"); } HFileArchiver.archiveRegion(a.getBaseConf(), fs, a.getRegionInfo()); HFileArchiver.archiveRegion(b.getBaseConf(), fs, b.getRegionInfo()); LOG.info("merge completed. 
New region is " + dstRegion); return dstRegion; } @Deprecated HRegion(final Path tableDir, final HLog log, final FileSystem fs, final Configuration confParam, final HRegionInfo regionInfo, final HTableDescriptor htd, final RegionServerServices rsServices); HRegion(final HRegionFileSystem fs, final HLog log, final Configuration confParam, final HTableDescriptor htd, final RegionServerServices rsServices); long getSmallestReadPoint(); @Deprecated long initialize(); boolean hasReferences(); HDFSBlocksDistribution getHDFSBlocksDistribution(); static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf, final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo); static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf, final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo, Path tablePath); AtomicLong getMemstoreSize(); long addAndGetGlobalMemstoreSize(long memStoreSize); HRegionInfo getRegionInfo(); boolean isClosed(); boolean isClosing(); void setRecovering(boolean newState); boolean isRecovering(); boolean isAvailable(); boolean isSplittable(); boolean isMergeable(); boolean areWritesEnabled(); MultiVersionConsistencyControl getMVCC(); long getReadpoint(IsolationLevel isolationLevel); boolean isLoadingCfsOnDemandDefault(); Map<byte[], List<StoreFile>> close(); Map<byte[], List<StoreFile>> close(final boolean abort); void waitForFlushesAndCompactions(); byte [] getStartKey(); byte [] getEndKey(); long getRegionId(); byte [] getRegionName(); String getRegionNameAsString(); HTableDescriptor getTableDesc(); HLog getLog(); FileSystem getFilesystem(); HRegionFileSystem getRegionFileSystem(); long getLastFlushTime(); long getLargestHStoreSize(); KeyValue.KVComparator getComparator(); void compactStores(final boolean majorCompaction); void compactStores(); boolean compact(CompactionContext compaction, Store store); FlushResult flushcache(); Result getClosestRowBefore(final byte [] row, final byte [] family); RegionScanner getScanner(Scan scan); void delete(Delete delete); void put(Put put); OperationStatus[] batchMutate( Mutation[] mutations, long nonceGroup, long nonce); OperationStatus[] batchMutate(Mutation[] mutations); OperationStatus[] batchReplay(HLogSplitter.MutationReplay[] mutations); boolean checkAndMutate(byte [] row, byte [] family, byte [] qualifier, CompareOp compareOp, ByteArrayComparable comparator, Mutation w, boolean writeToWAL); void addRegionToSnapshot(SnapshotDescription desc, ForeignExceptionSnare exnSnare); Store getStore(final byte[] column); Map<byte[], Store> getStores(); List<String> getStoreFileList(final byte [][] columns); RowLock getRowLock(byte[] row, boolean waitForLock); RowLock getRowLock(byte[] row); void releaseRowLocks(List<RowLock> rowLocks); boolean bulkLoadHFiles(List<Pair<byte[], String>> familyPaths, boolean assignSeqId); boolean bulkLoadHFiles(List<Pair<byte[], String>> familyPaths, boolean assignSeqId, BulkLoadListener bulkLoadListener); @Override boolean equals(Object o); @Override int hashCode(); @Override String toString(); static HRegion createHRegion(final HRegionInfo info, final Path rootDir, final Configuration conf, final HTableDescriptor hTableDescriptor); static void closeHRegion(final HRegion r); static HRegion createHRegion(final HRegionInfo info, final Path rootDir, final Configuration conf, final HTableDescriptor hTableDescriptor, final HLog hlog, final boolean initialize); static HRegion createHRegion(final HRegionInfo info, final Path rootDir, final 
Configuration conf, final HTableDescriptor hTableDescriptor, final HLog hlog, final boolean initialize, final boolean ignoreHLog); static HRegion createHRegion(final HRegionInfo info, final Path rootDir, final Path tableDir, final Configuration conf, final HTableDescriptor hTableDescriptor, final HLog hlog, final boolean initialize, final boolean ignoreHLog); static HRegion createHRegion(final HRegionInfo info, final Path rootDir, final Configuration conf, final HTableDescriptor hTableDescriptor, final HLog hlog); static HRegion openHRegion(final HRegionInfo info, final HTableDescriptor htd, final HLog wal, final Configuration conf); static HRegion openHRegion(final HRegionInfo info, final HTableDescriptor htd, final HLog wal, final Configuration conf, final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final HLog wal, final Configuration conf); static HRegion openHRegion(final Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final HLog wal, final Configuration conf, final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(final Configuration conf, final FileSystem fs, final Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final HLog wal); static HRegion openHRegion(final Configuration conf, final FileSystem fs, final Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final HLog wal, final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(final Configuration conf, final FileSystem fs, final Path rootDir, final Path tableDir, final HRegionInfo info, final HTableDescriptor htd, final HLog wal, final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(final HRegion other, final CancelableProgressable reporter); static void addRegionToMETA(final HRegion meta, final HRegion r); @Deprecated static Path getRegionDir(final Path tabledir, final String name); @Deprecated static Path getRegionDir(final Path rootdir, final HRegionInfo info); static boolean rowIsInRange(HRegionInfo info, final byte [] row); static HRegion mergeAdjacent(final HRegion srcA, final HRegion srcB); static HRegion merge(final HRegion a, final HRegion b); Result get(final Get get); void mutateRow(RowMutations rm); void mutateRowsWithLocks(Collection<Mutation> mutations, Collection<byte[]> rowsToLock); void mutateRowsWithLocks(Collection<Mutation> mutations, Collection<byte[]> rowsToLock, long nonceGroup, long nonce); void processRowsWithLocks(RowProcessor<?,?> processor, long nonceGroup, long nonce); void processRowsWithLocks(RowProcessor<?,?> processor, long timeout, long nonceGroup, long nonce); Result append(Append append); Result append(Append append, long nonceGroup, long nonce); Result increment(Increment increment); Result increment(Increment increment, long nonceGroup, long nonce); @Override long heapSize(); boolean registerService(Service instance); Message execService(RpcController controller, CoprocessorServiceCall call); byte[] checkSplit(); int getCompactPriority(); RegionCoprocessorHost getCoprocessorHost(); void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost); void startRegionOperation(); void closeRegionOperation(); void closeRegionOperation(Operation operation); static void main(String[] args); long getOpenSeqNum(); Map<byte[], Long> getMaxStoreSeqIdForLogReplay(); CompactionState 
getCompactionState(); void reportCompactionRequestStart(boolean isMajor); void reportCompactionRequestEnd(boolean isMajor, int numFiles, long filesSizeCompacted); @VisibleForTesting AtomicLong getSequenceId(); static final Log LOG; static final String LOAD_CFS_ON_DEMAND_CONFIG_KEY; final AtomicLong memstoreSize; static final String MEMSTORE_PERIODIC_FLUSH_INTERVAL; static final int DEFAULT_CACHE_FLUSH_INTERVAL; static final String MEMSTORE_FLUSH_PER_CHANGES; static final long DEFAULT_FLUSH_PER_CHANGES; static final long MAX_FLUSH_PER_CHANGES; static final long FIXED_OVERHEAD; static final long DEEP_OVERHEAD; }
@Test public void testMerge() throws IOException { byte[][] families = { fam1, fam2, fam3 }; Configuration hc = initSplit(); String method = this.getName(); this.region = initHRegion(tableName, method, hc, families); try { LOG.info("" + HBaseTestCase.addContent(region, fam3)); region.flushcache(); region.compactStores(); byte[] splitRow = region.checkSplit(); assertNotNull(splitRow); LOG.info("SplitRow: " + Bytes.toString(splitRow)); HRegion[] subregions = splitRegion(region, splitRow); try { for (int i = 0; i < subregions.length; i++) { HRegion.openHRegion(subregions[i], null); subregions[i].compactStores(); } Path oldRegionPath = region.getRegionFileSystem().getRegionDir(); Path oldRegion1 = subregions[0].getRegionFileSystem().getRegionDir(); Path oldRegion2 = subregions[1].getRegionFileSystem().getRegionDir(); long startTime = System.currentTimeMillis(); region = HRegion.mergeAdjacent(subregions[0], subregions[1]); LOG.info("Merge regions elapsed time: " + ((System.currentTimeMillis() - startTime) / 1000.0)); FILESYSTEM.delete(oldRegion1, true); FILESYSTEM.delete(oldRegion2, true); FILESYSTEM.delete(oldRegionPath, true); LOG.info("splitAndMerge completed."); } finally { for (int i = 0; i < subregions.length; i++) { try { HRegion.closeHRegion(subregions[i]); } catch (IOException e) { /* ignore close failures during cleanup */ } } } } finally { HRegion.closeHRegion(this.region); this.region = null; } }
HRegion implements HeapSize { public HDFSBlocksDistribution getHDFSBlocksDistribution() { HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution(); synchronized (this.stores) { for (Store store : this.stores.values()) { for (StoreFile sf : store.getStorefiles()) { HDFSBlocksDistribution storeFileBlocksDistribution = sf.getHDFSBlockDistribution(); hdfsBlocksDistribution.add(storeFileBlocksDistribution); } } } return hdfsBlocksDistribution; } @Deprecated HRegion(final Path tableDir, final HLog log, final FileSystem fs, final Configuration confParam, final HRegionInfo regionInfo, final HTableDescriptor htd, final RegionServerServices rsServices); HRegion(final HRegionFileSystem fs, final HLog log, final Configuration confParam, final HTableDescriptor htd, final RegionServerServices rsServices); long getSmallestReadPoint(); @Deprecated long initialize(); boolean hasReferences(); HDFSBlocksDistribution getHDFSBlocksDistribution(); static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf, final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo); static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf, final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo, Path tablePath); AtomicLong getMemstoreSize(); long addAndGetGlobalMemstoreSize(long memStoreSize); HRegionInfo getRegionInfo(); boolean isClosed(); boolean isClosing(); void setRecovering(boolean newState); boolean isRecovering(); boolean isAvailable(); boolean isSplittable(); boolean isMergeable(); boolean areWritesEnabled(); MultiVersionConsistencyControl getMVCC(); long getReadpoint(IsolationLevel isolationLevel); boolean isLoadingCfsOnDemandDefault(); Map<byte[], List<StoreFile>> close(); Map<byte[], List<StoreFile>> close(final boolean abort); void waitForFlushesAndCompactions(); byte [] getStartKey(); byte [] getEndKey(); long getRegionId(); byte [] getRegionName(); String getRegionNameAsString(); HTableDescriptor getTableDesc(); HLog getLog(); FileSystem getFilesystem(); HRegionFileSystem getRegionFileSystem(); long getLastFlushTime(); long getLargestHStoreSize(); KeyValue.KVComparator getComparator(); void compactStores(final boolean majorCompaction); void compactStores(); boolean compact(CompactionContext compaction, Store store); FlushResult flushcache(); Result getClosestRowBefore(final byte [] row, final byte [] family); RegionScanner getScanner(Scan scan); void delete(Delete delete); void put(Put put); OperationStatus[] batchMutate( Mutation[] mutations, long nonceGroup, long nonce); OperationStatus[] batchMutate(Mutation[] mutations); OperationStatus[] batchReplay(HLogSplitter.MutationReplay[] mutations); boolean checkAndMutate(byte [] row, byte [] family, byte [] qualifier, CompareOp compareOp, ByteArrayComparable comparator, Mutation w, boolean writeToWAL); void addRegionToSnapshot(SnapshotDescription desc, ForeignExceptionSnare exnSnare); Store getStore(final byte[] column); Map<byte[], Store> getStores(); List<String> getStoreFileList(final byte [][] columns); RowLock getRowLock(byte[] row, boolean waitForLock); RowLock getRowLock(byte[] row); void releaseRowLocks(List<RowLock> rowLocks); boolean bulkLoadHFiles(List<Pair<byte[], String>> familyPaths, boolean assignSeqId); boolean bulkLoadHFiles(List<Pair<byte[], String>> familyPaths, boolean assignSeqId, BulkLoadListener bulkLoadListener); @Override boolean equals(Object o); @Override int hashCode(); @Override String toString(); static HRegion createHRegion(final 
HRegionInfo info, final Path rootDir, final Configuration conf, final HTableDescriptor hTableDescriptor); static void closeHRegion(final HRegion r); static HRegion createHRegion(final HRegionInfo info, final Path rootDir, final Configuration conf, final HTableDescriptor hTableDescriptor, final HLog hlog, final boolean initialize); static HRegion createHRegion(final HRegionInfo info, final Path rootDir, final Configuration conf, final HTableDescriptor hTableDescriptor, final HLog hlog, final boolean initialize, final boolean ignoreHLog); static HRegion createHRegion(final HRegionInfo info, final Path rootDir, final Path tableDir, final Configuration conf, final HTableDescriptor hTableDescriptor, final HLog hlog, final boolean initialize, final boolean ignoreHLog); static HRegion createHRegion(final HRegionInfo info, final Path rootDir, final Configuration conf, final HTableDescriptor hTableDescriptor, final HLog hlog); static HRegion openHRegion(final HRegionInfo info, final HTableDescriptor htd, final HLog wal, final Configuration conf); static HRegion openHRegion(final HRegionInfo info, final HTableDescriptor htd, final HLog wal, final Configuration conf, final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final HLog wal, final Configuration conf); static HRegion openHRegion(final Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final HLog wal, final Configuration conf, final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(final Configuration conf, final FileSystem fs, final Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final HLog wal); static HRegion openHRegion(final Configuration conf, final FileSystem fs, final Path rootDir, final HRegionInfo info, final HTableDescriptor htd, final HLog wal, final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(final Configuration conf, final FileSystem fs, final Path rootDir, final Path tableDir, final HRegionInfo info, final HTableDescriptor htd, final HLog wal, final RegionServerServices rsServices, final CancelableProgressable reporter); static HRegion openHRegion(final HRegion other, final CancelableProgressable reporter); static void addRegionToMETA(final HRegion meta, final HRegion r); @Deprecated static Path getRegionDir(final Path tabledir, final String name); @Deprecated static Path getRegionDir(final Path rootdir, final HRegionInfo info); static boolean rowIsInRange(HRegionInfo info, final byte [] row); static HRegion mergeAdjacent(final HRegion srcA, final HRegion srcB); static HRegion merge(final HRegion a, final HRegion b); Result get(final Get get); void mutateRow(RowMutations rm); void mutateRowsWithLocks(Collection<Mutation> mutations, Collection<byte[]> rowsToLock); void mutateRowsWithLocks(Collection<Mutation> mutations, Collection<byte[]> rowsToLock, long nonceGroup, long nonce); void processRowsWithLocks(RowProcessor<?,?> processor, long nonceGroup, long nonce); void processRowsWithLocks(RowProcessor<?,?> processor, long timeout, long nonceGroup, long nonce); Result append(Append append); Result append(Append append, long nonceGroup, long nonce); Result increment(Increment increment); Result increment(Increment increment, long nonceGroup, long nonce); @Override long heapSize(); boolean registerService(Service instance); Message execService(RpcController controller, 
CoprocessorServiceCall call); byte[] checkSplit(); int getCompactPriority(); RegionCoprocessorHost getCoprocessorHost(); void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost); void startRegionOperation(); void closeRegionOperation(); void closeRegionOperation(Operation operation); static void main(String[] args); long getOpenSeqNum(); Map<byte[], Long> getMaxStoreSeqIdForLogReplay(); CompactionState getCompactionState(); void reportCompactionRequestStart(boolean isMajor); void reportCompactionRequestEnd(boolean isMajor, int numFiles, long filesSizeCompacted); @VisibleForTesting AtomicLong getSequenceId(); static final Log LOG; static final String LOAD_CFS_ON_DEMAND_CONFIG_KEY; final AtomicLong memstoreSize; static final String MEMSTORE_PERIODIC_FLUSH_INTERVAL; static final int DEFAULT_CACHE_FLUSH_INTERVAL; static final String MEMSTORE_FLUSH_PER_CHANGES; static final long DEFAULT_FLUSH_PER_CHANGES; static final long MAX_FLUSH_PER_CHANGES; static final long FIXED_OVERHEAD; static final long DEEP_OVERHEAD; }
@Test public void testgetHDFSBlocksDistribution() throws Exception { HBaseTestingUtility htu = new HBaseTestingUtility(); htu.getConfiguration().setInt("dfs.replication", 2); MiniHBaseCluster cluster = null; String dataNodeHosts[] = new String[] { "host1", "host2", "host3" }; int regionServersCount = 3; try { cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts); byte[][] families = { fam1, fam2 }; HTable ht = htu.createTable(Bytes.toBytes(this.getName()), families); byte row[] = Bytes.toBytes("row1"); byte col[] = Bytes.toBytes("col1"); Put put = new Put(row); put.add(fam1, col, 1, Bytes.toBytes("test1")); put.add(fam2, col, 1, Bytes.toBytes("test2")); ht.put(put); HRegion firstRegion = htu.getHBaseCluster().getRegions(TableName.valueOf(this.getName())) .get(0); firstRegion.flushcache(); HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution(); long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight(); StringBuilder sb = new StringBuilder(); for (String host: blocksDistribution1.getTopHosts()) { if (sb.length() > 0) sb.append(", "); sb.append(host); sb.append("="); sb.append(blocksDistribution1.getWeight(host)); } String topHost = blocksDistribution1.getTopHosts().get(0); long topHostWeight = blocksDistribution1.getWeight(topHost); String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" + topHostWeight + ", topHost=" + topHost + "; " + sb.toString(); LOG.info(msg); assertTrue(msg, uniqueBlocksWeight1 == topHostWeight); HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution( htu.getConfiguration(), firstRegion.getTableDesc(), firstRegion.getRegionInfo()); long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight(); assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2); ht.close(); } finally { if (cluster != null) { htu.shutdownMiniCluster(); } } }
ExportSnapshot extends Configured implements Tool { static List<List<Pair<SnapshotFileInfo, Long>>> getBalancedSplits( final List<Pair<SnapshotFileInfo, Long>> files, final int ngroups) { Collections.sort(files, new Comparator<Pair<SnapshotFileInfo, Long>>() { public int compare(Pair<SnapshotFileInfo, Long> a, Pair<SnapshotFileInfo, Long> b) { long r = a.getSecond() - b.getSecond(); return (r < 0) ? -1 : ((r > 0) ? 1 : 0); } }); List<List<Pair<SnapshotFileInfo, Long>>> fileGroups = new LinkedList<List<Pair<SnapshotFileInfo, Long>>>(); long[] sizeGroups = new long[ngroups]; int hi = files.size() - 1; int lo = 0; List<Pair<SnapshotFileInfo, Long>> group; int dir = 1; int g = 0; while (hi >= lo) { if (g == fileGroups.size()) { group = new LinkedList<Pair<SnapshotFileInfo, Long>>(); fileGroups.add(group); } else { group = fileGroups.get(g); } Pair<SnapshotFileInfo, Long> fileInfo = files.get(hi--); sizeGroups[g] += fileInfo.getSecond(); group.add(fileInfo); g += dir; if (g == ngroups) { dir = -1; g = ngroups - 1; } else if (g < 0) { dir = 1; g = 0; } } if (LOG.isDebugEnabled()) { for (int i = 0; i < sizeGroups.length; ++i) { LOG.debug("export split=" + i + " size=" + StringUtils.humanReadableInt(sizeGroups[i])); } } return fileGroups; } @Override int run(String[] args); static void main(String[] args); }
@Test public void testBalanceSplit() throws Exception { List<Pair<SnapshotFileInfo, Long>> files = new ArrayList<Pair<SnapshotFileInfo, Long>>(); for (long i = 0; i <= 20; i++) { SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder() .setType(SnapshotFileInfo.Type.HFILE) .setHfile("file-" + i) .build(); files.add(new Pair<SnapshotFileInfo, Long>(fileInfo, i)); } List<List<Pair<SnapshotFileInfo, Long>>> splits = ExportSnapshot.getBalancedSplits(files, 5); assertEquals(5, splits.size()); String[] split0 = new String[] {"file-20", "file-11", "file-10", "file-1", "file-0"}; verifyBalanceSplit(splits.get(0), split0, 42); String[] split1 = new String[] {"file-19", "file-12", "file-9", "file-2"}; verifyBalanceSplit(splits.get(1), split1, 42); String[] split2 = new String[] {"file-18", "file-13", "file-8", "file-3"}; verifyBalanceSplit(splits.get(2), split2, 42); String[] split3 = new String[] {"file-17", "file-14", "file-7", "file-4"}; verifyBalanceSplit(splits.get(3), split3, 42); String[] split4 = new String[] {"file-16", "file-15", "file-6", "file-5"}; verifyBalanceSplit(splits.get(4), split4, 42); }
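The serpentine distribution that getBalancedSplits performs can be reproduced in a self-contained sketch; this analogue works on raw sizes instead of SnapshotFileInfo pairs and is illustrative, not the HBase code:

import java.util.Arrays;

public class BalancedSplitSketch {
  // Assign sizes to groups largest-first, sweeping the group index back and forth.
  static long[] groupSums(long[] sizes, int ngroups) {
    long[] sorted = sizes.clone();
    Arrays.sort(sorted); // ascending; we walk it from the top
    long[] sums = new long[ngroups];
    int g = 0, dir = 1;
    for (int hi = sorted.length - 1; hi >= 0; hi--) {
      sums[g] += sorted[hi];
      g += dir;
      if (g == ngroups) { dir = -1; g = ngroups - 1; }
      else if (g < 0) { dir = 1; g = 0; }
    }
    return sums;
  }

  public static void main(String[] args) {
    long[] sizes = new long[21];
    for (int i = 0; i <= 20; i++) sizes[i] = i; // same 0..20 sizes as the test
    System.out.println(Arrays.toString(groupSums(sizes, 5))); // [42, 42, 42, 42, 42]
  }
}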
SnapshotDescriptionUtils { public static SnapshotDescription validate(SnapshotDescription snapshot, Configuration conf) throws IllegalArgumentException { if (!snapshot.hasTable()) { throw new IllegalArgumentException( "Descriptor doesn't apply to a table, so we can't build it."); } long time = snapshot.getCreationTime(); if (time == SnapshotDescriptionUtils.NO_SNAPSHOT_START_TIME_SPECIFIED) { time = EnvironmentEdgeManager.currentTimeMillis(); LOG.debug("Creation time not specified, setting to:" + time + " (current time:" + EnvironmentEdgeManager.currentTimeMillis() + ")."); SnapshotDescription.Builder builder = snapshot.toBuilder(); builder.setCreationTime(time); snapshot = builder.build(); } return snapshot; } private SnapshotDescriptionUtils(); static long getMaxMasterTimeout(Configuration conf, SnapshotDescription.Type type, long defaultMaxWaitTime); static Path getSnapshotRootDir(final Path rootDir); static Path getCompletedSnapshotDir(final SnapshotDescription snapshot, final Path rootDir); static Path getCompletedSnapshotDir(final String snapshotName, final Path rootDir); static Path getWorkingSnapshotDir(final Path rootDir); static Path getWorkingSnapshotDir(SnapshotDescription snapshot, final Path rootDir); static Path getWorkingSnapshotDir(String snapshotName, final Path rootDir); static final Path getSnapshotsDir(Path rootDir); static SnapshotDescription validate(SnapshotDescription snapshot, Configuration conf); static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingDir, FileSystem fs); static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotDir); static void completeSnapshot(SnapshotDescription snapshot, Path rootdir, Path workingDir, FileSystem fs); static final int SNAPSHOT_LAYOUT_VERSION; static final String SNAPSHOTINFO_FILE; static final String SNAPSHOT_TMP_DIR_NAME; static final long NO_SNAPSHOT_START_TIME_SPECIFIED; static final String MASTER_SNAPSHOT_TIMEOUT_MILLIS; static final long DEFAULT_MAX_WAIT_TIME; }
@Test public void testValidateMissingTableName() { Configuration conf = new Configuration(false); try { SnapshotDescriptionUtils.validate(SnapshotDescription.newBuilder().setName("fail").build(), conf); fail("Snapshot was considered valid without a table name"); } catch (IllegalArgumentException e) { LOG.debug("Correctly failed when snapshot doesn't have a tablename"); } }
SnapshotDescriptionUtils { public static void completeSnapshot(SnapshotDescription snapshot, Path rootdir, Path workingDir, FileSystem fs) throws SnapshotCreationException, IOException { Path finishedDir = getCompletedSnapshotDir(snapshot, rootdir); LOG.debug("Snapshot is done, just moving the snapshot from " + workingDir + " to " + finishedDir); if (!fs.rename(workingDir, finishedDir)) { throw new SnapshotCreationException("Failed to move working directory(" + workingDir + ") to completed directory(" + finishedDir + ").", snapshot); } } private SnapshotDescriptionUtils(); static long getMaxMasterTimeout(Configuration conf, SnapshotDescription.Type type, long defaultMaxWaitTime); static Path getSnapshotRootDir(final Path rootDir); static Path getCompletedSnapshotDir(final SnapshotDescription snapshot, final Path rootDir); static Path getCompletedSnapshotDir(final String snapshotName, final Path rootDir); static Path getWorkingSnapshotDir(final Path rootDir); static Path getWorkingSnapshotDir(SnapshotDescription snapshot, final Path rootDir); static Path getWorkingSnapshotDir(String snapshotName, final Path rootDir); static final Path getSnapshotsDir(Path rootDir); static SnapshotDescription validate(SnapshotDescription snapshot, Configuration conf); static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingDir, FileSystem fs); static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotDir); static void completeSnapshot(SnapshotDescription snapshot, Path rootdir, Path workingDir, FileSystem fs); static final int SNAPSHOT_LAYOUT_VERSION; static final String SNAPSHOTINFO_FILE; static final String SNAPSHOT_TMP_DIR_NAME; static final long NO_SNAPSHOT_START_TIME_SPECIFIED; static final String MASTER_SNAPSHOT_TIMEOUT_MILLIS; static final long DEFAULT_MAX_WAIT_TIME; }
@Test public void testCompleteSnapshotWithNoSnapshotDirectoryFailure() throws Exception { Path snapshotDir = new Path(root, HConstants.SNAPSHOT_DIR_NAME); Path tmpDir = new Path(snapshotDir, ".tmp"); Path workingDir = new Path(tmpDir, "not_a_snapshot"); assertFalse("Already have working snapshot dir: " + workingDir + " but shouldn't. Test file leak?", fs.exists(workingDir)); SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("snapshot").build(); try { SnapshotDescriptionUtils.completeSnapshot(snapshot, root, workingDir, fs); fail("Shouldn't successfully complete move of a non-existent directory."); } catch (IOException e) { LOG.info("Correctly failed to move non-existent directory: " + e.getMessage()); } }
ExecutorService { public ExecutorService(final String servername) { super(); this.servername = servername; } ExecutorService(final String servername); void shutdown(); void startExecutorService(final ExecutorType type, final int maxThreads); void submit(final EventHandler eh); void registerListener(final EventType type, final EventHandlerListener listener); EventHandlerListener unregisterListener(final EventType type); Map<String, ExecutorStatus> getAllExecutorStatuses(); }
@Test public void testExecutorService() throws Exception { int maxThreads = 5; int maxTries = 10; int sleepInterval = 10; Server mockedServer = mock(Server.class); when(mockedServer.getConfiguration()).thenReturn(HBaseConfiguration.create()); ExecutorService executorService = new ExecutorService("unit_test"); executorService.startExecutorService( ExecutorType.MASTER_SERVER_OPERATIONS, maxThreads); Executor executor = executorService.getExecutor(ExecutorType.MASTER_SERVER_OPERATIONS); ThreadPoolExecutor pool = executor.threadPoolExecutor; assertEquals(0, pool.getPoolSize()); AtomicBoolean lock = new AtomicBoolean(true); AtomicInteger counter = new AtomicInteger(0); for (int i = 0; i < maxThreads; i++) { executorService.submit( new TestEventHandler(mockedServer, EventType.M_SERVER_SHUTDOWN, lock, counter)); } int tries = 0; while (counter.get() < maxThreads && tries < maxTries) { LOG.info("Waiting for all event handlers to start..."); Thread.sleep(sleepInterval); tries++; } assertEquals(maxThreads, counter.get()); assertEquals(maxThreads, pool.getPoolSize()); ExecutorStatus status = executor.getStatus(); assertTrue(status.queuedEvents.isEmpty()); assertEquals(5, status.running.size()); checkStatusDump(status); synchronized (lock) { lock.set(false); lock.notifyAll(); } while (counter.get() < (maxThreads * 2) && tries < maxTries) { System.out.println("Waiting for all event handlers to finish..."); Thread.sleep(sleepInterval); tries++; } assertEquals(maxThreads * 2, counter.get()); assertEquals(maxThreads, pool.getPoolSize()); for (int i = 0; i < (2 * maxThreads); i++) { executorService.submit( new TestEventHandler(mockedServer, EventType.M_SERVER_SHUTDOWN, lock, counter)); } synchronized (lock) { lock.set(false); lock.notifyAll(); } Thread.sleep(ExecutorService.Executor.keepAliveTimeInMillis * 2); assertEquals(maxThreads, pool.getPoolSize()); executorService.shutdown(); assertEquals(0, executorService.getAllExecutorStatuses().size()); executorService.submit( new TestEventHandler(mockedServer, EventType.M_SERVER_SHUTDOWN, lock, counter)); }
MetaMigrationConvertingToPB { static boolean isMetaTableUpdated(final CatalogTracker catalogTracker) throws IOException { List<Result> results = MetaReader.fullScanOfMeta(catalogTracker); if (results == null || results.isEmpty()) { LOG.info("hbase:meta doesn't have any entries to update."); return true; } for (Result r : results) { byte[] value = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); if (!isMigrated(value)) { return false; } } return true; } static long updateMetaIfNecessary(final MasterServices services); }
@Test public void testMetaUpdatedFlagInROOT() throws Exception { HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster(); boolean metaUpdated = MetaMigrationConvertingToPB.isMetaTableUpdated(master.getCatalogTracker()); assertTrue(metaUpdated); verifyMetaRowsAreUpdated(master.getCatalogTracker()); }
JMXListener implements Coprocessor { @Override public void start(CoprocessorEnvironment env) throws IOException { int rmiRegistryPort = -1; int rmiConnectorPort = -1; Configuration conf = env.getConfiguration(); if (env instanceof MasterCoprocessorEnvironment) { LOG.error("JMXListener should not be loaded in Master Environment!"); } else if (env instanceof RegionServerCoprocessorEnvironment) { rmiRegistryPort = conf.getInt("regionserver" + RMI_REGISTRY_PORT_CONF_KEY, defRMIRegistryPort); rmiConnectorPort = conf.getInt("regionserver" + RMI_CONNECTOR_PORT_CONF_KEY, rmiRegistryPort); LOG.info("RegionServer rmiRegistryPort:" + rmiRegistryPort + ",RegionServer rmiConnectorPort:" + rmiConnectorPort); } else if (env instanceof RegionCoprocessorEnvironment) { LOG.error("JMXListener should not be loaded in Region Environment!"); } synchronized(JMXListener.class) { if (jmxCS != null) { LOG.info("JMXListener has been started at Registry port " + rmiRegistryPort); } else { startConnectorServer(rmiRegistryPort, rmiConnectorPort); } } } static JMXServiceURL buildJMXServiceURL(int rmiRegistryPort, int rmiConnectorPort); void startConnectorServer(int rmiRegistryPort, int rmiConnectorPort); void stopConnectorServer(); @Override void start(CoprocessorEnvironment env); @Override void stop(CoprocessorEnvironment env); static final Log LOG; static final String RMI_REGISTRY_PORT_CONF_KEY; static final String RMI_CONNECTOR_PORT_CONF_KEY; static int defRMIRegistryPort; }
@Test public void testStart() throws Exception { JMXConnector connector = JMXConnectorFactory.connect( JMXListener.buildJMXServiceURL(connectorPort,connectorPort)); MBeanServerConnection mb = connector.getMBeanServerConnection(); String domain = mb.getDefaultDomain(); Assert.assertTrue("default domain is not correct", !domain.isEmpty()); connector.close(); }
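The URL the test connects to follows the conventional RMI registry/connector layout; the exact string buildJMXServiceURL emits is not shown in this snippet, so the format below is an assumption, built with the standard javax.management.remote API:

import javax.management.remote.JMXServiceURL;

public class JmxUrlSketch {
  public static void main(String[] args) throws Exception {
    int registryPort = 10102;  // hypothetical ports for illustration
    int connectorPort = 10102;
    JMXServiceURL url = new JMXServiceURL("service:jmx:rmi://localhost:" + connectorPort
        + "/jndi/rmi://localhost:" + registryPort + "/jmxrmi");
    System.out.println(url); // parsed and normalized by JMXServiceURL
  }
}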
JMXListener implements Coprocessor { @Override public void stop(CoprocessorEnvironment env) throws IOException { stopConnectorServer(); } static JMXServiceURL buildJMXServiceURL(int rmiRegistryPort, int rmiConnectorPort); void startConnectorServer(int rmiRegistryPort, int rmiConnectorPort); void stopConnectorServer(); @Override void start(CoprocessorEnvironment env); @Override void stop(CoprocessorEnvironment env); static final Log LOG; static final String RMI_REGISTRY_PORT_CONF_KEY; static final String RMI_CONNECTOR_PORT_CONF_KEY; static int defRMIRegistryPort; }
@Test public void testStop() throws Exception { MiniHBaseCluster cluster = UTIL.getHBaseCluster(); LOG.info("shutdown hbase cluster..."); cluster.shutdown(); LOG.info("wait for the hbase cluster shutdown..."); cluster.waitUntilShutDown(); JMXConnector connector = JMXConnectorFactory.newJMXConnector( JMXListener.buildJMXServiceURL(connectorPort,connectorPort), null); expectedEx.expect(IOException.class); connector.connect(); }
KeyLocker { public ReentrantLock acquireLock(K key) { if (key == null) throw new IllegalArgumentException("key must not be null"); Pair<KeyLock<K>, AtomicInteger> lock; synchronized (this) { lock = locks.get(key); if (lock == null) { lock = new Pair<KeyLock<K>, AtomicInteger>( new KeyLock<K>(this, key), new AtomicInteger(1)); locks.put(key, lock); } else { lock.getSecond().incrementAndGet(); } } lock.getFirst().lock(); return lock.getFirst(); } ReentrantLock acquireLock(K key); Map<K, Lock> acquireLocks(final Set<K> keys); }
@Test public void testLocker() { KeyLocker<String> locker = new KeyLocker<String>(); ReentrantLock lock1 = locker.acquireLock("l1"); Assert.assertTrue(lock1.isHeldByCurrentThread()); ReentrantLock lock2 = locker.acquireLock("l2"); Assert.assertTrue(lock2.isHeldByCurrentThread()); Assert.assertTrue(lock1 != lock2); ReentrantLock lock20 = locker.acquireLock("l2"); Assert.assertTrue(lock20 == lock2); Assert.assertTrue(lock2.isHeldByCurrentThread()); Assert.assertTrue(lock20.isHeldByCurrentThread()); lock20.unlock(); Assert.assertTrue(lock20.isHeldByCurrentThread()); lock2.unlock(); Assert.assertFalse(lock20.isHeldByCurrentThread()); ReentrantLock lock200 = locker.acquireLock("l2"); Assert.assertTrue(lock2 != lock200); lock200.unlock(); Assert.assertFalse(lock200.isHeldByCurrentThread()); Assert.assertTrue(lock1.isHeldByCurrentThread()); lock1.unlock(); Assert.assertFalse(lock1.isHeldByCurrentThread()); }
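A simplified, self-contained analogue of per-key locking may help read the assertions above. Note one deliberate difference: this version never evicts entries, whereas the test shows KeyLocker hands out a fresh ReentrantLock once a key's reference count drops to zero (lock2 != lock200):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantLock;

public class KeyLockerSketch {
  private final ConcurrentHashMap<String, ReentrantLock> locks = new ConcurrentHashMap<>();

  // Block until the per-key lock is held by the current thread, then return it.
  ReentrantLock acquireLock(String key) {
    ReentrantLock lock = locks.computeIfAbsent(key, k -> new ReentrantLock());
    lock.lock();
    return lock;
  }

  public static void main(String[] args) {
    KeyLockerSketch locker = new KeyLockerSketch();
    ReentrantLock l = locker.acquireLock("row-1");
    try {
      System.out.println("held: " + l.isHeldByCurrentThread()); // true
    } finally {
      l.unlock();
    }
  }
}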
Counter { public void increment() { add(1); } Counter(); Counter(long initValue); private Counter(Cell initCell); void add(long delta); void increment(); void decrement(); void set(long value); long get(); @Override String toString(); }
@Test public void testIncrement() throws Exception { for(int threadCount : THREAD_COUNTS) { final Counter counter = new Counter(); execute(new Operation() { @Override public void execute() { counter.increment(); } }, threadCount); Assert.assertEquals(threadCount * (long)DATA_COUNT, counter.get()); } }
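A single-threaded sketch of the Counter API (assumed usage; judging from the private Cell-based constructor, the class stripes concurrent add() calls across cells rather than contending on one AtomicLong, which is what the multi-threaded test above stresses).

import org.apache.hadoop.hbase.util.Counter;

public class CounterDemo {
  public static void main(String[] args) {
    Counter ops = new Counter();
    ops.increment();  // +1
    ops.add(10);      // +10
    ops.decrement();  // -1
    System.out.println(ops.get());  // 10 in this single-threaded run
  }
}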
Threads { public static void sleepWithoutInterrupt(final long msToWait) { long timeMillis = System.currentTimeMillis(); long endTime = timeMillis + msToWait; boolean interrupted = false; while (timeMillis < endTime) { try { Thread.sleep(endTime - timeMillis); } catch (InterruptedException ex) { interrupted = true; } timeMillis = System.currentTimeMillis(); } if (interrupted) { Thread.currentThread().interrupt(); } } static Thread setDaemonThreadRunning(final Thread t); static Thread setDaemonThreadRunning(final Thread t, final String name); static Thread setDaemonThreadRunning(final Thread t, final String name, final UncaughtExceptionHandler handler); static void shutdown(final Thread t); static void shutdown(final Thread t, final long joinwait); static void threadDumpingIsAlive(final Thread t); static void sleep(long millis); static void sleepWithoutInterrupt(final long msToWait); static ThreadPoolExecutor getBoundedCachedThreadPool( int maxCachedThread, long timeout, TimeUnit unit, ThreadFactory threadFactory); static ThreadFactory getNamedThreadFactory(final String prefix); static ThreadFactory newDaemonThreadFactory(final String prefix); static ThreadFactory newDaemonThreadFactory(final String prefix, final UncaughtExceptionHandler handler); }
@Test(timeout=60000) public void testSleepWithoutInterrupt() throws InterruptedException { Thread sleeper = new Thread(new Runnable() { @Override public void run() { LOG.debug("Sleeper thread: sleeping for " + SLEEP_TIME_MS); Threads.sleepWithoutInterrupt(SLEEP_TIME_MS); LOG.debug("Sleeper thread: finished sleeping"); wasInterrupted.set(Thread.currentThread().isInterrupted()); } }); LOG.debug("Starting sleeper thread (" + SLEEP_TIME_MS + " ms)"); sleeper.start(); long startTime = System.currentTimeMillis(); LOG.debug("Main thread: sleeping for 200 ms"); Threads.sleep(200); LOG.debug("Interrupting the sleeper thread and sleeping for 500 ms"); sleeper.interrupt(); Threads.sleep(500); LOG.debug("Interrupting the sleeper thread and sleeping for 800 ms"); sleeper.interrupt(); Threads.sleep(800); LOG.debug("Interrupting the sleeper thread again"); sleeper.interrupt(); sleeper.join(); assertTrue("sleepWithoutInterrupt did not preserve the thread's " + "interrupted status", wasInterrupted.get()); long timeElapsed = System.currentTimeMillis() - startTime; assertTrue("Elapsed time " + timeElapsed + " ms is out of the expected " + " sleep time of " + SLEEP_TIME_MS, SLEEP_TIME_MS - timeElapsed < TOLERANCE_MS); LOG.debug("Target sleep time: " + SLEEP_TIME_MS + ", time elapsed: " + timeElapsed); }
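A sketch of the property the test verifies (assumed usage): sleepWithoutInterrupt always sleeps the full duration, swallowing interrupts along the way, but re-asserts the thread's interrupt flag before returning so callers can still react.

import org.apache.hadoop.hbase.util.Threads;

public class SleepDemo {
  public static void main(String[] args) {
    Thread.currentThread().interrupt();  // simulate an interrupt arriving mid-sleep
    Threads.sleepWithoutInterrupt(100);  // still sleeps the full 100 ms
    // the flag was re-asserted on exit, so it remains observable:
    System.out.println(Thread.currentThread().isInterrupted());  // true
  }
}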
DrainBarrier { public void stopAndDrainOpsOnce() throws InterruptedException { stopAndDrainOps(false); } boolean beginOp(); @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY", justification="First, we do change the state before notify, 2nd, it doesn't even matter") void endOp(); void stopAndDrainOps(); void stopAndDrainOpsOnce(); }
@Test public void testMultipleStopOnceAssert() throws Exception { DrainBarrier barrier = new DrainBarrier(); barrier.stopAndDrainOpsOnce(); try { barrier.stopAndDrainOpsOnce(); fail("Should have asserted"); } catch (AssertionError e) { } }
DrainBarrier { public void stopAndDrainOps() throws InterruptedException { stopAndDrainOps(true); } boolean beginOp(); @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY", justification="First, we do change the state before notify, 2nd, it doesn't even matter") void endOp(); void stopAndDrainOps(); void stopAndDrainOpsOnce(); }
@Test public void testMultipleSloppyStopsHaveNoEffect() throws Exception { DrainBarrier barrier = new DrainBarrier(); barrier.stopAndDrainOps(); barrier.stopAndDrainOps(); }
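A sketch of the intended begin/end/drain protocol, inferred from the API rather than taken from the source: operations bracket themselves with beginOp()/endOp(), and shutdown calls stopAndDrainOps(), which blocks until all in-flight operations finish and causes later beginOp() calls to be refused.

import org.apache.hadoop.hbase.util.DrainBarrier;

public class DrainDemo {
  public static void main(String[] args) throws InterruptedException {
    DrainBarrier barrier = new DrainBarrier();
    if (barrier.beginOp()) {  // false once the barrier has been stopped
      try {
        // the guarded operation runs here
      } finally {
        barrier.endOp();
      }
    }
    barrier.stopAndDrainOps();  // blocks until all begun operations have ended
    System.out.println(barrier.beginOp());  // false: new operations are rejected
  }
}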
FSHDFSUtils extends FSUtils { boolean recoverLease(final DistributedFileSystem dfs, final int nbAttempt, final Path p, final long startWaiting) throws FileNotFoundException { boolean recovered = false; try { recovered = dfs.recoverLease(p); LOG.info("recoverLease=" + recovered + ", " + getLogMessageDetail(nbAttempt, p, startWaiting)); } catch (IOException e) { if (e instanceof LeaseExpiredException && e.getMessage().contains("File does not exist")) { throw new FileNotFoundException("The given HLog wasn't found at " + p); } else if (e instanceof FileNotFoundException) { throw (FileNotFoundException)e; } LOG.warn(getLogMessageDetail(nbAttempt, p, startWaiting), e); } return recovered; } static boolean isSameHdfs(Configuration conf, FileSystem srcFs, FileSystem desFs); @Override void recoverFileLease(final FileSystem fs, final Path p, Configuration conf, CancelableProgressable reporter); }
@Test (timeout = 30000) public void testRecoverLease() throws IOException { HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000); CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class); Mockito.when(reporter.progress()).thenReturn(true); DistributedFileSystem dfs = Mockito.mock(DistributedFileSystem.class); Mockito.when(dfs.recoverLease(FILE)). thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(true); assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration(), reporter)); Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE); assertTrue((EnvironmentEdgeManager.currentTimeMillis() - this.startTime) > (3 * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000))); }
ConcatenatedLists implements Collection<T> { public void addSublist(List<T> items) { if (!items.isEmpty()) { this.components.add(items); this.size += items.size(); } } void addAllSublists(List<? extends List<T>> items); void addSublist(List<T> items); @Override int size(); @Override boolean isEmpty(); @Override boolean contains(Object o); @Override boolean containsAll(Collection<?> c); @Override Object[] toArray(); @Override @SuppressWarnings("unchecked") U[] toArray(U[] a); @Override boolean add(T e); @Override boolean remove(Object o); @Override boolean addAll(Collection<? extends T> c); @Override boolean removeAll(Collection<?> c); @Override boolean retainAll(Collection<?> c); @Override void clear(); @Override java.util.Iterator<T> iterator(); }
@Test public void testOneOne() { ConcatenatedLists<Long> c = new ConcatenatedLists<Long>(); c.addSublist(Arrays.asList(0L)); verify(c, 0); } @Test public void testOneMany() { ConcatenatedLists<Long> c = new ConcatenatedLists<Long>(); c.addSublist(Arrays.asList(0L, 1L, 2L)); verify(c, 2); }
OrderedBytes { public static int encodeString(PositionedByteRange dst, String val, Order ord) { if (null == val) { return encodeNull(dst, ord); } if (val.contains("\u0000")) throw new IllegalArgumentException("Cannot encode String values containing '\\u0000'"); final int offset = dst.getOffset(), start = dst.getPosition(); dst.put(TEXT); dst.put(val.getBytes(UTF8)); dst.put(TERM); ord.apply(dst.getBytes(), offset + start, dst.getPosition() - start); return dst.getPosition() - start; } static int encodeNumeric(PositionedByteRange dst, long val, Order ord); static int encodeNumeric(PositionedByteRange dst, double val, Order ord); static int encodeNumeric(PositionedByteRange dst, BigDecimal val, Order ord); static double decodeNumericAsDouble(PositionedByteRange src); static long decodeNumericAsLong(PositionedByteRange src); static BigDecimal decodeNumericAsBigDecimal(PositionedByteRange src); static int encodeString(PositionedByteRange dst, String val, Order ord); static String decodeString(PositionedByteRange src); static int blobVarEncodedLength(int len); static int encodeBlobVar(PositionedByteRange dst, byte[] val, int voff, int vlen, Order ord); static int encodeBlobVar(PositionedByteRange dst, byte[] val, Order ord); static byte[] decodeBlobVar(PositionedByteRange src); static int encodeBlobCopy(PositionedByteRange dst, byte[] val, int voff, int vlen, Order ord); static int encodeBlobCopy(PositionedByteRange dst, byte[] val, Order ord); static byte[] decodeBlobCopy(PositionedByteRange src); static int encodeNull(PositionedByteRange dst, Order ord); static int encodeInt8(PositionedByteRange dst, byte val, Order ord); static byte decodeInt8(PositionedByteRange src); static int encodeInt16(PositionedByteRange dst, short val, Order ord); static short decodeInt16(PositionedByteRange src); static int encodeInt32(PositionedByteRange dst, int val, Order ord); static int decodeInt32(PositionedByteRange src); static int encodeInt64(PositionedByteRange dst, long val, Order ord); static long decodeInt64(PositionedByteRange src); static int encodeFloat32(PositionedByteRange dst, float val, Order ord); static float decodeFloat32(PositionedByteRange src); static int encodeFloat64(PositionedByteRange dst, double val, Order ord); static double decodeFloat64(PositionedByteRange src); static boolean isEncodedValue(PositionedByteRange src); static boolean isNull(PositionedByteRange src); static boolean isNumeric(PositionedByteRange src); static boolean isNumericInfinite(PositionedByteRange src); static boolean isNumericNaN(PositionedByteRange src); static boolean isNumericZero(PositionedByteRange src); static boolean isFixedInt32(PositionedByteRange src); static boolean isFixedInt64(PositionedByteRange src); static boolean isFixedFloat32(PositionedByteRange src); static boolean isFixedFloat64(PositionedByteRange src); static boolean isText(PositionedByteRange src); static boolean isBlobVar(PositionedByteRange src); static boolean isBlobCopy(PositionedByteRange src); static int skip(PositionedByteRange src); static int length(PositionedByteRange buff); static final Charset UTF8; static final int MAX_PRECISION; static final MathContext DEFAULT_MATH_CONTEXT; }
@Test(expected = IllegalArgumentException.class) public void testStringNoNullChars() { PositionedByteRange buff = new SimplePositionedByteRange(3); OrderedBytes.encodeString(buff, "\u0000", Order.ASCENDING); }
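A happy-path round trip to complement the rejection test above (assumed usage; imports follow HBase's org.apache.hadoop.hbase.util package): encodeString writes the TEXT header, the UTF-8 bytes, and a terminator, and decodeString reverses it.

import org.apache.hadoop.hbase.util.Order;
import org.apache.hadoop.hbase.util.OrderedBytes;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedByteRange;

public class StringRoundTrip {
  public static void main(String[] args) {
    PositionedByteRange buff = new SimplePositionedByteRange(20);
    OrderedBytes.encodeString(buff, "hello", Order.ASCENDING);  // TEXT + UTF-8 bytes + TERM
    buff.setPosition(0);
    System.out.println(OrderedBytes.decodeString(buff));  // hello
  }
}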
OrderedBytes { public static int encodeBlobCopy(PositionedByteRange dst, byte[] val, int voff, int vlen, Order ord) { if (null == val) { encodeNull(dst, ord); if (ASCENDING == ord) return 1; else { dst.put(ord.apply(TERM)); return 2; } } assert dst.getRemaining() >= vlen + (ASCENDING == ord ? 1 : 2); if (DESCENDING == ord) { for (int i = 0; i < vlen; i++) { if (TERM == val[voff + i]) { throw new IllegalArgumentException("0x00 bytes not permitted in value."); } } } final int offset = dst.getOffset(), start = dst.getPosition(); dst.put(BLOB_COPY); dst.put(val, voff, vlen); if (DESCENDING == ord) dst.put(TERM); ord.apply(dst.getBytes(), offset + start, dst.getPosition() - start); return dst.getPosition() - start; } static int encodeNumeric(PositionedByteRange dst, long val, Order ord); static int encodeNumeric(PositionedByteRange dst, double val, Order ord); static int encodeNumeric(PositionedByteRange dst, BigDecimal val, Order ord); static double decodeNumericAsDouble(PositionedByteRange src); static long decodeNumericAsLong(PositionedByteRange src); static BigDecimal decodeNumericAsBigDecimal(PositionedByteRange src); static int encodeString(PositionedByteRange dst, String val, Order ord); static String decodeString(PositionedByteRange src); static int blobVarEncodedLength(int len); static int encodeBlobVar(PositionedByteRange dst, byte[] val, int voff, int vlen, Order ord); static int encodeBlobVar(PositionedByteRange dst, byte[] val, Order ord); static byte[] decodeBlobVar(PositionedByteRange src); static int encodeBlobCopy(PositionedByteRange dst, byte[] val, int voff, int vlen, Order ord); static int encodeBlobCopy(PositionedByteRange dst, byte[] val, Order ord); static byte[] decodeBlobCopy(PositionedByteRange src); static int encodeNull(PositionedByteRange dst, Order ord); static int encodeInt8(PositionedByteRange dst, byte val, Order ord); static byte decodeInt8(PositionedByteRange src); static int encodeInt16(PositionedByteRange dst, short val, Order ord); static short decodeInt16(PositionedByteRange src); static int encodeInt32(PositionedByteRange dst, int val, Order ord); static int decodeInt32(PositionedByteRange src); static int encodeInt64(PositionedByteRange dst, long val, Order ord); static long decodeInt64(PositionedByteRange src); static int encodeFloat32(PositionedByteRange dst, float val, Order ord); static float decodeFloat32(PositionedByteRange src); static int encodeFloat64(PositionedByteRange dst, double val, Order ord); static double decodeFloat64(PositionedByteRange src); static boolean isEncodedValue(PositionedByteRange src); static boolean isNull(PositionedByteRange src); static boolean isNumeric(PositionedByteRange src); static boolean isNumericInfinite(PositionedByteRange src); static boolean isNumericNaN(PositionedByteRange src); static boolean isNumericZero(PositionedByteRange src); static boolean isFixedInt32(PositionedByteRange src); static boolean isFixedInt64(PositionedByteRange src); static boolean isFixedFloat32(PositionedByteRange src); static boolean isFixedFloat64(PositionedByteRange src); static boolean isText(PositionedByteRange src); static boolean isBlobVar(PositionedByteRange src); static boolean isBlobCopy(PositionedByteRange src); static int skip(PositionedByteRange src); static int length(PositionedByteRange buff); static final Charset UTF8; static final int MAX_PRECISION; static final MathContext DEFAULT_MATH_CONTEXT; }
@Test(expected = IllegalArgumentException.class) public void testBlobCopyNoZeroBytes() { byte[] val = { 0x01, 0x02, 0x00, 0x03 }; byte[] ascExpected = { 0x38, 0x01, 0x02, 0x00, 0x03 }; PositionedByteRange buf = new SimplePositionedByteRange(val.length + 1); OrderedBytes.encodeBlobCopy(buf, val, Order.ASCENDING); assertArrayEquals(ascExpected, buf.getBytes()); buf.set(val.length + 2); OrderedBytes.encodeBlobCopy(buf, val, Order.DESCENDING); fail("test should never get here."); }
OrderedBytes { public static int skip(PositionedByteRange src) { final int start = src.getPosition(); byte header = src.get(); Order ord = (-1 == Integer.signum(header)) ? DESCENDING : ASCENDING; header = ord.apply(header); switch (header) { case NULL: case NEG_INF: return 1; case NEG_LARGE: skipVaruint64(src, DESCENDING != ord); skipSignificand(src, DESCENDING != ord); return src.getPosition() - start; case NEG_MED_MIN: case NEG_MED_MIN + 0x01: case NEG_MED_MIN + 0x02: case NEG_MED_MIN + 0x03: case NEG_MED_MIN + 0x04: case NEG_MED_MIN + 0x05: case NEG_MED_MIN + 0x06: case NEG_MED_MIN + 0x07: case NEG_MED_MIN + 0x08: case NEG_MED_MIN + 0x09: case NEG_MED_MAX: skipSignificand(src, DESCENDING != ord); return src.getPosition() - start; case NEG_SMALL: skipVaruint64(src, DESCENDING == ord); skipSignificand(src, DESCENDING != ord); return src.getPosition() - start; case ZERO: return 1; case POS_SMALL: skipVaruint64(src, DESCENDING != ord); skipSignificand(src, DESCENDING == ord); return src.getPosition() - start; case POS_MED_MIN: case POS_MED_MIN + 0x01: case POS_MED_MIN + 0x02: case POS_MED_MIN + 0x03: case POS_MED_MIN + 0x04: case POS_MED_MIN + 0x05: case POS_MED_MIN + 0x06: case POS_MED_MIN + 0x07: case POS_MED_MIN + 0x08: case POS_MED_MIN + 0x09: case POS_MED_MAX: skipSignificand(src, DESCENDING == ord); return src.getPosition() - start; case POS_LARGE: skipVaruint64(src, DESCENDING == ord); skipSignificand(src, DESCENDING == ord); return src.getPosition() - start; case POS_INF: return 1; case NAN: return 1; case FIXED_INT8: src.setPosition(src.getPosition() + 1); return src.getPosition() - start; case FIXED_INT16: src.setPosition(src.getPosition() + 2); return src.getPosition() - start; case FIXED_INT32: src.setPosition(src.getPosition() + 4); return src.getPosition() - start; case FIXED_INT64: src.setPosition(src.getPosition() + 8); return src.getPosition() - start; case FIXED_FLOAT32: src.setPosition(src.getPosition() + 4); return src.getPosition() - start; case FIXED_FLOAT64: src.setPosition(src.getPosition() + 8); return src.getPosition() - start; case TEXT: do { header = ord.apply(src.get()); } while (header != TERM); return src.getPosition() - start; case BLOB_VAR: do { header = ord.apply(src.get()); } while ((byte) (header & 0x80) != TERM); return src.getPosition() - start; case BLOB_COPY: if (Order.DESCENDING == ord) { do { header = ord.apply(src.get()); } while (header != TERM); return src.getPosition() - start; } else { src.setPosition(src.getLength()); return src.getPosition() - start; } default: throw unexpectedHeader(header); } } static int encodeNumeric(PositionedByteRange dst, long val, Order ord); static int encodeNumeric(PositionedByteRange dst, double val, Order ord); static int encodeNumeric(PositionedByteRange dst, BigDecimal val, Order ord); static double decodeNumericAsDouble(PositionedByteRange src); static long decodeNumericAsLong(PositionedByteRange src); static BigDecimal decodeNumericAsBigDecimal(PositionedByteRange src); static int encodeString(PositionedByteRange dst, String val, Order ord); static String decodeString(PositionedByteRange src); static int blobVarEncodedLength(int len); static int encodeBlobVar(PositionedByteRange dst, byte[] val, int voff, int vlen, Order ord); static int encodeBlobVar(PositionedByteRange dst, byte[] val, Order ord); static byte[] decodeBlobVar(PositionedByteRange src); static int encodeBlobCopy(PositionedByteRange dst, byte[] val, int voff, int vlen, Order ord); static int encodeBlobCopy(PositionedByteRange dst, byte[] val, 
Order ord); static byte[] decodeBlobCopy(PositionedByteRange src); static int encodeNull(PositionedByteRange dst, Order ord); static int encodeInt8(PositionedByteRange dst, byte val, Order ord); static byte decodeInt8(PositionedByteRange src); static int encodeInt16(PositionedByteRange dst, short val, Order ord); static short decodeInt16(PositionedByteRange src); static int encodeInt32(PositionedByteRange dst, int val, Order ord); static int decodeInt32(PositionedByteRange src); static int encodeInt64(PositionedByteRange dst, long val, Order ord); static long decodeInt64(PositionedByteRange src); static int encodeFloat32(PositionedByteRange dst, float val, Order ord); static float decodeFloat32(PositionedByteRange src); static int encodeFloat64(PositionedByteRange dst, double val, Order ord); static double decodeFloat64(PositionedByteRange src); static boolean isEncodedValue(PositionedByteRange src); static boolean isNull(PositionedByteRange src); static boolean isNumeric(PositionedByteRange src); static boolean isNumericInfinite(PositionedByteRange src); static boolean isNumericNaN(PositionedByteRange src); static boolean isNumericZero(PositionedByteRange src); static boolean isFixedInt32(PositionedByteRange src); static boolean isFixedInt64(PositionedByteRange src); static boolean isFixedFloat32(PositionedByteRange src); static boolean isFixedFloat64(PositionedByteRange src); static boolean isText(PositionedByteRange src); static boolean isBlobVar(PositionedByteRange src); static boolean isBlobCopy(PositionedByteRange src); static int skip(PositionedByteRange src); static int length(PositionedByteRange buff); static final Charset UTF8; static final int MAX_PRECISION; static final MathContext DEFAULT_MATH_CONTEXT; }
@Test public void testSkip() { BigDecimal longMax = BigDecimal.valueOf(Long.MAX_VALUE); double negInf = Double.NEGATIVE_INFINITY; BigDecimal negLarge = longMax.multiply(longMax).negate(); BigDecimal negMed = new BigDecimal("-10.0"); BigDecimal negSmall = new BigDecimal("-0.0010"); long zero = 0l; BigDecimal posSmall = negSmall.negate(); BigDecimal posMed = negMed.negate(); BigDecimal posLarge = negLarge.negate(); double posInf = Double.POSITIVE_INFINITY; double nan = Double.NaN; byte int8 = 100; short int16 = 100; int int32 = 100; long int64 = 100l; float float32 = 100.0f; double float64 = 100.0d; String text = "hello world."; byte[] blobVar = Bytes.toBytes("foo"); byte[] blobCopy = Bytes.toBytes("bar"); for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) { PositionedByteRange buff = new SimplePositionedByteRange(30); int o; o = OrderedBytes.encodeNull(buff, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeNumeric(buff, negInf, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeNumeric(buff, negLarge, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeNumeric(buff, negMed, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeNumeric(buff, negSmall, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeNumeric(buff, zero, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeNumeric(buff, posSmall, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeNumeric(buff, posMed, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeNumeric(buff, posLarge, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeNumeric(buff, posInf, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeNumeric(buff, nan, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeInt8(buff, int8, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeInt16(buff, int16, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeInt32(buff, int32, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeInt64(buff, int64, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeFloat32(buff, float32, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeFloat64(buff, float64, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeString(buff, text, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.setPosition(0); o = OrderedBytes.encodeBlobVar(buff, blobVar, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); buff.set(blobCopy.length + (Order.ASCENDING == ord ? 
1 : 2)); o = OrderedBytes.encodeBlobCopy(buff, blobCopy, ord); buff.setPosition(0); assertEquals(o, OrderedBytes.skip(buff)); } }
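A sketch of what skip() buys a caller (assumed usage): it advances past one encoded value without materializing it, which is how a reader can step over leading fields of a compound rowkey to reach the one it wants.

import org.apache.hadoop.hbase.util.Order;
import org.apache.hadoop.hbase.util.OrderedBytes;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedByteRange;

public class SkipDemo {
  public static void main(String[] args) {
    PositionedByteRange key = new SimplePositionedByteRange(40);
    OrderedBytes.encodeInt32(key, 7, Order.ASCENDING);        // first field
    OrderedBytes.encodeString(key, "user", Order.ASCENDING);  // second field
    key.setPosition(0);
    OrderedBytes.skip(key);  // step over the int32 without decoding it
    System.out.println(OrderedBytes.decodeString(key));  // user
  }
}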
CoprocessorClassLoader extends ClassLoaderBase { public static CoprocessorClassLoader getClassLoader(final Path path, final ClassLoader parent, final String pathPrefix, final Configuration conf) throws IOException { CoprocessorClassLoader cl = getIfCached(path); String pathStr = path.toString(); if (cl != null) { LOG.debug("Found classloader "+ cl + " for "+ pathStr); return cl; } if (!pathStr.endsWith(".jar")) { throw new IOException(pathStr + ": not a jar file?"); } Lock lock = locker.acquireLock(pathStr); try { cl = getIfCached(path); if (cl != null) { LOG.debug("Found classloader "+ cl + " for "+ pathStr); return cl; } cl = AccessController.doPrivileged( new PrivilegedAction<CoprocessorClassLoader>() { @Override public CoprocessorClassLoader run() { return new CoprocessorClassLoader(parent); } }); cl.init(path, pathPrefix, conf); CoprocessorClassLoader prev = classLoadersCache.putIfAbsent(path, cl); if (prev != null) { LOG.warn("THIS SHOULD NOT HAPPEN, a class loader" +" is already cached for " + pathStr); cl = prev; } return cl; } finally { lock.unlock(); } } private CoprocessorClassLoader(ClassLoader parent); static CoprocessorClassLoader getIfCached(final Path path); static Collection<? extends ClassLoader> getAllCached(); static void clearCache(); static CoprocessorClassLoader getClassLoader(final Path path, final ClassLoader parent, final String pathPrefix, final Configuration conf); @Override Class<?> loadClass(String name); @Override URL getResource(String name); }
@Test public void testCleanupOldJars() throws Exception { String className = "TestCleanupOldJars"; String folder = TEST_UTIL.getDataTestDir().toString(); File jarFile = ClassLoaderTestHelper.buildJar( folder, className, null, ClassLoaderTestHelper.localDirPath(conf)); File tmpJarFile = new File(jarFile.getParent(), "/tmp/" + className + ".test.jar"); if (tmpJarFile.exists()) tmpJarFile.delete(); assertFalse("tmp jar file should not exist", tmpJarFile.exists()); IOUtils.copyBytes(new FileInputStream(jarFile), new FileOutputStream(tmpJarFile), conf, true); assertTrue("tmp jar file should be created", tmpJarFile.exists()); Path path = new Path(jarFile.getAbsolutePath()); ClassLoader parent = TestCoprocessorClassLoader.class.getClassLoader(); CoprocessorClassLoader.parentDirLockSet.clear(); ClassLoader classLoader = CoprocessorClassLoader.getClassLoader(path, parent, "111", conf); assertNotNull("Classloader should be created", classLoader); assertFalse("tmp jar file should be removed", tmpJarFile.exists()); }
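A hedged sketch of the loader API (the jar path and coprocessor class name below are placeholders, not from the source): getClassLoader caches one classloader per jar path and hands back the cached instance on repeat calls, which is why the test clears parentDirLockSet before exercising it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.CoprocessorClassLoader;

public class LoaderDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // hypothetical jar and class name, for illustration only:
    Path jar = new Path("/hbase/lib/my-coprocessor.jar");
    ClassLoader cl = CoprocessorClassLoader.getClassLoader(
        jar, LoaderDemo.class.getClassLoader(), "demo-", conf);
    Class<?> observer = cl.loadClass("org.example.MyRegionObserver");
    System.out.println(observer.getName());
  }
}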
AES extends Cipher { @VisibleForTesting SecureRandom getRNG() { return rng; } AES(CipherProvider provider); @Override String getName(); @Override int getKeyLength(); @Override int getIvLength(); @Override Key getRandomKey(); @Override Encryptor getEncryptor(); @Override Decryptor getDecryptor(); @Override OutputStream createEncryptionStream(OutputStream out, Context context, byte[] iv); @Override OutputStream createEncryptionStream(OutputStream out, Encryptor e); @Override InputStream createDecryptionStream(InputStream in, Context context, byte[] iv); @Override InputStream createDecryptionStream(InputStream in, Decryptor d); static final int KEY_LENGTH; static final int KEY_LENGTH_BITS; static final int IV_LENGTH; static final String CIPHER_MODE_KEY; static final String CIPHER_PROVIDER_KEY; static final String RNG_ALGORITHM_KEY; static final String RNG_PROVIDER_KEY; }
@Test public void testAlternateRNG() throws Exception { Security.addProvider(new TestProvider()); Configuration conf = new Configuration(); conf.set(AES.RNG_ALGORITHM_KEY, "TestRNG"); conf.set(AES.RNG_PROVIDER_KEY, "TEST"); DefaultCipherProvider.getInstance().setConf(conf); AES aes = new AES(DefaultCipherProvider.getInstance()); assertEquals("AES did not find alternate RNG", "TestRNG", aes.getRNG().getAlgorithm()); }
LRUDictionary implements Dictionary { @Override public short addEntry(byte[] data, int offset, int length) { if (length <= 0) return NOT_IN_DICTIONARY; return backingStore.put(data, offset, length); } @Override byte[] getEntry(short idx); @Override void init(int initialSize); @Override short findEntry(byte[] data, int offset, int length); @Override short addEntry(byte[] data, int offset, int length); @Override void clear(); }
@Test public void testPassingSameArrayToAddEntry() { int len = HConstants.CATALOG_FAMILY.length; int index = testee.addEntry(HConstants.CATALOG_FAMILY, 0, len); assertFalse(index == testee.addEntry(HConstants.CATALOG_FAMILY, 0, len)); assertFalse(index == testee.addEntry(HConstants.CATALOG_FAMILY, 0, len)); }
LRUDictionary implements Dictionary { @Override public short findEntry(byte[] data, int offset, int length) { short ret = backingStore.findIdx(data, offset, length); if (ret == NOT_IN_DICTIONARY) { addEntry(data, offset, length); } return ret; } @Override byte[] getEntry(short idx); @Override void init(int initialSize); @Override short findEntry(byte[] data, int offset, int length); @Override short addEntry(byte[] data, int offset, int length); @Override void clear(); }
@Test public void TestLRUPolicy(){ for (int i = 0; i < Short.MAX_VALUE; i++) { testee.findEntry((BigInteger.valueOf(i)).toByteArray(), 0, (BigInteger.valueOf(i)).toByteArray().length); } assertTrue(testee.findEntry(BigInteger.ZERO.toByteArray(), 0, BigInteger.ZERO.toByteArray().length) != -1); assertTrue(testee.findEntry(BigInteger.valueOf(Integer.MAX_VALUE).toByteArray(), 0, BigInteger.valueOf(Integer.MAX_VALUE).toByteArray().length) == -1); assertTrue(testee.findEntry(BigInteger.valueOf(Integer.MAX_VALUE).toByteArray(), 0, BigInteger.valueOf(Integer.MAX_VALUE).toByteArray().length) != -1); assertTrue(testee.findEntry(BigInteger.ZERO.toByteArray(), 0, BigInteger.ZERO.toByteArray().length) != -1); for(int i = 1; i < Short.MAX_VALUE; i++) { assertTrue(testee.findEntry(BigInteger.valueOf(i).toByteArray(), 0, BigInteger.valueOf(i).toByteArray().length) == -1); } for (int i = 0; i < Short.MAX_VALUE; i++) { assertTrue(testee.findEntry(BigInteger.valueOf(i).toByteArray(), 0, BigInteger.valueOf(i).toByteArray().length) != -1); } }
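A sketch of the dictionary contract the two tests rely on (assumed from the code above): the first findEntry for unseen bytes returns NOT_IN_DICTIONARY (-1) and adds the entry as a side effect; a repeat lookup of the same bytes returns its index.

import org.apache.hadoop.hbase.io.util.LRUDictionary;
import org.apache.hadoop.hbase.util.Bytes;

public class DictDemo {
  public static void main(String[] args) {
    LRUDictionary dict = new LRUDictionary();
    dict.init(Short.MAX_VALUE);
    byte[] word = Bytes.toBytes("family");
    short first = dict.findEntry(word, 0, word.length);   // -1: not yet present, gets added
    short second = dict.findEntry(word, 0, word.length);  // now a valid index
    System.out.println(first + " -> " + second);
  }
}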
CellUtil { public static boolean overlappingKeys(final byte[] start1, final byte[] end1, final byte[] start2, final byte[] end2) { return (end2.length == 0 || start1.length == 0 || Bytes.compareTo(start1, end2) < 0) && (end1.length == 0 || start2.length == 0 || Bytes.compareTo(start2, end1) < 0); } static ByteRange fillRowRange(Cell cell, ByteRange range); static ByteRange fillFamilyRange(Cell cell, ByteRange range); static ByteRange fillQualifierRange(Cell cell, ByteRange range); static ByteRange fillTagRange(Cell cell, ByteRange range); static byte[] cloneRow(Cell cell); static byte[] cloneFamily(Cell cell); static byte[] cloneQualifier(Cell cell); static byte[] cloneValue(Cell cell); static byte[] getTagArray(Cell cell); static int copyRowTo(Cell cell, byte[] destination, int destinationOffset); static int copyFamilyTo(Cell cell, byte[] destination, int destinationOffset); static int copyQualifierTo(Cell cell, byte[] destination, int destinationOffset); static int copyValueTo(Cell cell, byte[] destination, int destinationOffset); static int copyTagTo(Cell cell, byte[] destination, int destinationOffset); static byte getRowByte(Cell cell, int index); static ByteBuffer getValueBufferShallowCopy(Cell cell); static ByteBuffer getQualifierBufferShallowCopy(Cell cell); static Cell createCell(final byte [] row, final byte [] family, final byte [] qualifier, final long timestamp, final byte type, final byte [] value); static Cell createCell(final byte[] row, final byte[] family, final byte[] qualifier, final long timestamp, final byte type, final byte[] value, final long memstoreTS); static Cell createCell(final byte[] row, final byte[] family, final byte[] qualifier, final long timestamp, final byte type, final byte[] value, byte[] tags, final long memstoreTS); static Cell createCell(final byte[] row, final byte[] family, final byte[] qualifier, final long timestamp, Type type, final byte[] value, byte[] tags); static CellScanner createCellScanner(final List<? 
extends CellScannable> cellScannerables); static CellScanner createCellScanner(final Iterable<Cell> cellIterable); static CellScanner createCellScanner(final Iterator<Cell> cells); static CellScanner createCellScanner(final Cell[] cellArray); static CellScanner createCellScanner(final NavigableMap<byte [], List<Cell>> map); static boolean matchingRow(final Cell left, final Cell right); static boolean matchingRow(final Cell left, final byte[] buf); static boolean matchingRow(final Cell left, final byte[] buf, final int offset, final int length); static boolean matchingFamily(final Cell left, final Cell right); static boolean matchingFamily(final Cell left, final byte[] buf); static boolean matchingFamily(final Cell left, final byte[] buf, final int offset, final int length); static boolean matchingQualifier(final Cell left, final Cell right); static boolean matchingQualifier(final Cell left, final byte[] buf); static boolean matchingQualifier(final Cell left, final byte[] buf, final int offset, final int length); static boolean matchingColumn(final Cell left, final byte[] fam, final byte[] qual); static boolean matchingColumn(final Cell left, final byte[] fam, final int foffset, final int flength, final byte[] qual, final int qoffset, final int qlength); static boolean matchingColumn(final Cell left, final Cell right); static boolean matchingValue(final Cell left, final Cell right); static boolean matchingValue(final Cell left, final byte[] buf); static boolean isDelete(final Cell cell); static boolean isDeleteFamily(final Cell cell); static boolean isDeleteFamilyVersion(final Cell cell); static int estimatedSizeOf(final Cell cell); static Iterator<Tag> tagsIterator(final byte[] tags, final int offset, final int length); static boolean overlappingKeys(final byte[] start1, final byte[] end1, final byte[] start2, final byte[] end2); }
@Test public void testOverlappingKeys() { byte[] empty = HConstants.EMPTY_BYTE_ARRAY; byte[] a = Bytes.toBytes("a"); byte[] b = Bytes.toBytes("b"); byte[] c = Bytes.toBytes("c"); byte[] d = Bytes.toBytes("d"); Assert.assertTrue(CellUtil.overlappingKeys(a, b, a, b)); Assert.assertTrue(CellUtil.overlappingKeys(a, c, a, b)); Assert.assertTrue(CellUtil.overlappingKeys(a, b, a, c)); Assert.assertTrue(CellUtil.overlappingKeys(b, c, a, c)); Assert.assertTrue(CellUtil.overlappingKeys(a, c, b, c)); Assert.assertTrue(CellUtil.overlappingKeys(a, d, b, c)); Assert.assertTrue(CellUtil.overlappingKeys(b, c, a, d)); Assert.assertTrue(CellUtil.overlappingKeys(empty, b, a, b)); Assert.assertTrue(CellUtil.overlappingKeys(empty, b, a, c)); Assert.assertTrue(CellUtil.overlappingKeys(a, b, empty, b)); Assert.assertTrue(CellUtil.overlappingKeys(a, b, empty, c)); Assert.assertTrue(CellUtil.overlappingKeys(a, empty, a, b)); Assert.assertTrue(CellUtil.overlappingKeys(a, empty, a, c)); Assert.assertTrue(CellUtil.overlappingKeys(a, b, empty, empty)); Assert.assertTrue(CellUtil.overlappingKeys(empty, empty, a, b)); Assert.assertFalse(CellUtil.overlappingKeys(a, b, c, d)); Assert.assertFalse(CellUtil.overlappingKeys(c, d, a, b)); Assert.assertFalse(CellUtil.overlappingKeys(b, c, c, d)); Assert.assertFalse(CellUtil.overlappingKeys(b, c, c, empty)); Assert.assertFalse(CellUtil.overlappingKeys(b, c, d, empty)); Assert.assertFalse(CellUtil.overlappingKeys(c, d, b, c)); Assert.assertFalse(CellUtil.overlappingKeys(c, empty, b, c)); Assert.assertFalse(CellUtil.overlappingKeys(d, empty, b, c)); Assert.assertFalse(CellUtil.overlappingKeys(b, c, a, b)); Assert.assertFalse(CellUtil.overlappingKeys(b, c, empty, b)); Assert.assertFalse(CellUtil.overlappingKeys(b, c, empty, a)); Assert.assertFalse(CellUtil.overlappingKeys(a,b, b, c)); Assert.assertFalse(CellUtil.overlappingKeys(empty, b, b, c)); Assert.assertFalse(CellUtil.overlappingKeys(empty, a, b, c)); }
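A compact sketch of the overlap semantics the test exercises (assumed reading of the code above): ranges are half-open [start, end), an empty array means unbounded on that side, and two ranges overlap only if each one starts strictly before the other ends.

import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.Bytes;

public class OverlapDemo {
  public static void main(String[] args) {
    byte[] a = Bytes.toBytes("a"), b = Bytes.toBytes("b");
    byte[] c = Bytes.toBytes("c"), d = Bytes.toBytes("d");
    System.out.println(CellUtil.overlappingKeys(a, c, b, d));  // true: [a,c) and [b,d) intersect
    // [a,b) merely touches [b,unbounded); touching is not overlap:
    System.out.println(CellUtil.overlappingKeys(a, b, b, HConstants.EMPTY_BYTE_ARRAY));  // false
  }
}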
FSHDFSUtils extends FSUtils { private boolean isFileClosed(final DistributedFileSystem dfs, final Method m, final Path p) { try { return (Boolean) m.invoke(dfs, p); } catch (SecurityException e) { LOG.warn("No access", e); } catch (Exception e) { LOG.warn("Failed invocation for " + p.toString(), e); } return false; } static boolean isSameHdfs(Configuration conf, FileSystem srcFs, FileSystem desFs); @Override void recoverFileLease(final FileSystem fs, final Path p, Configuration conf, CancelableProgressable reporter); }
@Test (timeout = 30000) public void testIsFileClosed() throws IOException { HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 100000); CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class); Mockito.when(reporter.progress()).thenReturn(true); IsFileClosedDistributedFileSystem dfs = Mockito.mock(IsFileClosedDistributedFileSystem.class); Mockito.when(dfs.recoverLease(FILE)). thenReturn(false).thenReturn(false).thenReturn(true); Mockito.when(dfs.isFileClosed(FILE)).thenReturn(true); assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration(), reporter)); Mockito.verify(dfs, Mockito.times(2)).recoverLease(FILE); Mockito.verify(dfs, Mockito.times(1)).isFileClosed(FILE); }
OrderedBlob extends OrderedBytesBase<byte[]> { @Override public int encodedLength(byte[] val) { return null == val ? (Order.ASCENDING == order ? 1 : 2) : (Order.ASCENDING == order ? val.length + 1 : val.length + 2); } protected OrderedBlob(Order order); @Override boolean isSkippable(); @Override int encodedLength(byte[] val); @Override Class<byte[]> encodedClass(); @Override byte[] decode(PositionedByteRange src); @Override int encode(PositionedByteRange dst, byte[] val); int encode(PositionedByteRange dst, byte[] val, int voff, int vlen); static final OrderedBlob ASCENDING; static final OrderedBlob DESCENDING; }
@Test public void testEncodedLength() { PositionedByteRange buff = new SimplePositionedByteRange(20); for (DataType<byte[]> type : new OrderedBlob[] { OrderedBlob.ASCENDING, OrderedBlob.DESCENDING }) { for (byte[] val : VALUES) { buff.setPosition(0); type.encode(buff, val); assertEquals( "encodedLength does not match actual, " + Bytes.toStringBinary(val), buff.getPosition(), type.encodedLength(val)); } } }
OrderedString extends OrderedBytesBase<String> { @Override public int encodedLength(String val) { return null == val ? 1 : val.getBytes(OrderedBytes.UTF8).length + 2; } protected OrderedString(Order order); @Override int encodedLength(String val); @Override Class<String> encodedClass(); @Override String decode(PositionedByteRange src); @Override int encode(PositionedByteRange dst, String val); static final OrderedString ASCENDING; static final OrderedString DESCENDING; }
@Test public void testEncodedLength() { PositionedByteRange buff = new SimplePositionedByteRange(20); for (DataType<String> type : new OrderedString[] { OrderedString.ASCENDING, OrderedString.DESCENDING }) { for (String val : VALUES) { buff.setPosition(0); type.encode(buff, val); assertEquals( "encodedLength does not match actual, " + val, buff.getPosition(), type.encodedLength(val)); } } }
OrderedBlobVar extends OrderedBytesBase<byte[]> { @Override public int encodedLength(byte[] val) { return null == val ? 1 : OrderedBytes.blobVarEncodedLength(val.length); } protected OrderedBlobVar(Order order); @Override int encodedLength(byte[] val); @Override Class<byte[]> encodedClass(); @Override byte[] decode(PositionedByteRange src); @Override int encode(PositionedByteRange dst, byte[] val); int encode(PositionedByteRange dst, byte[] val, int voff, int vlen); static final OrderedBlobVar ASCENDING; static final OrderedBlobVar DESCENDING; }
@Test public void testEncodedLength() { PositionedByteRange buff = new SimplePositionedByteRange(20); for (DataType<byte[]> type : new OrderedBlobVar[] { OrderedBlobVar.ASCENDING, OrderedBlobVar.DESCENDING }) { for (byte[] val : VALUES) { buff.setPosition(0); type.encode(buff, val); assertEquals( "encodedLength does not match actual, " + Bytes.toStringBinary(val), buff.getPosition(), type.encodedLength(val)); } } }
TerminatedWrapper implements DataType<T> { @Override public int encode(PositionedByteRange dst, T val) { final int start = dst.getPosition(); int written = wrapped.encode(dst, val); PositionedByteRange b = dst.shallowCopy(); b.setLength(dst.getPosition()); b.setPosition(start); if (-1 != terminatorPosition(b)) { dst.setPosition(start); throw new IllegalArgumentException("Encoded value contains terminator sequence."); } dst.put(term); return written + term.length; } TerminatedWrapper(DataType<T> wrapped, byte[] term); TerminatedWrapper(DataType<T> wrapped, String term); @Override boolean isOrderPreserving(); @Override Order getOrder(); @Override boolean isNullable(); @Override boolean isSkippable(); @Override int encodedLength(T val); @Override Class<T> encodedClass(); @Override int skip(PositionedByteRange src); @Override T decode(PositionedByteRange src); @Override int encode(PositionedByteRange dst, T val); }
@Test(expected = IllegalArgumentException.class) public void testEncodedValueContainsTerm() { DataType<byte[]> type = new TerminatedWrapper<byte[]>(new RawBytes(), "foo"); PositionedByteRange buff = new SimplePositionedByteRange(16); type.encode(buff, Bytes.toBytes("hello foobar!")); }
TerminatedWrapper implements DataType<T> { @Override public int skip(PositionedByteRange src) { if (wrapped.isSkippable()) { int ret = wrapped.skip(src); src.setPosition(src.getPosition() + term.length); return ret + term.length; } else { final int start = src.getPosition(); int skipped = terminatorPosition(src); if (-1 == skipped) throw new IllegalArgumentException("Terminator sequence not found."); skipped += term.length; src.setPosition(skipped); return skipped - start; } } TerminatedWrapper(DataType<T> wrapped, byte[] term); TerminatedWrapper(DataType<T> wrapped, String term); @Override boolean isOrderPreserving(); @Override Order getOrder(); @Override boolean isNullable(); @Override boolean isSkippable(); @Override int encodedLength(T val); @Override Class<T> encodedClass(); @Override int skip(PositionedByteRange src); @Override T decode(PositionedByteRange src); @Override int encode(PositionedByteRange dst, T val); }
@Test(expected = IllegalArgumentException.class) public void testInvalidSkip() { PositionedByteRange buff = new SimplePositionedByteRange(Bytes.toBytes("foo")); DataType<byte[]> type = new TerminatedWrapper<byte[]>(new RawBytes(), new byte[] { 0x00 }); type.skip(buff); }
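A happy-path round trip to complement the failure tests (assumed usage; imports follow HBase's types and util packages): the wrapper appends the terminator on encode and consumes it on decode, which is what makes a non-skippable type like RawBytes usable inside compound rowkeys.

import org.apache.hadoop.hbase.types.DataType;
import org.apache.hadoop.hbase.types.RawBytes;
import org.apache.hadoop.hbase.types.TerminatedWrapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedByteRange;

public class TerminatedRoundTrip {
  public static void main(String[] args) {
    DataType<byte[]> type = new TerminatedWrapper<byte[]>(new RawBytes(), new byte[] { 0x00 });
    PositionedByteRange buff = new SimplePositionedByteRange(16);
    type.encode(buff, Bytes.toBytes("foo"));  // writes "foo" then the 0x00 terminator
    buff.setPosition(0);
    System.out.println(Bytes.toString(type.decode(buff)));  // foo
  }
}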
FixedLengthWrapper implements DataType<T> { @Override public T decode(PositionedByteRange src) { if (src.getRemaining() < length) { throw new IllegalArgumentException("Not enough buffer remaining. src.offset: " + src.getOffset() + " src.length: " + src.getLength() + " src.position: " + src.getPosition() + " max length: " + length); } PositionedByteRange b = new SimplePositionedByteRange(length); src.get(b.getBytes()); return base.decode(b); } FixedLengthWrapper(DataType<T> base, int length); int getLength(); @Override boolean isOrderPreserving(); @Override Order getOrder(); @Override boolean isNullable(); @Override boolean isSkippable(); @Override int encodedLength(T val); @Override Class<T> encodedClass(); @Override int skip(PositionedByteRange src); @Override T decode(PositionedByteRange src); @Override int encode(PositionedByteRange dst, T val); }
@Test(expected = IllegalArgumentException.class) public void testInsufficientRemainingRead() { PositionedByteRange buff = new SimplePositionedByteRange(0); DataType<byte[]> type = new FixedLengthWrapper<byte[]>(new RawBytes(), 3); type.decode(buff); }
FixedLengthWrapper implements DataType<T> { @Override public int encode(PositionedByteRange dst, T val) { if (dst.getRemaining() < length) { throw new IllegalArgumentException("Not enough buffer remaining. dst.offset: " + dst.getOffset() + " dst.length: " + dst.getLength() + " dst.position: " + dst.getPosition() + " max length: " + length); } int written = base.encode(dst, val); if (written > length) { throw new IllegalArgumentException("Length of encoded value (" + written + ") exceeds max length (" + length + ")."); } for (; written < length; written++) { dst.put((byte) 0x00); } return written; } FixedLengthWrapper(DataType<T> base, int length); int getLength(); @Override boolean isOrderPreserving(); @Override Order getOrder(); @Override boolean isNullable(); @Override boolean isSkippable(); @Override int encodedLength(T val); @Override Class<T> encodedClass(); @Override int skip(PositionedByteRange src); @Override T decode(PositionedByteRange src); @Override int encode(PositionedByteRange dst, T val); }
@Test(expected = IllegalArgumentException.class) public void testInsufficientRemainingWrite() { PositionedByteRange buff = new SimplePositionedByteRange(0); DataType<byte[]> type = new FixedLengthWrapper<byte[]>(new RawBytes(), 3); type.encode(buff, Bytes.toBytes("")); } @Test(expected = IllegalArgumentException.class) public void testOverflowPassthrough() { PositionedByteRange buff = new SimplePositionedByteRange(3); DataType<byte[]> type = new FixedLengthWrapper<byte[]>(new RawBytes(), 0); type.encode(buff, Bytes.toBytes("foo")); }
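A happy-path sketch for contrast (assumed usage): a value shorter than the fixed length is padded with 0x00 up to exactly that length, and encode reports the padded width rather than the raw value length.

import org.apache.hadoop.hbase.types.DataType;
import org.apache.hadoop.hbase.types.FixedLengthWrapper;
import org.apache.hadoop.hbase.types.RawBytes;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedByteRange;

public class FixedLengthDemo {
  public static void main(String[] args) {
    DataType<byte[]> type = new FixedLengthWrapper<byte[]>(new RawBytes(), 8);
    PositionedByteRange buff = new SimplePositionedByteRange(8);
    int written = type.encode(buff, Bytes.toBytes("foo"));  // 3 data bytes + 5 pad bytes of 0x00
    System.out.println(written);  // 8: always the fixed width
  }
}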
ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public boolean exists(ByteBuffer table, TGet get) throws TIOError, TException { HTableInterface htable = getTable(table); try { return htable.exists(getFromThrift(get)); } catch (IOException e) { throw getTIOError(e); } finally { closeTable(htable); } } ThriftHBaseServiceHandler(Configuration conf); static THBaseService.Iface newInstance(Configuration conf, ThriftMetrics metrics); @Override boolean exists(ByteBuffer table, TGet get); @Override TResult get(ByteBuffer table, TGet get); @Override List<TResult> getMultiple(ByteBuffer table, List<TGet> gets); @Override void put(ByteBuffer table, TPut put); @Override boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TPut put); @Override void putMultiple(ByteBuffer table, List<TPut> puts); @Override void deleteSingle(ByteBuffer table, TDelete deleteSingle); @Override List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes); @Override boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle); @Override TResult increment(ByteBuffer table, TIncrement increment); @Override TResult append(ByteBuffer table, TAppend append); @Override int openScanner(ByteBuffer table, TScan scan); @Override List<TResult> getScannerRows(int scannerId, int numRows); @Override List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows); @Override void closeScanner(int scannerId); @Override void mutateRow(ByteBuffer table, TRowMutations rowMutations); }
@Test public void testExists() throws TIOError, TException { ThriftHBaseServiceHandler handler = createHandler(); byte[] rowName = "testExists".getBytes(); ByteBuffer table = wrap(tableAname); TGet get = new TGet(wrap(rowName)); assertFalse(handler.exists(table, get)); List<TColumnValue> columnValues = new ArrayList<TColumnValue>(); columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname))); columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname))); TPut put = new TPut(wrap(rowName), columnValues); put.setColumnValues(columnValues); handler.put(table, put); assertTrue(handler.exists(table, get)); }
ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes) throws TIOError, TException { HTableInterface htable = getTable(table); try { htable.delete(deletesFromThrift(deletes)); } catch (IOException e) { throw getTIOError(e); } finally { closeTable(htable); } return Collections.emptyList(); } ThriftHBaseServiceHandler(Configuration conf); static THBaseService.Iface newInstance(Configuration conf, ThriftMetrics metrics); @Override boolean exists(ByteBuffer table, TGet get); @Override TResult get(ByteBuffer table, TGet get); @Override List<TResult> getMultiple(ByteBuffer table, List<TGet> gets); @Override void put(ByteBuffer table, TPut put); @Override boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TPut put); @Override void putMultiple(ByteBuffer table, List<TPut> puts); @Override void deleteSingle(ByteBuffer table, TDelete deleteSingle); @Override List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes); @Override boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle); @Override TResult increment(ByteBuffer table, TIncrement increment); @Override TResult append(ByteBuffer table, TAppend append); @Override int openScanner(ByteBuffer table, TScan scan); @Override List<TResult> getScannerRows(int scannerId, int numRows); @Override List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows); @Override void closeScanner(int scannerId); @Override void mutateRow(ByteBuffer table, TRowMutations rowMutations); }
@Test public void testDeleteMultiple() throws Exception { ThriftHBaseServiceHandler handler = createHandler(); ByteBuffer table = wrap(tableAname); byte[] rowName1 = "testDeleteMultiple1".getBytes(); byte[] rowName2 = "testDeleteMultiple2".getBytes(); List<TColumnValue> columnValues = new ArrayList<TColumnValue>(); columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname))); columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname))); List<TPut> puts = new ArrayList<TPut>(); puts.add(new TPut(wrap(rowName1), columnValues)); puts.add(new TPut(wrap(rowName2), columnValues)); handler.putMultiple(table, puts); List<TDelete> deletes = new ArrayList<TDelete>(); deletes.add(new TDelete(wrap(rowName1))); deletes.add(new TDelete(wrap(rowName2))); List<TDelete> deleteResults = handler.deleteMultiple(table, deletes); assertEquals(0, deleteResults.size()); assertFalse(handler.exists(table, new TGet(wrap(rowName1)))); assertFalse(handler.exists(table, new TGet(wrap(rowName2)))); }
FSHDFSUtils extends FSUtils { public static boolean isSameHdfs(Configuration conf, FileSystem srcFs, FileSystem desFs) { String srcServiceName = srcFs.getCanonicalServiceName(); String desServiceName = desFs.getCanonicalServiceName(); if (srcServiceName == null || desServiceName == null) { return false; } if (srcServiceName.equals(desServiceName)) { return true; } if (srcFs instanceof DistributedFileSystem && desFs instanceof DistributedFileSystem) { Set<InetSocketAddress> srcAddrs = getNNAddresses((DistributedFileSystem) srcFs, conf); Set<InetSocketAddress> desAddrs = getNNAddresses((DistributedFileSystem) desFs, conf); if (Sets.intersection(srcAddrs, desAddrs).size() > 0) { return true; } } return false; } static boolean isSameHdfs(Configuration conf, FileSystem srcFs, FileSystem desFs); @Override void recoverFileLease(final FileSystem fs, final Path p, Configuration conf, CancelableProgressable reporter); }
@Test public void testIsSameHdfs() throws IOException { try { Class dfsUtilClazz = Class.forName("org.apache.hadoop.hdfs.DFSUtil"); dfsUtilClazz.getMethod("getNNServiceRpcAddresses", Configuration.class); } catch (Exception e) { LOG.info("Skip testIsSameHdfs test case because of the no-HA hadoop version."); return; } Configuration conf = HBaseConfiguration.create(); Path srcPath = new Path("hdfs: Path desPath = new Path("hdfs: FileSystem srcFs = srcPath.getFileSystem(conf); FileSystem desFs = desPath.getFileSystem(conf); assertTrue(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)); desPath = new Path("hdfs: desFs = desPath.getFileSystem(conf); assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)); desPath = new Path("hdfs: desFs = desPath.getFileSystem(conf); assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)); conf.set("fs.defaultFS", "hdfs: conf.set("dfs.nameservices", "haosong-hadoop"); conf.set("dfs.ha.namenodes.haosong-hadoop", "nn1,nn2"); conf.set("dfs.client.failover.proxy.provider.haosong-hadoop", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"); conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn1", "127.0.0.1:8020"); conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn2", "127.10.2.1:8000"); desPath = new Path("/"); desFs = desPath.getFileSystem(conf); assertTrue(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)); conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn1", "127.10.2.1:8020"); conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn2", "127.0.0.1:8000"); desPath = new Path("/"); desFs = desPath.getFileSystem(conf); assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)); }
ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public TResult increment(ByteBuffer table, TIncrement increment) throws TIOError, TException { HTableInterface htable = getTable(table); try { return resultFromHBase(htable.increment(incrementFromThrift(increment))); } catch (IOException e) { throw getTIOError(e); } finally { closeTable(htable); } } ThriftHBaseServiceHandler(Configuration conf); static THBaseService.Iface newInstance(Configuration conf, ThriftMetrics metrics); @Override boolean exists(ByteBuffer table, TGet get); @Override TResult get(ByteBuffer table, TGet get); @Override List<TResult> getMultiple(ByteBuffer table, List<TGet> gets); @Override void put(ByteBuffer table, TPut put); @Override boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TPut put); @Override void putMultiple(ByteBuffer table, List<TPut> puts); @Override void deleteSingle(ByteBuffer table, TDelete deleteSingle); @Override List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes); @Override boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle); @Override TResult increment(ByteBuffer table, TIncrement increment); @Override TResult append(ByteBuffer table, TAppend append); @Override int openScanner(ByteBuffer table, TScan scan); @Override List<TResult> getScannerRows(int scannerId, int numRows); @Override List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows); @Override void closeScanner(int scannerId); @Override void mutateRow(ByteBuffer table, TRowMutations rowMutations); }
@Test public void testIncrement() throws Exception { ThriftHBaseServiceHandler handler = createHandler(); byte[] rowName = "testIncrement".getBytes(); ByteBuffer table = wrap(tableAname); List<TColumnValue> columnValues = new ArrayList<TColumnValue>(); columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(Bytes.toBytes(1L)))); TPut put = new TPut(wrap(rowName), columnValues); put.setColumnValues(columnValues); handler.put(table, put); List<TColumnIncrement> incrementColumns = new ArrayList<TColumnIncrement>(); incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname))); TIncrement increment = new TIncrement(wrap(rowName), incrementColumns); handler.increment(table, increment); TGet get = new TGet(wrap(rowName)); TResult result = handler.get(table, get); assertArrayEquals(rowName, result.getRow()); assertEquals(1, result.getColumnValuesSize()); TColumnValue columnValue = result.getColumnValues().get(0); assertArrayEquals(Bytes.toBytes(2L), columnValue.getValue()); }
ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public TResult append(ByteBuffer table, TAppend append) throws TIOError, TException { HTableInterface htable = getTable(table); try { return resultFromHBase(htable.append(appendFromThrift(append))); } catch (IOException e) { throw getTIOError(e); } finally { closeTable(htable); } } ThriftHBaseServiceHandler(Configuration conf); static THBaseService.Iface newInstance(Configuration conf, ThriftMetrics metrics); @Override boolean exists(ByteBuffer table, TGet get); @Override TResult get(ByteBuffer table, TGet get); @Override List<TResult> getMultiple(ByteBuffer table, List<TGet> gets); @Override void put(ByteBuffer table, TPut put); @Override boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TPut put); @Override void putMultiple(ByteBuffer table, List<TPut> puts); @Override void deleteSingle(ByteBuffer table, TDelete deleteSingle); @Override List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes); @Override boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle); @Override TResult increment(ByteBuffer table, TIncrement increment); @Override TResult append(ByteBuffer table, TAppend append); @Override int openScanner(ByteBuffer table, TScan scan); @Override List<TResult> getScannerRows(int scannerId, int numRows); @Override List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows); @Override void closeScanner(int scannerId); @Override void mutateRow(ByteBuffer table, TRowMutations rowMutations); }
@Test public void testAppend() throws Exception { ThriftHBaseServiceHandler handler = createHandler(); byte[] rowName = "testAppend".getBytes(); ByteBuffer table = wrap(tableAname); byte[] v1 = Bytes.toBytes("42"); byte[] v2 = Bytes.toBytes("23"); List<TColumnValue> columnValues = new ArrayList<TColumnValue>(); columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(v1))); TPut put = new TPut(wrap(rowName), columnValues); put.setColumnValues(columnValues); handler.put(table, put); List<TColumnValue> appendColumns = new ArrayList<TColumnValue>(); appendColumns.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(v2))); TAppend append = new TAppend(wrap(rowName), appendColumns); handler.append(table, append); TGet get = new TGet(wrap(rowName)); TResult result = handler.get(table, get); assertArrayEquals(rowName, result.getRow()); assertEquals(1, result.getColumnValuesSize()); TColumnValue columnValue = result.getColumnValues().get(0); assertArrayEquals(Bytes.add(v1, v2), columnValue.getValue()); }
ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TPut put) throws TIOError, TException { HTableInterface htable = getTable(table); try { return htable.checkAndPut(byteBufferToByteArray(row), byteBufferToByteArray(family), byteBufferToByteArray(qualifier), (value == null) ? null : byteBufferToByteArray(value), putFromThrift(put)); } catch (IOException e) { throw getTIOError(e); } finally { closeTable(htable); } } ThriftHBaseServiceHandler(Configuration conf); static THBaseService.Iface newInstance(Configuration conf, ThriftMetrics metrics); @Override boolean exists(ByteBuffer table, TGet get); @Override TResult get(ByteBuffer table, TGet get); @Override List<TResult> getMultiple(ByteBuffer table, List<TGet> gets); @Override void put(ByteBuffer table, TPut put); @Override boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TPut put); @Override void putMultiple(ByteBuffer table, List<TPut> puts); @Override void deleteSingle(ByteBuffer table, TDelete deleteSingle); @Override List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes); @Override boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle); @Override TResult increment(ByteBuffer table, TIncrement increment); @Override TResult append(ByteBuffer table, TAppend append); @Override int openScanner(ByteBuffer table, TScan scan); @Override List<TResult> getScannerRows(int scannerId, int numRows); @Override List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows); @Override void closeScanner(int scannerId); @Override void mutateRow(ByteBuffer table, TRowMutations rowMutations); }
@Test public void testCheckAndPut() throws Exception { ThriftHBaseServiceHandler handler = createHandler(); byte[] rowName = "testCheckAndPut".getBytes(); ByteBuffer table = wrap(tableAname); List<TColumnValue> columnValuesA = new ArrayList<TColumnValue>(); TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)); columnValuesA.add(columnValueA); TPut putA = new TPut(wrap(rowName), columnValuesA); putA.setColumnValues(columnValuesA); List<TColumnValue> columnValuesB = new ArrayList<TColumnValue>(); TColumnValue columnValueB = new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname)); columnValuesB.add(columnValueB); TPut putB = new TPut(wrap(rowName), columnValuesB); putB.setColumnValues(columnValuesB); assertFalse(handler.checkAndPut(table, wrap(rowName), wrap(familyAname), wrap(qualifierAname), wrap(valueAname), putB)); TGet get = new TGet(wrap(rowName)); TResult result = handler.get(table, get); assertEquals(0, result.getColumnValuesSize()); handler.put(table, putA); assertTrue(handler.checkAndPut(table, wrap(rowName), wrap(familyAname), wrap(qualifierAname), wrap(valueAname), putB)); result = handler.get(table, get); assertArrayEquals(rowName, result.getRow()); List<TColumnValue> returnedColumnValues = result.getColumnValues(); List<TColumnValue> expectedColumnValues = new ArrayList<TColumnValue>(); expectedColumnValues.add(columnValueA); expectedColumnValues.add(columnValueB); assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues); }
ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle) throws TIOError, TException { HTableInterface htable = getTable(table); try { if (value == null) { return htable.checkAndDelete(byteBufferToByteArray(row), byteBufferToByteArray(family), byteBufferToByteArray(qualifier), null, deleteFromThrift(deleteSingle)); } else { return htable.checkAndDelete(byteBufferToByteArray(row), byteBufferToByteArray(family), byteBufferToByteArray(qualifier), byteBufferToByteArray(value), deleteFromThrift(deleteSingle)); } } catch (IOException e) { throw getTIOError(e); } finally { closeTable(htable); } } ThriftHBaseServiceHandler(Configuration conf); static THBaseService.Iface newInstance(Configuration conf, ThriftMetrics metrics); @Override boolean exists(ByteBuffer table, TGet get); @Override TResult get(ByteBuffer table, TGet get); @Override List<TResult> getMultiple(ByteBuffer table, List<TGet> gets); @Override void put(ByteBuffer table, TPut put); @Override boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TPut put); @Override void putMultiple(ByteBuffer table, List<TPut> puts); @Override void deleteSingle(ByteBuffer table, TDelete deleteSingle); @Override List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes); @Override boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle); @Override TResult increment(ByteBuffer table, TIncrement increment); @Override TResult append(ByteBuffer table, TAppend append); @Override int openScanner(ByteBuffer table, TScan scan); @Override List<TResult> getScannerRows(int scannerId, int numRows); @Override List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows); @Override void closeScanner(int scannerId); @Override void mutateRow(ByteBuffer table, TRowMutations rowMutations); }
@Test public void testCheckAndDelete() throws Exception { ThriftHBaseServiceHandler handler = createHandler(); byte[] rowName = "testCheckAndDelete".getBytes(); ByteBuffer table = wrap(tableAname); List<TColumnValue> columnValuesA = new ArrayList<TColumnValue>(); TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)); columnValuesA.add(columnValueA); TPut putA = new TPut(wrap(rowName), columnValuesA); putA.setColumnValues(columnValuesA); List<TColumnValue> columnValuesB = new ArrayList<TColumnValue>(); TColumnValue columnValueB = new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname)); columnValuesB.add(columnValueB); TPut putB = new TPut(wrap(rowName), columnValuesB); putB.setColumnValues(columnValuesB); handler.put(table, putB); TDelete delete = new TDelete(wrap(rowName)); assertFalse(handler.checkAndDelete(table, wrap(rowName), wrap(familyAname), wrap(qualifierAname), wrap(valueAname), delete)); TGet get = new TGet(wrap(rowName)); TResult result = handler.get(table, get); assertArrayEquals(rowName, result.getRow()); assertTColumnValuesEqual(columnValuesB, result.getColumnValues()); handler.put(table, putA); assertTrue(handler.checkAndDelete(table, wrap(rowName), wrap(familyAname), wrap(qualifierAname), wrap(valueAname), delete)); result = handler.get(table, get); assertFalse(result.isSetRow()); assertEquals(0, result.getColumnValuesSize()); }
ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows) throws TIOError, TException { HTableInterface htable = getTable(table); List<TResult> results = null; ResultScanner scanner = null; try { scanner = htable.getScanner(scanFromThrift(scan)); results = resultsFromHBase(scanner.next(numRows)); } catch (IOException e) { throw getTIOError(e); } finally { if (scanner != null) { scanner.close(); } closeTable(htable); } return results; } ThriftHBaseServiceHandler(Configuration conf); static THBaseService.Iface newInstance(Configuration conf, ThriftMetrics metrics); @Override boolean exists(ByteBuffer table, TGet get); @Override TResult get(ByteBuffer table, TGet get); @Override List<TResult> getMultiple(ByteBuffer table, List<TGet> gets); @Override void put(ByteBuffer table, TPut put); @Override boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TPut put); @Override void putMultiple(ByteBuffer table, List<TPut> puts); @Override void deleteSingle(ByteBuffer table, TDelete deleteSingle); @Override List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes); @Override boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle); @Override TResult increment(ByteBuffer table, TIncrement increment); @Override TResult append(ByteBuffer table, TAppend append); @Override int openScanner(ByteBuffer table, TScan scan); @Override List<TResult> getScannerRows(int scannerId, int numRows); @Override List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows); @Override void closeScanner(int scannerId); @Override void mutateRow(ByteBuffer table, TRowMutations rowMutations); }
@Test public void testGetScannerResults() throws Exception { ThriftHBaseServiceHandler handler = createHandler(); ByteBuffer table = wrap(tableAname); TColumnValue columnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)); List<TColumnValue> columnValues = new ArrayList<TColumnValue>(); columnValues.add(columnValue); for (int i = 0; i < 20; i++) { TPut put = new TPut(wrap(("testGetScannerResults" + pad(i, (byte) 2)).getBytes()), columnValues); handler.put(table, put); } TScan scan = new TScan(); List<TColumn> columns = new ArrayList<TColumn>(); TColumn column = new TColumn(); column.setFamily(familyAname); column.setQualifier(qualifierAname); columns.add(column); scan.setColumns(columns); scan.setStartRow("testGetScannerResults".getBytes()); scan.setStopRow("testGetScannerResults05".getBytes()); List<TResult> results = handler.getScannerResults(table, scan, 5); assertEquals(5, results.size()); for (int i = 0; i < 5; i++) { assertArrayEquals(("testGetScannerResults" + pad(i, (byte) 2)).getBytes(), results.get(i) .getRow()); } scan.setStopRow("testGetScannerResults10".getBytes()); results = handler.getScannerResults(table, scan, 10); assertEquals(10, results.size()); for (int i = 0; i < 10; i++) { assertArrayEquals(("testGetScannerResults" + pad(i, (byte) 2)).getBytes(), results.get(i) .getRow()); } scan.setStopRow("testGetScannerResults20".getBytes()); results = handler.getScannerResults(table, scan, 20); assertEquals(20, results.size()); for (int i = 0; i < 20; i++) { assertArrayEquals(("testGetScannerResults" + pad(i, (byte) 2)).getBytes(), results.get(i) .getRow()); } scan = new TScan(); scan.setColumns(columns); scan.setReversed(true); scan.setStartRow("testGetScannerResults20".getBytes()); scan.setStopRow("testGetScannerResults".getBytes()); results = handler.getScannerResults(table, scan, 20); assertEquals(20, results.size()); for (int i = 0; i < 20; i++) { assertArrayEquals(("testGetScannerResults" + pad(19 - i, (byte) 2)).getBytes(), results.get(i) .getRow()); } }
ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public TResult get(ByteBuffer table, TGet get) throws TIOError, TException { HTableInterface htable = getTable(table); try { return resultFromHBase(htable.get(getFromThrift(get))); } catch (IOException e) { throw getTIOError(e); } finally { closeTable(htable); } } ThriftHBaseServiceHandler(Configuration conf); static THBaseService.Iface newInstance(Configuration conf, ThriftMetrics metrics); @Override boolean exists(ByteBuffer table, TGet get); @Override TResult get(ByteBuffer table, TGet get); @Override List<TResult> getMultiple(ByteBuffer table, List<TGet> gets); @Override void put(ByteBuffer table, TPut put); @Override boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TPut put); @Override void putMultiple(ByteBuffer table, List<TPut> puts); @Override void deleteSingle(ByteBuffer table, TDelete deleteSingle); @Override List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes); @Override boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle); @Override TResult increment(ByteBuffer table, TIncrement increment); @Override TResult append(ByteBuffer table, TAppend append); @Override int openScanner(ByteBuffer table, TScan scan); @Override List<TResult> getScannerRows(int scannerId, int numRows); @Override List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows); @Override void closeScanner(int scannerId); @Override void mutateRow(ByteBuffer table, TRowMutations rowMutations); }
@Test public void testFilterRegistration() throws Exception { Configuration conf = UTIL.getConfiguration(); conf.set("hbase.thrift.filters", "MyFilter:filterclass"); ThriftServer.registerFilters(conf); Map<String, String> registeredFilters = ParseFilter.getAllFilters(); assertEquals("filterclass", registeredFilters.get("MyFilter")); }
ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public void put(ByteBuffer table, TPut put) throws TIOError, TException { HTableInterface htable = getTable(table); try { htable.put(putFromThrift(put)); } catch (IOException e) { throw getTIOError(e); } finally { closeTable(htable); } } ThriftHBaseServiceHandler(Configuration conf); static THBaseService.Iface newInstance(Configuration conf, ThriftMetrics metrics); @Override boolean exists(ByteBuffer table, TGet get); @Override TResult get(ByteBuffer table, TGet get); @Override List<TResult> getMultiple(ByteBuffer table, List<TGet> gets); @Override void put(ByteBuffer table, TPut put); @Override boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TPut put); @Override void putMultiple(ByteBuffer table, List<TPut> puts); @Override void deleteSingle(ByteBuffer table, TDelete deleteSingle); @Override List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes); @Override boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle); @Override TResult increment(ByteBuffer table, TIncrement increment); @Override TResult append(ByteBuffer table, TAppend append); @Override int openScanner(ByteBuffer table, TScan scan); @Override List<TResult> getScannerRows(int scannerId, int numRows); @Override List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows); @Override void closeScanner(int scannerId); @Override void mutateRow(ByteBuffer table, TRowMutations rowMutations); }
@Test public void testAttribute() throws Exception { byte[] rowName = "testAttribute".getBytes(); byte[] attributeKey = "attribute1".getBytes(); byte[] attributeValue = "value1".getBytes(); Map<ByteBuffer, ByteBuffer> attributes = new HashMap<ByteBuffer, ByteBuffer>(); attributes.put(wrap(attributeKey), wrap(attributeValue)); TGet tGet = new TGet(wrap(rowName)); tGet.setAttributes(attributes); Get get = getFromThrift(tGet); assertArrayEquals(get.getAttribute("attribute1"), attributeValue); List<TColumnValue> columnValues = new ArrayList<TColumnValue>(); columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname))); TPut tPut = new TPut(wrap(rowName) , columnValues); tPut.setAttributes(attributes); Put put = putFromThrift(tPut); assertArrayEquals(put.getAttribute("attribute1"), attributeValue); TScan tScan = new TScan(); tScan.setAttributes(attributes); Scan scan = scanFromThrift(tScan); assertArrayEquals(scan.getAttribute("attribute1"), attributeValue); List<TColumnIncrement> incrementColumns = new ArrayList<TColumnIncrement>(); incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname))); TIncrement tIncrement = new TIncrement(wrap(rowName), incrementColumns); tIncrement.setAttributes(attributes); Increment increment = incrementFromThrift(tIncrement); assertArrayEquals(increment.getAttribute("attribute1"), attributeValue); TDelete tDelete = new TDelete(wrap(rowName)); tDelete.setAttributes(attributes); Delete delete = deleteFromThrift(tDelete); assertArrayEquals(delete.getAttribute("attribute1"), attributeValue); }
ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public void mutateRow(ByteBuffer table, TRowMutations rowMutations) throws TIOError, TException { HTableInterface htable = getTable(table); try { htable.mutateRow(rowMutationsFromThrift(rowMutations)); } catch (IOException e) { throw getTIOError(e); } finally { closeTable(htable); } } ThriftHBaseServiceHandler(Configuration conf); static THBaseService.Iface newInstance(Configuration conf, ThriftMetrics metrics); @Override boolean exists(ByteBuffer table, TGet get); @Override TResult get(ByteBuffer table, TGet get); @Override List<TResult> getMultiple(ByteBuffer table, List<TGet> gets); @Override void put(ByteBuffer table, TPut put); @Override boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TPut put); @Override void putMultiple(ByteBuffer table, List<TPut> puts); @Override void deleteSingle(ByteBuffer table, TDelete deleteSingle); @Override List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes); @Override boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle); @Override TResult increment(ByteBuffer table, TIncrement increment); @Override TResult append(ByteBuffer table, TAppend append); @Override int openScanner(ByteBuffer table, TScan scan); @Override List<TResult> getScannerRows(int scannerId, int numRows); @Override List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows); @Override void closeScanner(int scannerId); @Override void mutateRow(ByteBuffer table, TRowMutations rowMutations); }
@Test public void testMutateRow() throws Exception { ThriftHBaseServiceHandler handler = createHandler(); byte[] rowName = "testMutateRow".getBytes(); ByteBuffer table = wrap(tableAname); List<TColumnValue> columnValuesA = new ArrayList<TColumnValue>(); TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)); columnValuesA.add(columnValueA); TPut putA = new TPut(wrap(rowName), columnValuesA); putA.setColumnValues(columnValuesA); handler.put(table,putA); TGet get = new TGet(wrap(rowName)); TResult result = handler.get(table, get); assertArrayEquals(rowName, result.getRow()); List<TColumnValue> returnedColumnValues = result.getColumnValues(); List<TColumnValue> expectedColumnValues = new ArrayList<TColumnValue>(); expectedColumnValues.add(columnValueA); assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues); List<TColumnValue> columnValuesB = new ArrayList<TColumnValue>(); TColumnValue columnValueB = new TColumnValue(wrap(familyAname), wrap(qualifierBname), wrap(valueBname)); columnValuesB.add(columnValueB); TPut putB = new TPut(wrap(rowName), columnValuesB); putB.setColumnValues(columnValuesB); TDelete delete = new TDelete(wrap(rowName)); List<TColumn> deleteColumns = new ArrayList<TColumn>(); TColumn deleteColumn = new TColumn(wrap(familyAname)); deleteColumn.setQualifier(qualifierAname); deleteColumns.add(deleteColumn); delete.setColumns(deleteColumns); List<TMutation> mutations = new ArrayList<TMutation>(); TMutation mutationA = TMutation.put(putB); mutations.add(mutationA); TMutation mutationB = TMutation.deleteSingle(delete); mutations.add(mutationB); TRowMutations tRowMutations = new TRowMutations(wrap(rowName),mutations); handler.mutateRow(table,tRowMutations); result = handler.get(table, get); assertArrayEquals(rowName, result.getRow()); returnedColumnValues = result.getColumnValues(); expectedColumnValues = new ArrayList<TColumnValue>(); expectedColumnValues.add(columnValueB); assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues); }
CompatibilitySingletonFactory extends CompatibilityFactory { @SuppressWarnings("unchecked") public static <T> T getInstance(Class<T> klass) { synchronized (SingletonStorage.INSTANCE.lock) { T instance = (T) SingletonStorage.INSTANCE.instances.get(klass); if (instance == null) { try { ServiceLoader<T> loader = ServiceLoader.load(klass); Iterator<T> it = loader.iterator(); instance = it.next(); if (it.hasNext()) { StringBuilder msg = new StringBuilder(); msg.append("ServiceLoader provided more than one implementation for class: ") .append(klass) .append(", using implementation: ").append(instance.getClass()) .append(", other implementations: {"); while (it.hasNext()) { msg.append(it.next()).append(" "); } msg.append("}"); LOG.warn(msg); } } catch (Exception e) { throw new RuntimeException(createExceptionString(klass), e); } catch (Error e) { throw new RuntimeException(createExceptionString(klass), e); } if (instance == null) { throw new RuntimeException(createExceptionString(klass)); } SingletonStorage.INSTANCE.instances.put(klass, instance); } return instance; } } protected CompatibilitySingletonFactory(); @SuppressWarnings("unchecked") static T getInstance(Class<T> klass); }
@Test public void testGetInstance() throws Exception { List<TestCompatibilitySingletonFactoryCallable> callables = new ArrayList<TestCompatibilitySingletonFactoryCallable>(ITERATIONS); List<String> resultStrings = new ArrayList<String>(ITERATIONS); for (int i = 0; i < ITERATIONS; i++) { callables.add(new TestCompatibilitySingletonFactoryCallable()); } ExecutorService executorService = Executors.newFixedThreadPool(100); List<Future<String>> futures = executorService.invokeAll(callables); for (Future<String> f : futures) { resultStrings.add(f.get()); } String firstString = resultStrings.get(0); for (String s : resultStrings) { assertEquals(firstString, s); } assertNotEquals(new RandomStringGeneratorImpl().getRandString(), firstString); }
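CompatibilitySingletonFactory above resolves implementations through Java's standard ServiceLoader SPI, which the test only exercises indirectly. A minimal self-contained sketch of that mechanism follows; SpiDemo and MyService are hypothetical names for illustration, not HBase classes. Providers are declared one per line in a classpath resource named META-INF/services/<binary name of the interface>, and ServiceLoader.load(...) instantiates each declared class, which is exactly the lookup getInstance(klass) performs before caching the first hit.

import java.util.Iterator;
import java.util.ServiceLoader;

// Minimal ServiceLoader sketch; SpiDemo and MyService are hypothetical names.
public class SpiDemo {
  public interface MyService { String name(); }

  public static void main(String[] args) {
    // ServiceLoader scans META-INF/services/SpiDemo$MyService on the
    // classpath; each non-comment line there names an implementation class.
    ServiceLoader<MyService> loader = ServiceLoader.load(MyService.class);
    Iterator<MyService> it = loader.iterator();
    if (!it.hasNext()) {
      System.out.println("no providers registered");
    }
    while (it.hasNext()) {
      // Like getInstance(), a real factory would keep the first provider and
      // log any extras; here we simply print everything that was found.
      System.out.println("found implementation: " + it.next().name());
    }
  }
}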
IPCUtil { @SuppressWarnings("resource") ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor, final CellScanner cellScanner) throws IOException { if (cellScanner == null) return null; if (codec == null) throw new CellScannerButNoCodecException(); int bufferSize = this.cellBlockBuildingInitialBufferSize; if (cellScanner instanceof HeapSize) { long longSize = ((HeapSize)cellScanner).heapSize(); if (longSize > Integer.MAX_VALUE) { throw new IOException("Size " + longSize + " > " + Integer.MAX_VALUE); } bufferSize = ClassSize.align((int)longSize); } ByteBufferOutputStream baos = new ByteBufferOutputStream(bufferSize); OutputStream os = baos; Compressor poolCompressor = null; try { if (compressor != null) { if (compressor instanceof Configurable) ((Configurable)compressor).setConf(this.conf); poolCompressor = CodecPool.getCompressor(compressor); os = compressor.createOutputStream(os, poolCompressor); } Codec.Encoder encoder = codec.getEncoder(os); int count = 0; while (cellScanner.advance()) { encoder.write(cellScanner.current()); count++; } encoder.flush(); if (count == 0) return null; } finally { os.close(); if (poolCompressor != null) CodecPool.returnCompressor(poolCompressor); } if (LOG.isTraceEnabled()) { if (bufferSize < baos.size()) { LOG.trace("Buffer grew from initial bufferSize=" + bufferSize + " to " + baos.size() + "; up hbase.ipc.cellblock.building.initial.buffersize?"); } } return baos.getByteBuffer(); } IPCUtil(final Configuration conf); static final Log LOG; }
@Test public void testBuildCellBlock() throws IOException { doBuildCellBlockUndoCellBlock(this.util, new KeyValueCodec(), null); doBuildCellBlockUndoCellBlock(this.util, new KeyValueCodec(), new DefaultCodec()); doBuildCellBlockUndoCellBlock(this.util, new KeyValueCodec(), new GzipCodec()); }
FSTableDescriptors implements TableDescriptors { @VisibleForTesting static int getTableInfoSequenceId(final Path p) { if (p == null) return 0; Matcher m = TABLEINFO_FILE_REGEX.matcher(p.getName()); if (!m.matches()) throw new IllegalArgumentException(p.toString()); String suffix = m.group(2); if (suffix == null || suffix.length() <= 0) return 0; return Integer.parseInt(m.group(2)); } FSTableDescriptors(final Configuration conf); FSTableDescriptors(final FileSystem fs, final Path rootdir); FSTableDescriptors(final FileSystem fs, final Path rootdir, final boolean fsreadonly); @Override HTableDescriptor get(final TableName tablename); @Override Map<String, HTableDescriptor> getAll(); @Override Map<String, HTableDescriptor> getByNamespace(String name); @Override void add(HTableDescriptor htd); @Override HTableDescriptor remove(final TableName tablename); boolean isTableInfoExists(TableName tableName); static FileStatus getTableInfoPath(FileSystem fs, Path tableDir); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path hbaseRootDir, TableName tableName); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir); void deleteTableDescriptorIfExists(TableName tableName); boolean createTableDescriptor(HTableDescriptor htd); boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation); boolean createTableDescriptorForTableDirectory(Path tableDir, HTableDescriptor htd, boolean forceCreation); }
@Test (expected=IllegalArgumentException.class) public void testRegexAgainstOldStyleTableInfo() { Path p = new Path("/tmp", FSTableDescriptors.TABLEINFO_FILE_PREFIX); int i = FSTableDescriptors.getTableInfoSequenceId(p); assertEquals(0, i); p = new Path("/tmp", "abc"); FSTableDescriptors.getTableInfoSequenceId(p); }
PayloadCarryingRpcController extends TimeLimitedRpcController implements CellScannable { public CellScanner cellScanner() { return cellScanner; } PayloadCarryingRpcController(); PayloadCarryingRpcController(final CellScanner cellScanner); PayloadCarryingRpcController(final List<CellScannable> cellIterables); CellScanner cellScanner(); void setCellScanner(final CellScanner cellScanner); void setPriority(int priority); void setPriority(final TableName tn); int getPriority(); }
@Test public void testListOfCellScannerables() throws IOException { List<CellScannable> cells = new ArrayList<CellScannable>(); final int count = 10; for (int i = 0; i < count; i++) { cells.add(createCell(i)); } PayloadCarryingRpcController controller = new PayloadCarryingRpcController(cells); CellScanner cellScanner = controller.cellScanner(); int index = 0; for (; cellScanner.advance(); index++) { Cell cell = cellScanner.current(); byte [] indexBytes = Bytes.toBytes(index); assertTrue("" + index, Bytes.equals(indexBytes, 0, indexBytes.length, cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); } assertEquals(count, index); }
AsyncProcess { public <CResult> AsyncRequestFuture submit(TableName tableName, List<? extends Row> rows, boolean atLeastOne, Batch.Callback<CResult> callback, boolean needResults) throws InterruptedIOException { return submit(null, tableName, rows, atLeastOne, callback, needResults); } AsyncProcess(ClusterConnection hc, Configuration conf, ExecutorService pool, RpcRetryingCallerFactory rpcCaller, boolean useGlobalErrors, RpcControllerFactory rpcFactory); AsyncRequestFuture submit(TableName tableName, List<? extends Row> rows, boolean atLeastOne, Batch.Callback<CResult> callback, boolean needResults); AsyncRequestFuture submit(ExecutorService pool, TableName tableName, List<? extends Row> rows, boolean atLeastOne, Batch.Callback<CResult> callback, boolean needResults); AsyncRequestFuture submitAll(TableName tableName, List<? extends Row> rows, Batch.Callback<CResult> callback, Object[] results); AsyncRequestFuture submitAll(ExecutorService pool, TableName tableName, List<? extends Row> rows, Batch.Callback<CResult> callback, Object[] results); boolean hasError(); RetriesExhaustedWithDetailsException waitForAllPreviousOpsAndReset( List<Row> failedRows); }
@Test public void testSubmit() throws Exception { ClusterConnection hc = createHConnection(); AsyncProcess ap = new MyAsyncProcess(hc, conf); List<Put> puts = new ArrayList<Put>(); puts.add(createPut(1, true)); ap.submit(DUMMY_TABLE, puts, false, null, false); Assert.assertTrue(puts.isEmpty()); } @Test public void testSubmitBusyRegionServer() throws Exception { ClusterConnection hc = createHConnection(); AsyncProcess ap = new MyAsyncProcess(hc, conf); ap.taskCounterPerServer.put(sn2, new AtomicInteger(ap.maxConcurrentTasksPerServer)); List<Put> puts = new ArrayList<Put>(); puts.add(createPut(1, true)); puts.add(createPut(3, true)); puts.add(createPut(1, true)); puts.add(createPut(2, true)); ap.submit(DUMMY_TABLE, puts, false, null, false); Assert.assertEquals(" puts=" + puts, 1, puts.size()); ap.taskCounterPerServer.put(sn2, new AtomicInteger(ap.maxConcurrentTasksPerServer - 1)); ap.submit(DUMMY_TABLE, puts, false, null, false); Assert.assertTrue(puts.isEmpty()); } @Test public void testSubmitTrue() throws IOException { final AsyncProcess ap = new MyAsyncProcess(createHConnection(), conf, false); ap.tasksInProgress.incrementAndGet(); final AtomicInteger ai = new AtomicInteger(1); ap.taskCounterPerRegion.put(hri1.getRegionName(), ai); final AtomicBoolean checkPoint = new AtomicBoolean(false); final AtomicBoolean checkPoint2 = new AtomicBoolean(false); Thread t = new Thread(){ @Override public void run(){ Threads.sleep(1000); Assert.assertFalse(checkPoint.get()); ai.decrementAndGet(); ap.tasksInProgress.decrementAndGet(); checkPoint2.set(true); } }; List<Put> puts = new ArrayList<Put>(); Put p = createPut(1, true); puts.add(p); ap.submit(DUMMY_TABLE, puts, false, null, false); Assert.assertFalse(puts.isEmpty()); t.start(); ap.submit(DUMMY_TABLE, puts, true, null, false); Assert.assertTrue(puts.isEmpty()); checkPoint.set(true); while (!checkPoint2.get()){ Threads.sleep(1); } }
AsyncProcess { @VisibleForTesting void waitUntilDone() throws InterruptedIOException { waitForMaximumCurrentTasks(0); } AsyncProcess(ClusterConnection hc, Configuration conf, ExecutorService pool, RpcRetryingCallerFactory rpcCaller, boolean useGlobalErrors, RpcControllerFactory rpcFactory); AsyncRequestFuture submit(TableName tableName, List<? extends Row> rows, boolean atLeastOne, Batch.Callback<CResult> callback, boolean needResults); AsyncRequestFuture submit(ExecutorService pool, TableName tableName, List<? extends Row> rows, boolean atLeastOne, Batch.Callback<CResult> callback, boolean needResults); AsyncRequestFuture submitAll(TableName tableName, List<? extends Row> rows, Batch.Callback<CResult> callback, Object[] results); AsyncRequestFuture submitAll(ExecutorService pool, TableName tableName, List<? extends Row> rows, Batch.Callback<CResult> callback, Object[] results); boolean hasError(); RetriesExhaustedWithDetailsException waitForAllPreviousOpsAndReset( List<Row> failedRows); }
@Test public void testHTableFailedPutAndNewPut() throws Exception { HTable ht = new HTable(); MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), conf, true); ht.ap = ap; ht.setAutoFlush(false, true); ht.setWriteBufferSize(0); Put p = createPut(1, false); ht.put(p); ap.waitUntilDone(); p = createPut(1, true); Assert.assertEquals(0, ht.writeAsyncBuffer.size()); try { ht.put(p); Assert.fail(); } catch (RetriesExhaustedException expected) { } Assert.assertEquals("the put should not been inserted.", 0, ht.writeAsyncBuffer.size()); }
Get extends Query implements Row, Comparable<Row> { @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } Row other = (Row) obj; return compareTo(other) == 0; } Get(byte [] row); Get(Get get); boolean isCheckExistenceOnly(); void setCheckExistenceOnly(boolean checkExistenceOnly); boolean isClosestRowBefore(); void setClosestRowBefore(boolean closestRowBefore); Get addFamily(byte [] family); Get addColumn(byte [] family, byte [] qualifier); Get setTimeRange(long minStamp, long maxStamp); Get setTimeStamp(long timestamp); Get setMaxVersions(); Get setMaxVersions(int maxVersions); Get setMaxResultsPerColumnFamily(int limit); Get setRowOffsetPerColumnFamily(int offset); @Override Get setFilter(Filter filter); void setCacheBlocks(boolean cacheBlocks); boolean getCacheBlocks(); byte [] getRow(); int getMaxVersions(); int getMaxResultsPerColumnFamily(); int getRowOffsetPerColumnFamily(); TimeRange getTimeRange(); Set<byte[]> familySet(); int numFamilies(); boolean hasFamilies(); Map<byte[],NavigableSet<byte[]>> getFamilyMap(); @Override Map<String, Object> getFingerprint(); @Override Map<String, Object> toMap(int maxCols); @Override int compareTo(Row other); @Override int hashCode(); @Override boolean equals(Object obj); }
@Test public void testAttributesSerialization() throws IOException { Get get = new Get(Bytes.toBytes("row")); get.setAttribute("attribute1", Bytes.toBytes("value1")); get.setAttribute("attribute2", Bytes.toBytes("value2")); get.setAttribute("attribute3", Bytes.toBytes("value3")); ClientProtos.Get getProto = ProtobufUtil.toGet(get); Get get2 = ProtobufUtil.toGet(getProto); Assert.assertNull(get2.getAttribute("absent")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), get2.getAttribute("attribute1"))); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), get2.getAttribute("attribute2"))); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value3"), get2.getAttribute("attribute3"))); Assert.assertEquals(3, get2.getAttributesMap().size()); }
Get extends Query implements Row, Comparable<Row> { public Get(byte [] row) { Mutation.checkRow(row); this.row = row; } Get(byte [] row); Get(Get get); boolean isCheckExistenceOnly(); void setCheckExistenceOnly(boolean checkExistenceOnly); boolean isClosestRowBefore(); void setClosestRowBefore(boolean closestRowBefore); Get addFamily(byte [] family); Get addColumn(byte [] family, byte [] qualifier); Get setTimeRange(long minStamp, long maxStamp); Get setTimeStamp(long timestamp); Get setMaxVersions(); Get setMaxVersions(int maxVersions); Get setMaxResultsPerColumnFamily(int limit); Get setRowOffsetPerColumnFamily(int offset); @Override Get setFilter(Filter filter); void setCacheBlocks(boolean cacheBlocks); boolean getCacheBlocks(); byte [] getRow(); int getMaxVersions(); int getMaxResultsPerColumnFamily(); int getRowOffsetPerColumnFamily(); TimeRange getTimeRange(); Set<byte[]> familySet(); int numFamilies(); boolean hasFamilies(); Map<byte[],NavigableSet<byte[]>> getFamilyMap(); @Override Map<String, Object> getFingerprint(); @Override Map<String, Object> toMap(int maxCols); @Override int compareTo(Row other); @Override int hashCode(); @Override boolean equals(Object obj); }
@Test public void testDynamicFilter() throws Exception { Configuration conf = HBaseConfiguration.create(); String localPath = conf.get("hbase.local.dir") + File.separator + "jars" + File.separator; File jarFile = new File(localPath, "MockFilter.jar"); jarFile.delete(); assertFalse("Should be deleted: " + jarFile.getPath(), jarFile.exists()); ClientProtos.Get getProto1 = ClientProtos.Get.parseFrom(Base64.decode(PB_GET)); ClientProtos.Get getProto2 = ClientProtos.Get.parseFrom(Base64.decode(PB_GET_WITH_FILTER_LIST)); try { ProtobufUtil.toGet(getProto1); fail("Should not be able to load the filter class"); } catch (IOException ioe) { assertTrue(ioe.getCause() instanceof ClassNotFoundException); } try { ProtobufUtil.toGet(getProto2); fail("Should not be able to load the filter class"); } catch (IOException ioe) { assertTrue(ioe.getCause() instanceof InvocationTargetException); InvocationTargetException ite = (InvocationTargetException)ioe.getCause(); assertTrue(ite.getTargetException() instanceof DeserializationException); } FileOutputStream fos = new FileOutputStream(jarFile); fos.write(Base64.decode(MOCK_FILTER_JAR)); fos.close(); Get get1 = ProtobufUtil.toGet(getProto1); assertEquals("test.MockFilter", get1.getFilter().getClass().getName()); Get get2 = ProtobufUtil.toGet(getProto2); assertTrue(get2.getFilter() instanceof FilterList); List<Filter> filters = ((FilterList)get2.getFilter()).getFilters(); assertEquals(3, filters.size()); assertEquals("test.MockFilter", filters.get(0).getClass().getName()); assertEquals("my.MockFilter", filters.get(1).getClass().getName()); assertTrue(filters.get(2) instanceof KeyOnlyFilter); }
Operation { public String toJSON(int maxCols) throws IOException { return MAPPER.writeValueAsString(toMap(maxCols)); } abstract Map<String, Object> getFingerprint(); abstract Map<String, Object> toMap(int maxCols); Map<String, Object> toMap(); String toJSON(int maxCols); String toJSON(); String toString(int maxCols); @Override String toString(); }
@Test public void testOperationJSON() throws IOException { Scan scan = new Scan(ROW); scan.addColumn(FAMILY, QUALIFIER); String json = scan.toJSON(); Map<String, Object> parsedJSON = mapper.readValue(json, HashMap.class); assertEquals("startRow incorrect in Scan.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("startRow")); List familyInfo = (List) ((Map) parsedJSON.get("families")).get( Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Scan.toJSON()", familyInfo); assertEquals("Qualifier absent in Scan.toJSON()", 1, familyInfo.size()); assertEquals("Qualifier incorrect in Scan.toJSON()", Bytes.toStringBinary(QUALIFIER), familyInfo.get(0)); Get get = new Get(ROW); get.addColumn(FAMILY, QUALIFIER); json = get.toJSON(); parsedJSON = mapper.readValue(json, HashMap.class); assertEquals("row incorrect in Get.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("row")); familyInfo = (List) ((Map) parsedJSON.get("families")).get( Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Get.toJSON()", familyInfo); assertEquals("Qualifier absent in Get.toJSON()", 1, familyInfo.size()); assertEquals("Qualifier incorrect in Get.toJSON()", Bytes.toStringBinary(QUALIFIER), familyInfo.get(0)); Put put = new Put(ROW); put.add(FAMILY, QUALIFIER, VALUE); json = put.toJSON(); parsedJSON = mapper.readValue(json, HashMap.class); assertEquals("row absent in Put.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("row")); familyInfo = (List) ((Map) parsedJSON.get("families")).get( Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Put.toJSON()", familyInfo); assertEquals("KeyValue absent in Put.toJSON()", 1, familyInfo.size()); Map kvMap = (Map) familyInfo.get(0); assertEquals("Qualifier incorrect in Put.toJSON()", Bytes.toStringBinary(QUALIFIER), kvMap.get("qualifier")); assertEquals("Value length incorrect in Put.toJSON()", VALUE.length, kvMap.get("vlen")); Delete delete = new Delete(ROW); delete.deleteColumn(FAMILY, QUALIFIER); json = delete.toJSON(); parsedJSON = mapper.readValue(json, HashMap.class); assertEquals("row absent in Delete.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("row")); familyInfo = (List) ((Map) parsedJSON.get("families")).get( Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Delete.toJSON()", familyInfo); assertEquals("KeyValue absent in Delete.toJSON()", 1, familyInfo.size()); kvMap = (Map) familyInfo.get(0); assertEquals("Qualifier incorrect in Delete.toJSON()", Bytes.toStringBinary(QUALIFIER), kvMap.get("qualifier")); }
FSTableDescriptors implements TableDescriptors { private static String formatTableInfoSequenceId(final int number) { byte [] b = new byte[WIDTH_OF_SEQUENCE_ID]; int d = Math.abs(number); for (int i = b.length - 1; i >= 0; i--) { b[i] = (byte)((d % 10) + '0'); d /= 10; } return Bytes.toString(b); } FSTableDescriptors(final Configuration conf); FSTableDescriptors(final FileSystem fs, final Path rootdir); FSTableDescriptors(final FileSystem fs, final Path rootdir, final boolean fsreadonly); @Override HTableDescriptor get(final TableName tablename); @Override Map<String, HTableDescriptor> getAll(); @Override Map<String, HTableDescriptor> getByNamespace(String name); @Override void add(HTableDescriptor htd); @Override HTableDescriptor remove(final TableName tablename); boolean isTableInfoExists(TableName tableName); static FileStatus getTableInfoPath(FileSystem fs, Path tableDir); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path hbaseRootDir, TableName tableName); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir); void deleteTableDescriptorIfExists(TableName tableName); boolean createTableDescriptor(HTableDescriptor htd); boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation); boolean createTableDescriptorForTableDirectory(Path tableDir, HTableDescriptor htd, boolean forceCreation); }
@Test public void testFormatTableInfoSequenceId() { Path p0 = assertWriteAndReadSequenceId(0); StringBuilder sb = new StringBuilder(); for (int i = 0; i < FSTableDescriptors.WIDTH_OF_SEQUENCE_ID; i++) { sb.append("0"); } assertEquals(FSTableDescriptors.TABLEINFO_FILE_PREFIX + "." + sb.toString(), p0.getName()); Path p2 = assertWriteAndReadSequenceId(2); Path p10000 = assertWriteAndReadSequenceId(10000); Path p = new Path(p0.getParent(), FSTableDescriptors.TABLEINFO_FILE_PREFIX); FileStatus fs = new FileStatus(0, false, 0, 0, 0, p); FileStatus fs0 = new FileStatus(0, false, 0, 0, 0, p0); FileStatus fs2 = new FileStatus(0, false, 0, 0, 0, p2); FileStatus fs10000 = new FileStatus(0, false, 0, 0, 0, p10000); Comparator<FileStatus> comparator = FSTableDescriptors.TABLEINFO_FILESTATUS_COMPARATOR; assertTrue(comparator.compare(fs, fs0) > 0); assertTrue(comparator.compare(fs0, fs2) > 0); assertTrue(comparator.compare(fs2, fs10000) > 0); }
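The zero-padding in formatTableInfoSequenceId is easy to verify standalone. A minimal sketch follows, with the padding loop copied from the method above; the width of 10 is an assumption for illustration, since the value of WIDTH_OF_SEQUENCE_ID is not shown in this excerpt.

// Standalone sketch of the sequence-id padding; WIDTH is assumed to be 10.
public class SequenceIdPadDemo {
  static final int WIDTH = 10; // assumed stand-in for WIDTH_OF_SEQUENCE_ID

  static String pad(final int number) {
    byte[] b = new byte[WIDTH];
    int d = Math.abs(number);
    // Fill digits right-to-left, zero-padding the remainder (as in the source).
    for (int i = b.length - 1; i >= 0; i--) {
      b[i] = (byte) ((d % 10) + '0');
      d /= 10;
    }
    return new String(b);
  }

  public static void main(String[] args) {
    System.out.println(pad(0));     // 0000000000
    System.out.println(pad(2));     // 0000000002
    System.out.println(pad(10000)); // 0000010000
  }
}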
BaseSourceImpl implements BaseSource, MetricsSource { public void setGauge(String gaugeName, long value) { MutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, value); gaugeInt.set(value); } BaseSourceImpl( String metricsName, String metricsDescription, String metricsContext, String metricsJmxContext); void init(); void setGauge(String gaugeName, long value); void incGauge(String gaugeName, long delta); void decGauge(String gaugeName, long delta); void incCounters(String key, long delta); @Override void updateHistogram(String name, long value); @Override void updateQuantile(String name, long value); void removeMetric(String key); @Override void getMetrics(MetricsCollector metricsCollector, boolean all); DynamicMetricsRegistry getMetricsRegistry(); String getMetricsContext(); String getMetricsDescription(); String getMetricsJmxContext(); String getMetricsName(); }
@Test public void testSetGauge() throws Exception { bmsi.setGauge("testset", 100); assertEquals(100, ((MutableGaugeLong) bmsi.metricsRegistry.get("testset")).value()); bmsi.setGauge("testset", 300); assertEquals(300, ((MutableGaugeLong) bmsi.metricsRegistry.get("testset")).value()); }
BaseSourceImpl implements BaseSource, MetricsSource { public void incGauge(String gaugeName, long delta) { MutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, 0l); gaugeInt.incr(delta); } BaseSourceImpl( String metricsName, String metricsDescription, String metricsContext, String metricsJmxContext); void init(); void setGauge(String gaugeName, long value); void incGauge(String gaugeName, long delta); void decGauge(String gaugeName, long delta); void incCounters(String key, long delta); @Override void updateHistogram(String name, long value); @Override void updateQuantile(String name, long value); void removeMetric(String key); @Override void getMetrics(MetricsCollector metricsCollector, boolean all); DynamicMetricsRegistry getMetricsRegistry(); String getMetricsContext(); String getMetricsDescription(); String getMetricsJmxContext(); String getMetricsName(); }
@Test public void testIncGauge() throws Exception { bmsi.incGauge("testincgauge", 100); assertEquals(100, ((MutableGaugeLong) bmsi.metricsRegistry.get("testincgauge")).value()); bmsi.incGauge("testincgauge", 100); assertEquals(200, ((MutableGaugeLong) bmsi.metricsRegistry.get("testincgauge")).value()); }
BaseSourceImpl implements BaseSource, MetricsSource { public void decGauge(String gaugeName, long delta) { MutableGaugeLong gaugeInt = metricsRegistry.getLongGauge(gaugeName, 0l); gaugeInt.decr(delta); } BaseSourceImpl( String metricsName, String metricsDescription, String metricsContext, String metricsJmxContext); void init(); void setGauge(String gaugeName, long value); void incGauge(String gaugeName, long delta); void decGauge(String gaugeName, long delta); void incCounters(String key, long delta); @Override void updateHistogram(String name, long value); @Override void updateQuantile(String name, long value); void removeMetric(String key); @Override void getMetrics(MetricsCollector metricsCollector, boolean all); DynamicMetricsRegistry getMetricsRegistry(); String getMetricsContext(); String getMetricsDescription(); String getMetricsJmxContext(); String getMetricsName(); }
@Test public void testDecGauge() throws Exception { bmsi.decGauge("testdec", 100); assertEquals(-100, ((MutableGaugeLong) bmsi.metricsRegistry.get("testdec")).value()); bmsi.decGauge("testdec", 100); assertEquals(-200, ((MutableGaugeLong) bmsi.metricsRegistry.get("testdec")).value()); }
BaseSourceImpl implements BaseSource, MetricsSource { public void incCounters(String key, long delta) { MutableCounterLong counter = metricsRegistry.getLongCounter(key, 0l); counter.incr(delta); } BaseSourceImpl( String metricsName, String metricsDescription, String metricsContext, String metricsJmxContext); void init(); void setGauge(String gaugeName, long value); void incGauge(String gaugeName, long delta); void decGauge(String gaugeName, long delta); void incCounters(String key, long delta); @Override void updateHistogram(String name, long value); @Override void updateQuantile(String name, long value); void removeMetric(String key); @Override void getMetrics(MetricsCollector metricsCollector, boolean all); DynamicMetricsRegistry getMetricsRegistry(); String getMetricsContext(); String getMetricsDescription(); String getMetricsJmxContext(); String getMetricsName(); }
@Test public void testIncCounters() throws Exception { bmsi.incCounters("testinccounter", 100); assertEquals(100, ((MutableCounterLong) bmsi.metricsRegistry.get("testinccounter")).value()); bmsi.incCounters("testinccounter", 100); assertEquals(200, ((MutableCounterLong) bmsi.metricsRegistry.get("testinccounter")).value()); }
BaseSourceImpl implements BaseSource, MetricsSource { public void removeMetric(String key) { metricsRegistry.removeMetric(key); JmxCacheBuster.clearJmxCache(); } BaseSourceImpl( String metricsName, String metricsDescription, String metricsContext, String metricsJmxContext); void init(); void setGauge(String gaugeName, long value); void incGauge(String gaugeName, long delta); void decGauge(String gaugeName, long delta); void incCounters(String key, long delta); @Override void updateHistogram(String name, long value); @Override void updateQuantile(String name, long value); void removeMetric(String key); @Override void getMetrics(MetricsCollector metricsCollector, boolean all); DynamicMetricsRegistry getMetricsRegistry(); String getMetricsContext(); String getMetricsDescription(); String getMetricsJmxContext(); String getMetricsName(); }
@Test public void testRemoveMetric() throws Exception { bmsi.setGauge("testrmgauge", 100); bmsi.removeMetric("testrmgauge"); assertNull(bmsi.metricsRegistry.get("testrmgauge")); }
MetricsThriftServerSourceFactoryImpl implements MetricsThriftServerSourceFactory { @Override public MetricsThriftServerSource createThriftOneSource() { return FactoryStorage.INSTANCE.thriftOne; } @Override MetricsThriftServerSource createThriftOneSource(); @Override MetricsThriftServerSource createThriftTwoSource(); }
@Test public void testCreateThriftOneSource() throws Exception { assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftOneSource(), new MetricsThriftServerSourceFactoryImpl().createThriftOneSource()); }
MetricsThriftServerSourceFactoryImpl implements MetricsThriftServerSourceFactory { @Override public MetricsThriftServerSource createThriftTwoSource() { return FactoryStorage.INSTANCE.thriftTwo; } @Override MetricsThriftServerSource createThriftOneSource(); @Override MetricsThriftServerSource createThriftTwoSource(); }
@Test public void testCreateThriftTwoSource() throws Exception { assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource(), new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource()); }
MetricsRegionSourceImpl implements MetricsRegionSource { @Override public int compareTo(MetricsRegionSource source) { if (!(source instanceof MetricsRegionSourceImpl)) return -1; MetricsRegionSourceImpl impl = (MetricsRegionSourceImpl) source; return this.regionWrapper.getRegionName() .compareTo(impl.regionWrapper.getRegionName()); } MetricsRegionSourceImpl(MetricsRegionWrapper regionWrapper, MetricsRegionAggregateSourceImpl aggregate); @Override void close(); @Override void updatePut(); @Override void updateDelete(); @Override void updateGet(long getSize); @Override void updateScan(long scanSize); @Override void updateIncrement(); @Override void updateAppend(); @Override MetricsRegionAggregateSource getAggregateSource(); @Override int compareTo(MetricsRegionSource source); @Override boolean equals(Object obj); }
@Test public void testCompareTo() throws Exception { MetricsRegionServerSourceFactory fact = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); MetricsRegionSource one = fact.createRegion(new RegionWrapperStub("TEST")); MetricsRegionSource oneClone = fact.createRegion(new RegionWrapperStub("TEST")); MetricsRegionSource two = fact.createRegion(new RegionWrapperStub("TWO")); assertEquals(0, one.compareTo(oneClone)); assertTrue( one.compareTo(two) < 0); assertTrue( two.compareTo(one) > 0); }
FSTableDescriptors implements TableDescriptors { @Override public HTableDescriptor get(final TableName tablename) throws IOException { invocations++; if (HTableDescriptor.META_TABLEDESC.getTableName().equals(tablename)) { cachehits++; return HTableDescriptor.META_TABLEDESC; } if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) { throw new IOException("No descriptor found for non table = " + tablename); } TableDescriptorAndModtime cachedtdm = this.cache.get(tablename); if (cachedtdm != null) { if (getTableInfoModtime(tablename) <= cachedtdm.getModtime()) { cachehits++; return cachedtdm.getTableDescriptor(); } } TableDescriptorAndModtime tdmt = null; try { tdmt = getTableDescriptorAndModtime(tablename); } catch (NullPointerException e) { LOG.debug("Exception during readTableDescriptor. Current table name = " + tablename, e); } catch (IOException ioe) { LOG.debug("Exception during readTableDescriptor. Current table name = " + tablename, ioe); } if (tdmt != null) { this.cache.put(tablename, tdmt); } return tdmt == null ? null : tdmt.getTableDescriptor(); } FSTableDescriptors(final Configuration conf); FSTableDescriptors(final FileSystem fs, final Path rootdir); FSTableDescriptors(final FileSystem fs, final Path rootdir, final boolean fsreadonly); @Override HTableDescriptor get(final TableName tablename); @Override Map<String, HTableDescriptor> getAll(); @Override Map<String, HTableDescriptor> getByNamespace(String name); @Override void add(HTableDescriptor htd); @Override HTableDescriptor remove(final TableName tablename); boolean isTableInfoExists(TableName tableName); static FileStatus getTableInfoPath(FileSystem fs, Path tableDir); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path hbaseRootDir, TableName tableName); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir); void deleteTableDescriptorIfExists(TableName tableName); boolean createTableDescriptor(HTableDescriptor htd); boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation); boolean createTableDescriptorForTableDirectory(Path tableDir, HTableDescriptor htd, boolean forceCreation); }
@Test public void testNoSuchTable() throws IOException { final String name = "testNoSuchTable"; FileSystem fs = FileSystem.get(UTIL.getConfiguration()); Path rootdir = new Path(UTIL.getDataTestDir(), name); TableDescriptors htds = new FSTableDescriptors(fs, rootdir); assertNull("There shouldn't be any HTD for this table", htds.get(TableName.valueOf("NoSuchTable"))); } @Test public void testReadingArchiveDirectoryFromFS() throws IOException { FileSystem fs = FileSystem.get(UTIL.getConfiguration()); try { new FSTableDescriptors(fs, FSUtils.getRootDir(UTIL.getConfiguration())) .get(TableName.valueOf(HConstants.HFILE_ARCHIVE_DIRECTORY)); fail("Shouldn't be able to read a table descriptor for the archive directory."); } catch (Exception e) { LOG.debug("Correctly got error when reading a table descriptor from the archive directory: " + e.getMessage()); } }
HBaseFsck extends Configured { public HBaseFsck(Configuration conf) throws MasterNotRunningException, ZooKeeperConnectionException, IOException, ClassNotFoundException { super(conf); setConf(HBaseConfiguration.create(getConf())); getConf().setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0); errors = getErrorReporter(conf); int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS); executor = new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck")); } HBaseFsck(Configuration conf); HBaseFsck(Configuration conf, ExecutorService exec); void connect(); void offlineHdfsIntegrityRepair(); int onlineConsistencyRepair(); int onlineHbck(); static byte[] keyOnly(byte[] b); void checkRegionBoundaries(); ErrorReporter getErrors(); void fixEmptyMetaCells(); void fixOrphanTables(); boolean rebuildMeta(boolean fix); void loadHdfsRegionDirs(); int mergeRegionDirs(Path targetRegionDir, HbckInfo contained); void dumpOverlapProblems(Multimap<byte[], HbckInfo> regions); void dumpSidelinedRegions(Map<Path, HbckInfo> regions); Multimap<byte[], HbckInfo> getOverlapGroups( TableName table); static void setDisplayFullReport(); void setFixTableLocks(boolean shouldFix); void setFixAssignments(boolean shouldFix); void setFixMeta(boolean shouldFix); void setFixEmptyMetaCells(boolean shouldFix); void setCheckHdfs(boolean checking); void setFixHdfsHoles(boolean shouldFix); void setFixTableOrphans(boolean shouldFix); void setFixHdfsOverlaps(boolean shouldFix); void setFixHdfsOrphans(boolean shouldFix); void setFixVersionFile(boolean shouldFix); boolean shouldFixVersionFile(); void setSidelineBigOverlaps(boolean sbo); boolean shouldSidelineBigOverlaps(); void setFixSplitParents(boolean shouldFix); void setFixReferenceFiles(boolean shouldFix); boolean shouldIgnorePreCheckPermission(); void setIgnorePreCheckPermission(boolean ignorePreCheckPermission); void setMaxMerge(int mm); int getMaxMerge(); void setMaxOverlapsToSideline(int mo); int getMaxOverlapsToSideline(); void includeTable(TableName table); void setTimeLag(long seconds); void setSidelineDir(String sidelineDir); HFileCorruptionChecker getHFilecorruptionChecker(); void setHFileCorruptionChecker(HFileCorruptionChecker hfcc); void setRetCode(int code); int getRetCode(); static void main(String[] args); HBaseFsck exec(ExecutorService exec, String[] args); static void debugLsr(Configuration conf, Path p); static void debugLsr(Configuration conf, Path p, ErrorReporter errors); static final long DEFAULT_TIME_LAG; static final long DEFAULT_SLEEP_BEFORE_RERUN; }
@Test public void testHBaseFsck() throws Exception { assertNoErrors(doFsck(conf, false)); String table = "tableBadMetaAssign"; TEST_UTIL.createTable(Bytes.toBytes(table), FAM); assertNoErrors(doFsck(conf, false)); HTable meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName(), executorService); Scan scan = new Scan(); scan.setStartRow(Bytes.toBytes(table+",,")); ResultScanner scanner = meta.getScanner(scan); HRegionInfo hri = null; Result res = scanner.next(); ServerName currServer = ServerName.parseFrom(res.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER)); long startCode = Bytes.toLong(res.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER)); for (JVMClusterUtil.RegionServerThread rs : TEST_UTIL.getHBaseCluster().getRegionServerThreads()) { ServerName sn = rs.getRegionServer().getServerName(); if (!currServer.getHostAndPort().equals(sn.getHostAndPort()) || startCode != sn.getStartcode()) { Put put = new Put(res.getRow()); put.setDurability(Durability.SKIP_WAL); put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes.toBytes(sn.getHostAndPort())); put.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn.getStartcode())); meta.put(put); hri = HRegionInfo.getHRegionInfo(res); break; } } assertErrors(doFsck(conf, true), new ERROR_CODE[]{ ERROR_CODE.SERVER_DOES_NOT_MATCH_META}); TEST_UTIL.getHBaseCluster().getMaster() .getAssignmentManager().waitForAssignment(hri); assertNoErrors(doFsck(conf, false)); HTable t = new HTable(conf, Bytes.toBytes(table), executorService); ResultScanner s = t.getScanner(new Scan()); s.close(); t.close(); scanner.close(); meta.close(); }
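The test works by rewriting the info:server and info:serverstartcode cells of a meta row so they point at a region server that does not actually host the region; hbck then reports SERVER_DOES_NOT_MATCH_META. A toy sketch of the underlying comparison (names and values here are hypothetical):

// Minimal sketch of the consistency check behind SERVER_DOES_NOT_MATCH_META:
// compare the server recorded in meta against the server actually hosting
// the region. Both host:port and startcode must match.
final class MetaConsistency {
  static boolean serverMatchesMeta(String metaHostPort, long metaStartcode,
                                   String liveHostPort, long liveStartcode) {
    return metaHostPort.equals(liveHostPort) && metaStartcode == liveStartcode;
  }

  public static void main(String[] args) {
    // The test flips meta to point at a different region server, so this
    // check returns false and hbck reports the error code.
    System.out.println(serverMatchesMeta("rs1:60020", 100L, "rs2:60020", 200L));
  }
}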
HBaseFsck extends Configured { private void closeRegion(HbckInfo hi) throws IOException, InterruptedException { if (hi.metaEntry == null && hi.hdfsEntry == null) { undeployRegions(hi); return; } Get get = new Get(hi.getRegionName()); get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); get.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); get.addColumn(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER); Result r = meta.get(get); ServerName serverName = HRegionInfo.getServerName(r); if (serverName == null) { errors.reportError("Unable to close region " + hi.getRegionNameAsString() + " because meta does not " + "have handle to reach it."); return; } HRegionInfo hri = HRegionInfo.getHRegionInfo(r); if (hri == null) { LOG.warn("Unable to close region " + hi.getRegionNameAsString() + " because hbase:meta had invalid or missing " + HConstants.CATALOG_FAMILY_STR + ":" + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " qualifier value."); return; } HBaseFsckRepair.closeRegionSilentlyAndWait(admin, serverName, hri); } HBaseFsck(Configuration conf); HBaseFsck(Configuration conf, ExecutorService exec); void connect(); void offlineHdfsIntegrityRepair(); int onlineConsistencyRepair(); int onlineHbck(); static byte[] keyOnly(byte[] b); void checkRegionBoundaries(); ErrorReporter getErrors(); void fixEmptyMetaCells(); void fixOrphanTables(); boolean rebuildMeta(boolean fix); void loadHdfsRegionDirs(); int mergeRegionDirs(Path targetRegionDir, HbckInfo contained); void dumpOverlapProblems(Multimap<byte[], HbckInfo> regions); void dumpSidelinedRegions(Map<Path, HbckInfo> regions); Multimap<byte[], HbckInfo> getOverlapGroups( TableName table); static void setDisplayFullReport(); void setFixTableLocks(boolean shouldFix); void setFixAssignments(boolean shouldFix); void setFixMeta(boolean shouldFix); void setFixEmptyMetaCells(boolean shouldFix); void setCheckHdfs(boolean checking); void setFixHdfsHoles(boolean shouldFix); void setFixTableOrphans(boolean shouldFix); void setFixHdfsOverlaps(boolean shouldFix); void setFixHdfsOrphans(boolean shouldFix); void setFixVersionFile(boolean shouldFix); boolean shouldFixVersionFile(); void setSidelineBigOverlaps(boolean sbo); boolean shouldSidelineBigOverlaps(); void setFixSplitParents(boolean shouldFix); void setFixReferenceFiles(boolean shouldFix); boolean shouldIgnorePreCheckPermission(); void setIgnorePreCheckPermission(boolean ignorePreCheckPermission); void setMaxMerge(int mm); int getMaxMerge(); void setMaxOverlapsToSideline(int mo); int getMaxOverlapsToSideline(); void includeTable(TableName table); void setTimeLag(long seconds); void setSidelineDir(String sidelineDir); HFileCorruptionChecker getHFilecorruptionChecker(); void setHFileCorruptionChecker(HFileCorruptionChecker hfcc); void setRetCode(int code); int getRetCode(); static void main(String[] args); HBaseFsck exec(ExecutorService exec, String[] args); static void debugLsr(Configuration conf, Path p); static void debugLsr(Configuration conf, Path p, ErrorReporter errors); static final long DEFAULT_TIME_LAG; static final long DEFAULT_SLEEP_BEFORE_RERUN; }
@Test(timeout=180000) public void testFixAssignmentsWhenMETAinTransition() throws Exception { MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); HBaseAdmin admin = null; try { admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); admin.closeRegion(cluster.getServerHoldingMeta(), HRegionInfo.FIRST_META_REGIONINFO); } finally { if (admin != null) { admin.close(); } } regionStates.regionOffline(HRegionInfo.FIRST_META_REGIONINFO); MetaRegionTracker.deleteMetaLocation(cluster.getMaster().getZooKeeper()); assertFalse(regionStates.isRegionOnline(HRegionInfo.FIRST_META_REGIONINFO)); HBaseFsck hbck = doFsck(conf, true); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.UNKNOWN, ERROR_CODE.NO_META_REGION, ERROR_CODE.NULL_META_REGION }); assertNoErrors(doFsck(conf, false)); }
HBaseFsck extends Configured { public Multimap<byte[], HbckInfo> getOverlapGroups( TableName table) { TableInfo ti = tablesInfo.get(table); return ti.overlapGroups; } HBaseFsck(Configuration conf); HBaseFsck(Configuration conf, ExecutorService exec); void connect(); void offlineHdfsIntegrityRepair(); int onlineConsistencyRepair(); int onlineHbck(); static byte[] keyOnly(byte[] b); void checkRegionBoundaries(); ErrorReporter getErrors(); void fixEmptyMetaCells(); void fixOrphanTables(); boolean rebuildMeta(boolean fix); void loadHdfsRegionDirs(); int mergeRegionDirs(Path targetRegionDir, HbckInfo contained); void dumpOverlapProblems(Multimap<byte[], HbckInfo> regions); void dumpSidelinedRegions(Map<Path, HbckInfo> regions); Multimap<byte[], HbckInfo> getOverlapGroups( TableName table); static void setDisplayFullReport(); void setFixTableLocks(boolean shouldFix); void setFixAssignments(boolean shouldFix); void setFixMeta(boolean shouldFix); void setFixEmptyMetaCells(boolean shouldFix); void setCheckHdfs(boolean checking); void setFixHdfsHoles(boolean shouldFix); void setFixTableOrphans(boolean shouldFix); void setFixHdfsOverlaps(boolean shouldFix); void setFixHdfsOrphans(boolean shouldFix); void setFixVersionFile(boolean shouldFix); boolean shouldFixVersionFile(); void setSidelineBigOverlaps(boolean sbo); boolean shouldSidelineBigOverlaps(); void setFixSplitParents(boolean shouldFix); void setFixReferenceFiles(boolean shouldFix); boolean shouldIgnorePreCheckPermission(); void setIgnorePreCheckPermission(boolean ignorePreCheckPermission); void setMaxMerge(int mm); int getMaxMerge(); void setMaxOverlapsToSideline(int mo); int getMaxOverlapsToSideline(); void includeTable(TableName table); void setTimeLag(long seconds); void setSidelineDir(String sidelineDir); HFileCorruptionChecker getHFilecorruptionChecker(); void setHFileCorruptionChecker(HFileCorruptionChecker hfcc); void setRetCode(int code); int getRetCode(); static void main(String[] args); HBaseFsck exec(ExecutorService exec, String[] args); static void debugLsr(Configuration conf, Path p); static void debugLsr(Configuration conf, Path p, ErrorReporter errors); static final long DEFAULT_TIME_LAG; static final long DEFAULT_SLEEP_BEFORE_RERUN; }
@Test public void testHBaseFsckClean() throws Exception { assertNoErrors(doFsck(conf, false)); TableName table = TableName.valueOf("tableClean"); try { HBaseFsck hbck = doFsck(conf, false); assertNoErrors(hbck); setupTable(table); assertEquals(ROWKEYS.length, countRows()); hbck = doFsck(conf, false); assertNoErrors(hbck); assertEquals(0, hbck.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { deleteTable(table); } } @Test public void testDupeStartKey() throws Exception { TableName table = TableName.valueOf("tableDupeStartKey"); try { setupTable(table); assertNoErrors(doFsck(conf, false)); assertEquals(ROWKEYS.length, countRows()); HRegionInfo hriDupe = createRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("A2")); TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriDupe); TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() .waitForAssignment(hriDupe); ServerName server = regionStates.getRegionServerOfRegion(hriDupe); TEST_UTIL.assertRegionOnServer(hriDupe, server, REGION_ONLINE_TIMEOUT); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.DUPE_STARTKEYS, ERROR_CODE.DUPE_STARTKEYS}); assertEquals(2, hbck.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); doFsck(conf,true); HBaseFsck hbck2 = doFsck(conf,false); assertNoErrors(hbck2); assertEquals(0, hbck2.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { deleteTable(table); } } @Test public void testDupeRegion() throws Exception { TableName table = TableName.valueOf("tableDupeRegion"); try { setupTable(table); assertNoErrors(doFsck(conf, false)); assertEquals(ROWKEYS.length, countRows()); HRegionInfo hriDupe = createRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("B")); TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriDupe); TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() .waitForAssignment(hriDupe); ServerName server = regionStates.getRegionServerOfRegion(hriDupe); TEST_UTIL.assertRegionOnServer(hriDupe, server, REGION_ONLINE_TIMEOUT); HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); while (findDeployedHSI(getDeployedHRIs(admin), hriDupe) == null) { Thread.sleep(250); } LOG.debug("Finished assignment of dupe region"); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.DUPE_STARTKEYS, ERROR_CODE.DUPE_STARTKEYS}); assertEquals(2, hbck.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); doFsck(conf,true); HBaseFsck hbck2 = doFsck(conf,false); assertNoErrors(hbck2); assertEquals(0, hbck2.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { deleteTable(table); } } @Test public void testDegenerateRegions() throws Exception { TableName table = TableName.valueOf("tableDegenerateRegions"); try { setupTable(table); assertNoErrors(doFsck(conf,false)); assertEquals(ROWKEYS.length, countRows()); HRegionInfo hriDupe = createRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("B")); TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriDupe); TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() .waitForAssignment(hriDupe); ServerName server = regionStates.getRegionServerOfRegion(hriDupe); TEST_UTIL.assertRegionOnServer(hriDupe, server, REGION_ONLINE_TIMEOUT); HBaseFsck hbck = doFsck(conf,false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.DEGENERATE_REGION, ERROR_CODE.DUPE_STARTKEYS, 
ERROR_CODE.DUPE_STARTKEYS}); assertEquals(2, hbck.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); doFsck(conf,true); HBaseFsck hbck2 = doFsck(conf,false); assertNoErrors(hbck2); assertEquals(0, hbck2.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { deleteTable(table); } } @Test public void testContainedRegionOverlap() throws Exception { TableName table = TableName.valueOf("tableContainedRegionOverlap"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); HRegionInfo hriOverlap = createRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B")); TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap); TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() .waitForAssignment(hriOverlap); ServerName server = regionStates.getRegionServerOfRegion(hriOverlap); TEST_UTIL.assertRegionOnServer(hriOverlap, server, REGION_ONLINE_TIMEOUT); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.OVERLAP_IN_REGION_CHAIN }); assertEquals(2, hbck.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); doFsck(conf, true); HBaseFsck hbck2 = doFsck(conf,false); assertNoErrors(hbck2); assertEquals(0, hbck2.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { deleteTable(table); } } @Test public void testOverlapAndOrphan() throws Exception { TableName table = TableName.valueOf("tableOverlapAndOrphan"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); TEST_UTIL.getHBaseAdmin().disableTable(table); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("B"), true, true, false, true); TEST_UTIL.getHBaseAdmin().enableTable(table); HRegionInfo hriOverlap = createRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B")); TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap); TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() .waitForAssignment(hriOverlap); ServerName server = regionStates.getRegionServerOfRegion(hriOverlap); TEST_UTIL.assertRegionOnServer(hriOverlap, server, REGION_ONLINE_TIMEOUT); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.ORPHAN_HDFS_REGION, ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN}); doFsck(conf, true); HBaseFsck hbck2 = doFsck(conf,false); assertNoErrors(hbck2); assertEquals(0, hbck2.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { deleteTable(table); } } @Test public void testCoveredStartKey() throws Exception { TableName table = TableName.valueOf("tableCoveredStartKey"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); HRegionInfo hriOverlap = createRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B2")); TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap); TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() .waitForAssignment(hriOverlap); ServerName server = regionStates.getRegionServerOfRegion(hriOverlap); TEST_UTIL.assertRegionOnServer(hriOverlap, server, REGION_ONLINE_TIMEOUT); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.OVERLAP_IN_REGION_CHAIN, ERROR_CODE.OVERLAP_IN_REGION_CHAIN }); assertEquals(3, hbck.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); doFsck(conf, true); HBaseFsck hbck2 = doFsck(conf, false); assertErrors(hbck2, new ERROR_CODE[0]); 
assertEquals(0, hbck2.getOverlapGroups(table).size()); assertEquals(ROWKEYS.length, countRows()); } finally { deleteTable(table); } } @Test public void testRegionHole() throws Exception { TableName table = TableName.valueOf("tableRegionHole"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); TEST_UTIL.getHBaseAdmin().disableTable(table); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), true, true, true); TEST_UTIL.getHBaseAdmin().enableTable(table); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.HOLE_IN_REGION_CHAIN}); assertEquals(0, hbck.getOverlapGroups(table).size()); doFsck(conf, true); assertNoErrors(doFsck(conf,false)); assertEquals(ROWKEYS.length - 2, countRows()); } finally { deleteTable(table); } } @Test public void testHDFSRegioninfoMissing() throws Exception { TableName table = TableName.valueOf("tableHDFSRegioninfoMissing"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); TEST_UTIL.getHBaseAdmin().disableTable(table); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), true, true, false, true); TEST_UTIL.getHBaseAdmin().enableTable(table); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.ORPHAN_HDFS_REGION, ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN}); assertEquals(0, hbck.getOverlapGroups(table).size()); doFsck(conf, true); assertNoErrors(doFsck(conf, false)); assertEquals(ROWKEYS.length, countRows()); } finally { deleteTable(table); } } @Test public void testNotInMetaOrDeployedHole() throws Exception { TableName table = TableName.valueOf("tableNotInMetaOrDeployedHole"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); TEST_UTIL.getHBaseAdmin().disableTable(table); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), true, true, false); TEST_UTIL.getHBaseAdmin().enableTable(table); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN}); assertEquals(0, hbck.getOverlapGroups(table).size()); assertErrors(doFsck(conf, true), new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN}); assertNoErrors(doFsck(conf,false)); assertEquals(ROWKEYS.length, countRows()); } finally { deleteTable(table); } } @Test public void testNotInMetaHole() throws Exception { TableName table = TableName.valueOf("tableNotInMetaHole"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); TEST_UTIL.getHBaseAdmin().disableTable(table); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), false, true, false); TEST_UTIL.getHBaseAdmin().enableTable(table); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN}); assertEquals(0, hbck.getOverlapGroups(table).size()); assertErrors(doFsck(conf, true), new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN}); assertNoErrors(doFsck(conf,false)); assertEquals(ROWKEYS.length, countRows()); } finally { deleteTable(table); } } @Test public void testNotInHdfs() throws Exception { TableName table = TableName.valueOf("tableNotInHdfs"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); TEST_UTIL.getHBaseAdmin().flush(table.getName()); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), 
Bytes.toBytes("C"), false, false, true); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] {ERROR_CODE.NOT_IN_HDFS}); assertEquals(0, hbck.getOverlapGroups(table).size()); doFsck(conf, true); assertNoErrors(doFsck(conf,false)); assertEquals(ROWKEYS.length - 2, countRows()); } finally { deleteTable(table); } } @Test public void testNoHdfsTable() throws Exception { TableName table = TableName.valueOf("NoHdfsTable"); setupTable(table); assertEquals(ROWKEYS.length, countRows()); TEST_UTIL.getHBaseAdmin().flush(table.getName()); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes(""), Bytes.toBytes("A"), false, false, true); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("B"), false, false, true); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), false, false, true); deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("C"), Bytes.toBytes(""), false, false, true); deleteTableDir(table); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] {ERROR_CODE.NOT_IN_HDFS, ERROR_CODE.NOT_IN_HDFS, ERROR_CODE.NOT_IN_HDFS, ERROR_CODE.NOT_IN_HDFS,}); assertEquals(0, hbck.getOverlapGroups(table).size()); doFsck(conf, true); assertNoErrors(doFsck(conf,false)); assertFalse("Table "+ table + " should have been deleted", TEST_UTIL.getHBaseAdmin().tableExists(table)); }
HBaseFsck extends Configured { public ErrorReporter getErrors() { return errors; } HBaseFsck(Configuration conf); HBaseFsck(Configuration conf, ExecutorService exec); void connect(); void offlineHdfsIntegrityRepair(); int onlineConsistencyRepair(); int onlineHbck(); static byte[] keyOnly(byte[] b); void checkRegionBoundaries(); ErrorReporter getErrors(); void fixEmptyMetaCells(); void fixOrphanTables(); boolean rebuildMeta(boolean fix); void loadHdfsRegionDirs(); int mergeRegionDirs(Path targetRegionDir, HbckInfo contained); void dumpOverlapProblems(Multimap<byte[], HbckInfo> regions); void dumpSidelinedRegions(Map<Path, HbckInfo> regions); Multimap<byte[], HbckInfo> getOverlapGroups( TableName table); static void setDisplayFullReport(); void setFixTableLocks(boolean shouldFix); void setFixAssignments(boolean shouldFix); void setFixMeta(boolean shouldFix); void setFixEmptyMetaCells(boolean shouldFix); void setCheckHdfs(boolean checking); void setFixHdfsHoles(boolean shouldFix); void setFixTableOrphans(boolean shouldFix); void setFixHdfsOverlaps(boolean shouldFix); void setFixHdfsOrphans(boolean shouldFix); void setFixVersionFile(boolean shouldFix); boolean shouldFixVersionFile(); void setSidelineBigOverlaps(boolean sbo); boolean shouldSidelineBigOverlaps(); void setFixSplitParents(boolean shouldFix); void setFixReferenceFiles(boolean shouldFix); boolean shouldIgnorePreCheckPermission(); void setIgnorePreCheckPermission(boolean ignorePreCheckPermission); void setMaxMerge(int mm); int getMaxMerge(); void setMaxOverlapsToSideline(int mo); int getMaxOverlapsToSideline(); void includeTable(TableName table); void setTimeLag(long seconds); void setSidelineDir(String sidelineDir); HFileCorruptionChecker getHFilecorruptionChecker(); void setHFileCorruptionChecker(HFileCorruptionChecker hfcc); void setRetCode(int code); int getRetCode(); static void main(String[] args); HBaseFsck exec(ExecutorService exec, String[] args); static void debugLsr(Configuration conf, Path p); static void debugLsr(Configuration conf, Path p, ErrorReporter errors); static final long DEFAULT_TIME_LAG; static final long DEFAULT_SLEEP_BEFORE_RERUN; }
@Test public void testMissingRegionInfoQualifier() throws Exception { TableName table = TableName.valueOf("testMissingRegionInfoQualifier"); try { setupTable(table); final List<Delete> deletes = new LinkedList<Delete>(); HTable meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName()); MetaScanner.metaScan(conf, new MetaScanner.MetaScannerVisitor() { @Override public boolean processRow(Result rowResult) throws IOException { HRegionInfo hri = MetaScanner.getHRegionInfo(rowResult); if (hri != null && !hri.getTable().isSystemTable()) { Delete delete = new Delete(rowResult.getRow()); delete.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); deletes.add(delete); } return true; } @Override public void close() throws IOException { } }); meta.delete(deletes); meta.put(new Put(Bytes.toBytes(table + ",,1361911384013.810e28f59a57da91c66")).add( HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes.toBytes("node1:60020"))); meta.put(new Put(Bytes.toBytes(table + ",,1361911384013.810e28f59a57da91c66")).add( HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(1362150791183L))); meta.close(); HBaseFsck hbck = doFsck(conf, false); assertTrue(hbck.getErrors().getErrorList().contains(ERROR_CODE.EMPTY_META_CELL)); hbck = doFsck(conf, true); assertFalse(hbck.getErrors().getErrorList().contains(ERROR_CODE.EMPTY_META_CELL)); } finally { deleteTable(table); } }
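An EMPTY_META_CELL is a meta row that still carries location cells (info:server, info:serverstartcode) but has lost its info:regioninfo cell, which is exactly the state the two raw Puts above manufacture. A toy predicate for the condition (class and key names are hypothetical):

import java.util.*;

// Sketch of the EMPTY_META_CELL condition: a meta row that carries
// info:server / info:serverstartcode but no info:regioninfo cell.
final class EmptyMetaCellCheck {
  static boolean isEmptyMetaCell(Map<String, byte[]> infoFamily) {
    return !infoFamily.containsKey("regioninfo")
        && (infoFamily.containsKey("server")
            || infoFamily.containsKey("serverstartcode"));
  }

  public static void main(String[] args) {
    Map<String, byte[]> row = new HashMap<>();
    row.put("server", "node1:60020".getBytes());
    System.out.println(isEmptyMetaCell(row));   // true -> hbck flags the row
  }
}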
HBaseFsck extends Configured { private void deleteMetaRegion(HbckInfo hi) throws IOException { deleteMetaRegion(hi.metaEntry.getRegionName()); } HBaseFsck(Configuration conf); HBaseFsck(Configuration conf, ExecutorService exec); void connect(); void offlineHdfsIntegrityRepair(); int onlineConsistencyRepair(); int onlineHbck(); static byte[] keyOnly(byte[] b); void checkRegionBoundaries(); ErrorReporter getErrors(); void fixEmptyMetaCells(); void fixOrphanTables(); boolean rebuildMeta(boolean fix); void loadHdfsRegionDirs(); int mergeRegionDirs(Path targetRegionDir, HbckInfo contained); void dumpOverlapProblems(Multimap<byte[], HbckInfo> regions); void dumpSidelinedRegions(Map<Path, HbckInfo> regions); Multimap<byte[], HbckInfo> getOverlapGroups( TableName table); static void setDisplayFullReport(); void setFixTableLocks(boolean shouldFix); void setFixAssignments(boolean shouldFix); void setFixMeta(boolean shouldFix); void setFixEmptyMetaCells(boolean shouldFix); void setCheckHdfs(boolean checking); void setFixHdfsHoles(boolean shouldFix); void setFixTableOrphans(boolean shouldFix); void setFixHdfsOverlaps(boolean shouldFix); void setFixHdfsOrphans(boolean shouldFix); void setFixVersionFile(boolean shouldFix); boolean shouldFixVersionFile(); void setSidelineBigOverlaps(boolean sbo); boolean shouldSidelineBigOverlaps(); void setFixSplitParents(boolean shouldFix); void setFixReferenceFiles(boolean shouldFix); boolean shouldIgnorePreCheckPermission(); void setIgnorePreCheckPermission(boolean ignorePreCheckPermission); void setMaxMerge(int mm); int getMaxMerge(); void setMaxOverlapsToSideline(int mo); int getMaxOverlapsToSideline(); void includeTable(TableName table); void setTimeLag(long seconds); void setSidelineDir(String sidelineDir); HFileCorruptionChecker getHFilecorruptionChecker(); void setHFileCorruptionChecker(HFileCorruptionChecker hfcc); void setRetCode(int code); int getRetCode(); static void main(String[] args); HBaseFsck exec(ExecutorService exec, String[] args); static void debugLsr(Configuration conf, Path p); static void debugLsr(Configuration conf, Path p, ErrorReporter errors); static final long DEFAULT_TIME_LAG; static final long DEFAULT_SLEEP_BEFORE_RERUN; }
@Test public void testMetaOffline() throws Exception { HBaseFsck hbck = doFsck(conf, false); assertNoErrors(hbck); deleteMetaRegion(conf, true, false, false); hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NO_META_REGION, ERROR_CODE.UNKNOWN }); hbck = doFsck(conf, true); assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NO_META_REGION, ERROR_CODE.UNKNOWN }); hbck = doFsck(conf, false); assertNoErrors(hbck); }
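Like most of these tests, testMetaOffline follows a check/fix/re-check cycle: doFsck(conf, false) only reports, doFsck(conf, true) repairs, and a final read-only pass must come back clean. A sketch of that loop in the abstract (the Fsck interface is hypothetical):

// Sketch of the check/fix/re-check cycle the hbck tests follow: a read-only
// pass reports problems, a fixing pass repairs what it can, and a final
// read-only pass verifies the cluster converged to a consistent state.
interface Fsck {
  java.util.List<String> run(boolean fix);
}

final class CheckFixRecheck {
  static boolean repair(Fsck fsck) {
    java.util.List<String> before = fsck.run(false);   // report only
    if (before.isEmpty()) return true;                 // already consistent
    fsck.run(true);                                    // apply repairs
    return fsck.run(false).isEmpty();                  // verify convergence
  }
}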
HBaseFsck extends Configured { public void checkRegionBoundaries() { try { ByteArrayComparator comparator = new ByteArrayComparator(); List<HRegionInfo> regions = MetaScanner.listAllRegions(getConf(), false); final RegionBoundariesInformation currentRegionBoundariesInformation = new RegionBoundariesInformation(); Path hbaseRoot = FSUtils.getRootDir(getConf()); for (HRegionInfo regionInfo : regions) { Path tableDir = FSUtils.getTableDir(hbaseRoot, regionInfo.getTable()); currentRegionBoundariesInformation.regionName = regionInfo.getRegionName(); Path path = new Path(tableDir, regionInfo.getEncodedName()); FileSystem fs = path.getFileSystem(getConf()); FileStatus[] files = fs.listStatus(path); byte[] storeFirstKey = null; byte[] storeLastKey = null; for (FileStatus file : files) { String fileName = file.getPath().toString(); fileName = fileName.substring(fileName.lastIndexOf("/") + 1); if (!fileName.startsWith(".") && !fileName.endsWith("recovered.edits")) { FileStatus[] storeFiles = fs.listStatus(file.getPath()); for (FileStatus storeFile : storeFiles) { HFile.Reader reader = HFile.createReader(fs, storeFile.getPath(), new CacheConfig( getConf()), getConf()); if ((reader.getFirstKey() != null) && ((storeFirstKey == null) || (comparator.compare(storeFirstKey, reader.getFirstKey()) > 0))) { storeFirstKey = reader.getFirstKey(); } if ((reader.getLastKey() != null) && ((storeLastKey == null) || (comparator.compare(storeLastKey, reader.getLastKey())) < 0)) { storeLastKey = reader.getLastKey(); } reader.close(); } } } currentRegionBoundariesInformation.metaFirstKey = regionInfo.getStartKey(); currentRegionBoundariesInformation.metaLastKey = regionInfo.getEndKey(); currentRegionBoundariesInformation.storesFirstKey = keyOnly(storeFirstKey); currentRegionBoundariesInformation.storesLastKey = keyOnly(storeLastKey); if (currentRegionBoundariesInformation.metaFirstKey.length == 0) currentRegionBoundariesInformation.metaFirstKey = null; if (currentRegionBoundariesInformation.metaLastKey.length == 0) currentRegionBoundariesInformation.metaLastKey = null; boolean valid = true; if ((currentRegionBoundariesInformation.storesFirstKey != null) && (currentRegionBoundariesInformation.metaFirstKey != null)) { valid = valid && comparator.compare(currentRegionBoundariesInformation.storesFirstKey, currentRegionBoundariesInformation.metaFirstKey) >= 0; } if ((currentRegionBoundariesInformation.storesLastKey != null) && (currentRegionBoundariesInformation.metaLastKey != null)) { valid = valid && comparator.compare(currentRegionBoundariesInformation.storesLastKey, currentRegionBoundariesInformation.metaLastKey) < 0; } if (!valid) { errors.reportError(ERROR_CODE.BOUNDARIES_ERROR, "Found issues with region boundaries", tablesInfo.get(regionInfo.getTable())); LOG.warn("Region boundaries not aligned between stores and META for:"); LOG.warn(currentRegionBoundariesInformation); } } } catch (IOException e) { LOG.error(e); } } HBaseFsck(Configuration conf); HBaseFsck(Configuration conf, ExecutorService exec); void connect(); void offlineHdfsIntegrityRepair(); int onlineConsistencyRepair(); int onlineHbck(); static byte[] keyOnly(byte[] b); void checkRegionBoundaries(); ErrorReporter getErrors(); void fixEmptyMetaCells(); void fixOrphanTables(); boolean rebuildMeta(boolean fix); void loadHdfsRegionDirs(); int mergeRegionDirs(Path targetRegionDir, HbckInfo contained); void dumpOverlapProblems(Multimap<byte[], HbckInfo> regions); void dumpSidelinedRegions(Map<Path, HbckInfo> regions); Multimap<byte[], HbckInfo> 
getOverlapGroups( TableName table); static void setDisplayFullReport(); void setFixTableLocks(boolean shouldFix); void setFixAssignments(boolean shouldFix); void setFixMeta(boolean shouldFix); void setFixEmptyMetaCells(boolean shouldFix); void setCheckHdfs(boolean checking); void setFixHdfsHoles(boolean shouldFix); void setFixTableOrphans(boolean shouldFix); void setFixHdfsOverlaps(boolean shouldFix); void setFixHdfsOrphans(boolean shouldFix); void setFixVersionFile(boolean shouldFix); boolean shouldFixVersionFile(); void setSidelineBigOverlaps(boolean sbo); boolean shouldSidelineBigOverlaps(); void setFixSplitParents(boolean shouldFix); void setFixReferenceFiles(boolean shouldFix); boolean shouldIgnorePreCheckPermission(); void setIgnorePreCheckPermission(boolean ignorePreCheckPermission); void setMaxMerge(int mm); int getMaxMerge(); void setMaxOverlapsToSideline(int mo); int getMaxOverlapsToSideline(); void includeTable(TableName table); void setTimeLag(long seconds); void setSidelineDir(String sidelineDir); HFileCorruptionChecker getHFilecorruptionChecker(); void setHFileCorruptionChecker(HFileCorruptionChecker hfcc); void setRetCode(int code); int getRetCode(); static void main(String[] args); HBaseFsck exec(ExecutorService exec, String[] args); static void debugLsr(Configuration conf, Path p); static void debugLsr(Configuration conf, Path p, ErrorReporter errors); static final long DEFAULT_TIME_LAG; static final long DEFAULT_SLEEP_BEFORE_RERUN; }
@Test public void testRegionBoundariesCheck() throws Exception { HBaseFsck hbck = doFsck(conf, false); assertNoErrors(hbck); try { hbck.checkRegionBoundaries(); } catch (IllegalArgumentException e) { if (e.getMessage().endsWith("not a valid DFS filename.")) { fail("Table directory path is not valid." + e.getMessage()); } } }
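checkRegionBoundaries() accepts a region when every store-file key falls inside the half-open meta range [startKey, endKey), with null standing for an unbounded edge. A standalone sketch of that validation with an inline unsigned byte[] comparator (a stand-in for ByteArrayComparator; all names here are hypothetical):

import java.util.*;

// Sketch of the boundary validation in checkRegionBoundaries(): the smallest
// store key must be >= the region's meta start key, and the largest store key
// must be < the region's meta end key (null meaning "unbounded").
final class BoundaryCheck {
  static boolean valid(byte[] storesFirst, byte[] storesLast,
                       byte[] metaFirst, byte[] metaLast) {
    Comparator<byte[]> cmp = (a, b) -> {
      int n = Math.min(a.length, b.length);
      for (int i = 0; i < n; i++) {
        int d = (a[i] & 0xff) - (b[i] & 0xff);    // unsigned byte comparison
        if (d != 0) return d;
      }
      return a.length - b.length;
    };
    boolean ok = true;
    if (storesFirst != null && metaFirst != null) {
      ok &= cmp.compare(storesFirst, metaFirst) >= 0;
    }
    if (storesLast != null && metaLast != null) {
      ok &= cmp.compare(storesLast, metaLast) < 0;
    }
    return ok;
  }

  public static void main(String[] args) {
    System.out.println(valid("b".getBytes(), "c".getBytes(),
                             "a".getBytes(), "d".getBytes()));  // true
    System.out.println(valid("a".getBytes(), "z".getBytes(),
                             "b".getBytes(), "d".getBytes()));  // false
  }
}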
HFileArchiveUtil { public static Path getTableArchivePath(final Path rootdir, final TableName tableName) { return FSUtils.getTableDir(getArchivePath(rootdir), tableName); } private HFileArchiveUtil(); static Path getStoreArchivePath(final Configuration conf, final TableName tableName, final String regionName, final String familyName); static Path getStoreArchivePath(Configuration conf, HRegionInfo region, Path tabledir, byte[] family); static Path getRegionArchiveDir(Path rootDir, TableName tableName, Path regiondir); static Path getRegionArchiveDir(Path rootDir, TableName tableName, String encodedRegionName); static Path getTableArchivePath(final Path rootdir, final TableName tableName); static Path getTableArchivePath(final Configuration conf, final TableName tableName); static Path getArchivePath(Configuration conf); }
@Test public void testGetTableArchivePath() { assertNotNull(HFileArchiveUtil.getTableArchivePath(rootDir, TableName.valueOf("table"))); }
HFileArchiveUtil { public static Path getArchivePath(Configuration conf) throws IOException { return getArchivePath(FSUtils.getRootDir(conf)); } private HFileArchiveUtil(); static Path getStoreArchivePath(final Configuration conf, final TableName tableName, final String regionName, final String familyName); static Path getStoreArchivePath(Configuration conf, HRegionInfo region, Path tabledir, byte[] family); static Path getRegionArchiveDir(Path rootDir, TableName tableName, Path regiondir); static Path getRegionArchiveDir(Path rootDir, TableName tableName, String encodedRegionName); static Path getTableArchivePath(final Path rootdir, final TableName tableName); static Path getTableArchivePath(final Configuration conf, final TableName tableName); static Path getArchivePath(Configuration conf); }
@Test public void testGetArchivePath() throws Exception { Configuration conf = new Configuration(); FSUtils.setRootDir(conf, new Path("root")); assertNotNull(HFileArchiveUtil.getArchivePath(conf)); }
HFileArchiveUtil { public static Path getRegionArchiveDir(Path rootDir, TableName tableName, Path regiondir) { Path archiveDir = getTableArchivePath(rootDir, tableName); String encodedRegionName = regiondir.getName(); return HRegion.getRegionDir(archiveDir, encodedRegionName); } private HFileArchiveUtil(); static Path getStoreArchivePath(final Configuration conf, final TableName tableName, final String regionName, final String familyName); static Path getStoreArchivePath(Configuration conf, HRegionInfo region, Path tabledir, byte[] family); static Path getRegionArchiveDir(Path rootDir, TableName tableName, Path regiondir); static Path getRegionArchiveDir(Path rootDir, TableName tableName, String encodedRegionName); static Path getTableArchivePath(final Path rootdir, final TableName tableName); static Path getTableArchivePath(final Configuration conf, final TableName tableName); static Path getArchivePath(Configuration conf); }
@Test public void testRegionArchiveDir() { Path regionDir = new Path("region"); assertNotNull(HFileArchiveUtil.getRegionArchiveDir(rootDir, TableName.valueOf("table"), regionDir)); }
HFileArchiveUtil { public static Path getStoreArchivePath(final Configuration conf, final TableName tableName, final String regionName, final String familyName) throws IOException { Path tableArchiveDir = getTableArchivePath(conf, tableName); return HStore.getStoreHomedir(tableArchiveDir, regionName, Bytes.toBytes(familyName)); } private HFileArchiveUtil(); static Path getStoreArchivePath(final Configuration conf, final TableName tableName, final String regionName, final String familyName); static Path getStoreArchivePath(Configuration conf, HRegionInfo region, Path tabledir, byte[] family); static Path getRegionArchiveDir(Path rootDir, TableName tableName, Path regiondir); static Path getRegionArchiveDir(Path rootDir, TableName tableName, String encodedRegionName); static Path getTableArchivePath(final Path rootdir, final TableName tableName); static Path getTableArchivePath(final Configuration conf, final TableName tableName); static Path getArchivePath(Configuration conf); }
@Test public void testGetStoreArchivePath() throws IOException { byte[] family = Bytes.toBytes("Family"); Path tabledir = FSUtils.getTableDir(rootDir, TableName.valueOf("table")); HRegionInfo region = new HRegionInfo(TableName.valueOf("table")); Configuration conf = new Configuration(); FSUtils.setRootDir(conf, new Path("root")); assertNotNull(HFileArchiveUtil.getStoreArchivePath(conf, region, tabledir, family)); }
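The four tests above only assert that the helpers return non-null paths; the interesting part is the directory layout they compose. A sketch of that layout using Hadoop's Path directly (the exact archive layout is version dependent, and the encoded region name below is made up for illustration):

import org.apache.hadoop.fs.Path;

// Sketch of the directory nesting the HFileArchiveUtil helpers compose:
// <rootdir>/archive/<table data dir>/<encoded-region>/<family>.
public class ArchiveLayout {
  public static void main(String[] args) {
    Path root = new Path("/hbase");
    Path archive = new Path(root, "archive");                        // getArchivePath
    Path table = new Path(new Path(archive, "data/default"), "table"); // getTableArchivePath
    Path region = new Path(table, "810e28f59a57da91c66");            // getRegionArchiveDir
    Path store = new Path(region, "Family");                         // getStoreArchivePath
    System.out.println(store); // /hbase/archive/data/default/table/810e28f59a57da91c66/Family
  }
}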
RegionSizeCalculator { public long getRegionSize(byte[] regionId) { Long size = sizeMap.get(regionId); if (size == null) { LOG.debug("Unknown region:" + Arrays.toString(regionId)); return 0; } else { return size; } } RegionSizeCalculator(HTable table); RegionSizeCalculator(HTable table, HBaseAdmin admin); long getRegionSize(byte[] regionId); Map<byte[], Long> getRegionSizeMap(); }
@Test public void testLargeRegion() throws Exception { HTable table = mockTable("largeRegion"); HBaseAdmin admin = mockAdmin( mockServer( mockRegion("largeRegion", Integer.MAX_VALUE) ) ); RegionSizeCalculator calculator = new RegionSizeCalculator(table, admin); assertEquals(((long) Integer.MAX_VALUE) * megabyte, calculator.getRegionSize("largeRegion".getBytes())); }
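getRegionSize() is a plain map lookup with a zero default, so callers never fail on a region the calculator could not size. A minimal sketch keyed by String for readability (the real class keys on byte[] region names; the SizeLookup name is hypothetical):

import java.util.*;

// Sketch of the lookup behaviour tested above: sizes are cached per region
// name and an unknown region reports size 0 rather than failing.
final class SizeLookup {
  private final Map<String, Long> sizeMap = new HashMap<>();

  void put(String region, long bytes) { sizeMap.put(region, bytes); }

  long getRegionSize(String region) {
    Long size = sizeMap.get(region);
    return size == null ? 0L : size;   // unknown regions count as empty
  }

  public static void main(String[] args) {
    SizeLookup s = new SizeLookup();
    long megabyte = 1024L * 1024L;
    s.put("largeRegion", ((long) Integer.MAX_VALUE) * megabyte);
    System.out.println(s.getRegionSize("largeRegion"));
    System.out.println(s.getRegionSize("missing"));    // 0
  }
}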
BoundedPriorityBlockingQueue extends AbstractQueue<E> implements BlockingQueue<E> { public E poll() { E result = null; lock.lock(); try { if (queue.size() > 0) { result = queue.poll(); notFull.signal(); } } finally { lock.unlock(); } return result; } BoundedPriorityBlockingQueue(int capacity, Comparator<? super E> comparator); boolean offer(E e); void put(E e); boolean offer(E e, long timeout, TimeUnit unit); E take(); E poll(); E poll(long timeout, TimeUnit unit); E peek(); int size(); Iterator<E> iterator(); Comparator<? super E> comparator(); int remainingCapacity(); boolean remove(Object o); boolean contains(Object o); int drainTo(Collection<? super E> c); int drainTo(Collection<? super E> c, int maxElements); }
@Test public void testPoll() { assertNull(queue.poll()); PriorityQueue<TestObject> testList = new PriorityQueue<TestObject>(CAPACITY, new TestObjectComparator()); for (int i = 0; i < CAPACITY; ++i) { TestObject obj = new TestObject(i, i); testList.add(obj); queue.offer(obj); } for (int i = 0; i < CAPACITY; ++i) { assertEquals(testList.poll(), queue.poll()); } assertNull(queue.poll()); }
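poll() above pairs a plain PriorityQueue with a ReentrantLock and signals the notFull condition on every successful removal, which is what unblocks producers parked in put(). A trimmed sketch of just that offer/poll pair, assuming the same lock discipline (BoundedPq is a hypothetical name):

import java.util.PriorityQueue;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

// Sketch of the lock/condition pattern in poll() above: removing an element
// signals writers blocked on the notFull condition of the bounded queue.
final class BoundedPq<E extends Comparable<E>> {
  private final PriorityQueue<E> queue = new PriorityQueue<>();
  private final int capacity;
  private final ReentrantLock lock = new ReentrantLock();
  private final Condition notFull = lock.newCondition();

  BoundedPq(int capacity) { this.capacity = capacity; }

  boolean offer(E e) {
    lock.lock();
    try {
      if (queue.size() >= capacity) return false;  // full: non-blocking reject
      return queue.offer(e);
    } finally {
      lock.unlock();
    }
  }

  E poll() {
    lock.lock();
    try {
      E result = queue.poll();                 // null when empty
      if (result != null) notFull.signal();    // wake a producer blocked in a (not shown) put()
      return result;
    } finally {
      lock.unlock();
    }
  }
}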
FSVisitor { public static void visitTableStoreFiles(final FileSystem fs, final Path tableDir, final StoreFileVisitor visitor) throws IOException { FileStatus[] regions = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs)); if (regions == null) { LOG.info("No regions under directory:" + tableDir); return; } for (FileStatus region: regions) { visitRegionStoreFiles(fs, region.getPath(), visitor); } } private FSVisitor(); static void visitRegions(final FileSystem fs, final Path tableDir, final RegionVisitor visitor); static void visitTableStoreFiles(final FileSystem fs, final Path tableDir, final StoreFileVisitor visitor); static void visitRegionStoreFiles(final FileSystem fs, final Path regionDir, final StoreFileVisitor visitor); static void visitTableRecoveredEdits(final FileSystem fs, final Path tableDir, final FSVisitor.RecoveredEditsVisitor visitor); static void visitRegionRecoveredEdits(final FileSystem fs, final Path regionDir, final FSVisitor.RecoveredEditsVisitor visitor); static void visitLogFiles(final FileSystem fs, final Path rootDir, final LogFileVisitor visitor); }
@Test public void testVisitStoreFiles() throws IOException { final Set<String> regions = new HashSet<String>(); final Set<String> families = new HashSet<String>(); final Set<String> hfiles = new HashSet<String>(); FSVisitor.visitTableStoreFiles(fs, tableDir, new FSVisitor.StoreFileVisitor() { public void storeFile(final String region, final String family, final String hfileName) throws IOException { regions.add(region); families.add(family); hfiles.add(hfileName); } }); assertEquals(tableRegions, regions); assertEquals(tableFamilies, families); assertEquals(tableHFiles, hfiles); }
FSVisitor { public static void visitTableRecoveredEdits(final FileSystem fs, final Path tableDir, final FSVisitor.RecoveredEditsVisitor visitor) throws IOException { FileStatus[] regions = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs)); if (regions == null) { LOG.info("No recoveredEdits regions under directory:" + tableDir); return; } for (FileStatus region: regions) { visitRegionRecoveredEdits(fs, region.getPath(), visitor); } } private FSVisitor(); static void visitRegions(final FileSystem fs, final Path tableDir, final RegionVisitor visitor); static void visitTableStoreFiles(final FileSystem fs, final Path tableDir, final StoreFileVisitor visitor); static void visitRegionStoreFiles(final FileSystem fs, final Path regionDir, final StoreFileVisitor visitor); static void visitTableRecoveredEdits(final FileSystem fs, final Path tableDir, final FSVisitor.RecoveredEditsVisitor visitor); static void visitRegionRecoveredEdits(final FileSystem fs, final Path regionDir, final FSVisitor.RecoveredEditsVisitor visitor); static void visitLogFiles(final FileSystem fs, final Path rootDir, final LogFileVisitor visitor); }
@Test public void testVisitRecoveredEdits() throws IOException { final Set<String> regions = new HashSet<String>(); final Set<String> edits = new HashSet<String>(); FSVisitor.visitTableRecoveredEdits(fs, tableDir, new FSVisitor.RecoveredEditsVisitor() { public void recoveredEdits (final String region, final String logfile) throws IOException { regions.add(region); edits.add(logfile); } }); assertEquals(tableRegions, regions); assertEquals(recoveredEdits, edits); }
FSVisitor { public static void visitLogFiles(final FileSystem fs, final Path rootDir, final LogFileVisitor visitor) throws IOException { Path logsDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME); FileStatus[] logServerDirs = FSUtils.listStatus(fs, logsDir); if (logServerDirs == null) { LOG.info("No logs under directory:" + logsDir); return; } for (FileStatus serverLogs: logServerDirs) { String serverName = serverLogs.getPath().getName(); FileStatus[] hlogs = FSUtils.listStatus(fs, serverLogs.getPath()); if (hlogs == null) { LOG.debug("No hlogs found for server: " + serverName + ", skipping."); continue; } for (FileStatus hlogRef: hlogs) { visitor.logFile(serverName, hlogRef.getPath().getName()); } } } private FSVisitor(); static void visitRegions(final FileSystem fs, final Path tableDir, final RegionVisitor visitor); static void visitTableStoreFiles(final FileSystem fs, final Path tableDir, final StoreFileVisitor visitor); static void visitRegionStoreFiles(final FileSystem fs, final Path regionDir, final StoreFileVisitor visitor); static void visitTableRecoveredEdits(final FileSystem fs, final Path tableDir, final FSVisitor.RecoveredEditsVisitor visitor); static void visitRegionRecoveredEdits(final FileSystem fs, final Path regionDir, final FSVisitor.RecoveredEditsVisitor visitor); static void visitLogFiles(final FileSystem fs, final Path rootDir, final LogFileVisitor visitor); }
@Test public void testVisitLogFiles() throws IOException { final Set<String> servers = new HashSet<String>(); final Set<String> logs = new HashSet<String>(); FSVisitor.visitLogFiles(fs, rootDir, new FSVisitor.LogFileVisitor() { public void logFile (final String server, final String logfile) throws IOException { servers.add(server); logs.add(logfile); } }); assertEquals(regionServers, servers); assertEquals(serverLogs, logs); }
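All three FSVisitor walks share one shape: list child directories, guard against a null listing, and hand leaf names to a callback. A java.io.File-based sketch of the store-file walk (the real code walks a Hadoop FileSystem with RegionDirFilter; DirVisitor is a hypothetical name):

import java.io.File;
import java.io.IOException;

// Sketch of the two-level visitor walk used by FSVisitor: list region
// directories under a table directory, then hand each store file to a
// callback. Uses java.io.File in place of a Hadoop FileSystem.
final class DirVisitor {
  interface StoreFileVisitor {
    void storeFile(String region, String family, String hfile) throws IOException;
  }

  static void visit(File tableDir, StoreFileVisitor visitor) throws IOException {
    File[] regions = tableDir.listFiles(File::isDirectory);
    if (regions == null) return;                 // nothing under the table dir
    for (File region : regions) {
      File[] families = region.listFiles(File::isDirectory);
      if (families == null) continue;
      for (File family : families) {
        File[] hfiles = family.listFiles(File::isFile);
        if (hfiles == null) continue;
        for (File hfile : hfiles) {
          visitor.storeFile(region.getName(), family.getName(), hfile.getName());
        }
      }
    }
  }
}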
FSUtils { public static boolean isHDFS(final Configuration conf) throws IOException { FileSystem fs = FileSystem.get(conf); String scheme = fs.getUri().getScheme(); return scheme.equalsIgnoreCase("hdfs"); } protected FSUtils(); static boolean isStartingWithPath(final Path rootPath, final String path); static boolean isMatchingTail(final Path pathToSearch, String pathTail); static boolean isMatchingTail(final Path pathToSearch, final Path pathTail); static FSUtils getInstance(FileSystem fs, Configuration conf); static boolean deleteDirectory(final FileSystem fs, final Path dir); static long getDefaultBlockSize(final FileSystem fs, final Path path); static short getDefaultReplication(final FileSystem fs, final Path path); static int getDefaultBufferSize(final FileSystem fs); static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm, InetSocketAddress[] favoredNodes); static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm, boolean overwrite); static FsPermission getFilePermissions(final FileSystem fs, final Configuration conf, final String permssionConfKey); static void checkFileSystemAvailable(final FileSystem fs); static void checkDfsSafeMode(final Configuration conf); static String getVersion(FileSystem fs, Path rootdir); static void checkVersion(FileSystem fs, Path rootdir, boolean message); static void checkVersion(FileSystem fs, Path rootdir, boolean message, int wait, int retries); static void setVersion(FileSystem fs, Path rootdir); static void setVersion(FileSystem fs, Path rootdir, int wait, int retries); static void setVersion(FileSystem fs, Path rootdir, String version, int wait, int retries); static boolean checkClusterIdExists(FileSystem fs, Path rootdir, int wait); static ClusterId getClusterId(FileSystem fs, Path rootdir); static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId, int wait); static Path validateRootPath(Path root); static String removeRootPath(Path path, final Configuration conf); static void waitOnSafeMode(final Configuration conf, final long wait); static String getPath(Path p); static Path getRootDir(final Configuration c); static void setRootDir(final Configuration c, final Path root); static void setFsDefault(final Configuration c, final Path root); @SuppressWarnings("deprecation") static boolean metaRegionExists(FileSystem fs, Path rootdir); static HDFSBlocksDistribution computeHDFSBlocksDistribution( final FileSystem fs, FileStatus status, long start, long length); static boolean isMajorCompacted(final FileSystem fs, final Path hbaseRootDir); static int getTotalTableFragmentation(final HMaster master); static Map<String, Integer> getTableFragmentation( final HMaster master); static Map<String, Integer> getTableFragmentation( final FileSystem fs, final Path hbaseRootDir); static boolean isPre020FileLayout(final FileSystem fs, final Path hbaseRootDir); static boolean isMajorCompactedPre020(final FileSystem fs, final Path hbaseRootDir); static Path getTableDir(Path rootdir, final TableName tableName); static TableName getTableName(Path tablePath); static Path getNamespaceDir(Path rootdir, final String namespace); static boolean isAppendSupported(final Configuration conf); static boolean isHDFS(final Configuration conf); abstract void recoverFileLease(final FileSystem fs, final Path p, Configuration conf, CancelableProgressable reporter); static List<Path> getTableDirs(final FileSystem fs, final Path rootdir); static List<Path> getLocalTableDirs(final FileSystem fs, final Path rootdir); static boolean 
isRecoveredEdits(Path path); static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir); static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir); static FileSystem getCurrentFileSystem(Configuration conf); static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map, final FileSystem fs, final Path hbaseRootDir, TableName tableName); static Map<String, Path> getTableStoreFilePathMap( final FileSystem fs, final Path hbaseRootDir); static FileStatus [] listStatus(final FileSystem fs, final Path dir, final PathFilter filter); static FileStatus[] listStatus(final FileSystem fs, final Path dir); static boolean delete(final FileSystem fs, final Path path, final boolean recursive); static boolean isExists(final FileSystem fs, final Path path); static void checkAccess(UserGroupInformation ugi, FileStatus file, FsAction action); static void logFileSystemState(final FileSystem fs, final Path root, Log LOG); static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest); static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS( final Configuration conf); static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS( final Configuration conf, final String desiredTable, int threadPoolSize); static void setupShortCircuitRead(final Configuration conf); static void checkShortCircuitReadBufferSize(final Configuration conf); static final String FULL_RWX_PERMISSIONS; static final boolean WINDOWS; }
@Test public void testIsHDFS() throws Exception { HBaseTestingUtility htu = new HBaseTestingUtility(); htu.getConfiguration().setBoolean("dfs.support.append", false); assertFalse(FSUtils.isHDFS(htu.getConfiguration())); htu.getConfiguration().setBoolean("dfs.support.append", true); MiniDFSCluster cluster = null; try { cluster = htu.startMiniDFSCluster(1); assertTrue(FSUtils.isHDFS(htu.getConfiguration())); assertTrue(FSUtils.isAppendSupported(htu.getConfiguration())); } finally { if (cluster != null) cluster.shutdown(); } }
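isHDFS() boils down to a URI scheme comparison, which is why the test can flip the answer by swapping the underlying filesystem rather than any HDFS-specific state. A quick sketch of the same check (SchemeCheck is a hypothetical name):

import java.net.URI;

// Sketch of the scheme test behind FSUtils.isHDFS(): the check is purely
// on the filesystem URI scheme, so file:/// and hdfs:// are told apart
// without touching the cluster.
public class SchemeCheck {
  static boolean isHdfs(URI fsUri) {
    return "hdfs".equalsIgnoreCase(fsUri.getScheme());
  }

  public static void main(String[] args) {
    System.out.println(isHdfs(URI.create("hdfs://nn:8020/hbase"))); // true
    System.out.println(isHdfs(URI.create("file:///tmp/hbase")));    // false
  }
}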
FSUtils { static public HDFSBlocksDistribution computeHDFSBlocksDistribution( final FileSystem fs, FileStatus status, long start, long length) throws IOException { HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution(); BlockLocation [] blockLocations = fs.getFileBlockLocations(status, start, length); for(BlockLocation bl : blockLocations) { String [] hosts = bl.getHosts(); long len = bl.getLength(); blocksDistribution.addHostsAndBlockWeight(hosts, len); } return blocksDistribution; } protected FSUtils(); static boolean isStartingWithPath(final Path rootPath, final String path); static boolean isMatchingTail(final Path pathToSearch, String pathTail); static boolean isMatchingTail(final Path pathToSearch, final Path pathTail); static FSUtils getInstance(FileSystem fs, Configuration conf); static boolean deleteDirectory(final FileSystem fs, final Path dir); static long getDefaultBlockSize(final FileSystem fs, final Path path); static short getDefaultReplication(final FileSystem fs, final Path path); static int getDefaultBufferSize(final FileSystem fs); static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm, InetSocketAddress[] favoredNodes); static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm, boolean overwrite); static FsPermission getFilePermissions(final FileSystem fs, final Configuration conf, final String permssionConfKey); static void checkFileSystemAvailable(final FileSystem fs); static void checkDfsSafeMode(final Configuration conf); static String getVersion(FileSystem fs, Path rootdir); static void checkVersion(FileSystem fs, Path rootdir, boolean message); static void checkVersion(FileSystem fs, Path rootdir, boolean message, int wait, int retries); static void setVersion(FileSystem fs, Path rootdir); static void setVersion(FileSystem fs, Path rootdir, int wait, int retries); static void setVersion(FileSystem fs, Path rootdir, String version, int wait, int retries); static boolean checkClusterIdExists(FileSystem fs, Path rootdir, int wait); static ClusterId getClusterId(FileSystem fs, Path rootdir); static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId, int wait); static Path validateRootPath(Path root); static String removeRootPath(Path path, final Configuration conf); static void waitOnSafeMode(final Configuration conf, final long wait); static String getPath(Path p); static Path getRootDir(final Configuration c); static void setRootDir(final Configuration c, final Path root); static void setFsDefault(final Configuration c, final Path root); @SuppressWarnings("deprecation") static boolean metaRegionExists(FileSystem fs, Path rootdir); static HDFSBlocksDistribution computeHDFSBlocksDistribution( final FileSystem fs, FileStatus status, long start, long length); static boolean isMajorCompacted(final FileSystem fs, final Path hbaseRootDir); static int getTotalTableFragmentation(final HMaster master); static Map<String, Integer> getTableFragmentation( final HMaster master); static Map<String, Integer> getTableFragmentation( final FileSystem fs, final Path hbaseRootDir); static boolean isPre020FileLayout(final FileSystem fs, final Path hbaseRootDir); static boolean isMajorCompactedPre020(final FileSystem fs, final Path hbaseRootDir); static Path getTableDir(Path rootdir, final TableName tableName); static TableName getTableName(Path tablePath); static Path getNamespaceDir(Path rootdir, final String namespace); static boolean isAppendSupported(final Configuration conf); static boolean isHDFS(final 
Configuration conf); abstract void recoverFileLease(final FileSystem fs, final Path p, Configuration conf, CancelableProgressable reporter); static List<Path> getTableDirs(final FileSystem fs, final Path rootdir); static List<Path> getLocalTableDirs(final FileSystem fs, final Path rootdir); static boolean isRecoveredEdits(Path path); static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir); static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir); static FileSystem getCurrentFileSystem(Configuration conf); static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map, final FileSystem fs, final Path hbaseRootDir, TableName tableName); static Map<String, Path> getTableStoreFilePathMap( final FileSystem fs, final Path hbaseRootDir); static FileStatus [] listStatus(final FileSystem fs, final Path dir, final PathFilter filter); static FileStatus[] listStatus(final FileSystem fs, final Path dir); static boolean delete(final FileSystem fs, final Path path, final boolean recursive); static boolean isExists(final FileSystem fs, final Path path); static void checkAccess(UserGroupInformation ugi, FileStatus file, FsAction action); static void logFileSystemState(final FileSystem fs, final Path root, Log LOG); static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest); static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS( final Configuration conf); static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS( final Configuration conf, final String desiredTable, int threadPoolSize); static void setupShortCircuitRead(final Configuration conf); static void checkShortCircuitReadBufferSize(final Configuration conf); static final String FULL_RWX_PERMISSIONS; static final boolean WINDOWS; }
@Test public void testcomputeHDFSBlocksDistribution() throws Exception { HBaseTestingUtility htu = new HBaseTestingUtility(); final int DEFAULT_BLOCK_SIZE = 1024; htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE); MiniDFSCluster cluster = null; Path testFile = null; try { String hosts[] = new String[] { "host1", "host2", "host3" }; cluster = htu.startMiniDFSCluster(hosts); cluster.waitActive(); FileSystem fs = cluster.getFileSystem(); testFile = new Path("/test1.txt"); WriteDataToHDFS(fs, testFile, 2*DEFAULT_BLOCK_SIZE); final long maxTime = System.currentTimeMillis() + 2000; boolean ok; do { ok = true; FileStatus status = fs.getFileStatus(testFile); HDFSBlocksDistribution blocksDistribution = FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen()); long uniqueBlocksTotalWeight = blocksDistribution.getUniqueBlocksTotalWeight(); for (String host : hosts) { long weight = blocksDistribution.getWeight(host); ok = (ok && uniqueBlocksTotalWeight == weight); } } while (!ok && System.currentTimeMillis() < maxTime); assertTrue(ok); } finally { htu.shutdownMiniDFSCluster(); } try { String hosts[] = new String[] { "host1", "host2", "host3", "host4" }; cluster = htu.startMiniDFSCluster(hosts); cluster.waitActive(); FileSystem fs = cluster.getFileSystem(); testFile = new Path("/test2.txt"); WriteDataToHDFS(fs, testFile, 3*DEFAULT_BLOCK_SIZE); final long maxTime = System.currentTimeMillis() + 2000; long weight; long uniqueBlocksTotalWeight; do { FileStatus status = fs.getFileStatus(testFile); HDFSBlocksDistribution blocksDistribution = FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen()); uniqueBlocksTotalWeight = blocksDistribution.getUniqueBlocksTotalWeight(); String tophost = blocksDistribution.getTopHosts().get(0); weight = blocksDistribution.getWeight(tophost); } while (uniqueBlocksTotalWeight != weight && System.currentTimeMillis() < maxTime); assertTrue(uniqueBlocksTotalWeight == weight); } finally { htu.shutdownMiniDFSCluster(); } try { String hosts[] = new String[] { "host1", "host2", "host3", "host4" }; cluster = htu.startMiniDFSCluster(hosts); cluster.waitActive(); FileSystem fs = cluster.getFileSystem(); testFile = new Path("/test3.txt"); WriteDataToHDFS(fs, testFile, DEFAULT_BLOCK_SIZE); final long maxTime = System.currentTimeMillis() + 2000; HDFSBlocksDistribution blocksDistribution; do { FileStatus status = fs.getFileStatus(testFile); blocksDistribution = FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen()); } while (blocksDistribution.getTopHosts().size() != 3 && System.currentTimeMillis() < maxTime); assertEquals("Wrong number of hosts distributing blocks.", 3, blocksDistribution.getTopHosts().size()); } finally { htu.shutdownMiniDFSCluster(); } }
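computeHDFSBlocksDistribution() adds each block's length to every host holding a replica and separately tracks the total over unique blocks, which is the quantity the polling loops above compare against per-host weights. A self-contained sketch of that bookkeeping (BlocksDistribution here is a hypothetical stand-in for HDFSBlocksDistribution):

import java.util.*;

// Sketch of the aggregation done by computeHDFSBlocksDistribution(): every
// block contributes its length to each host that stores a replica, and the
// "unique blocks total weight" counts each block's length exactly once.
final class BlocksDistribution {
  private final Map<String, Long> weightByHost = new HashMap<>();
  private long uniqueBlocksTotalWeight = 0;

  void addHostsAndBlockWeight(String[] hosts, long len) {
    uniqueBlocksTotalWeight += len;
    for (String host : hosts) {
      weightByHost.merge(host, len, Long::sum);
    }
  }

  public static void main(String[] args) {
    BlocksDistribution d = new BlocksDistribution();
    d.addHostsAndBlockWeight(new String[]{"host1", "host2", "host3"}, 1024);
    d.addHostsAndBlockWeight(new String[]{"host1", "host2", "host3"}, 1024);
    // With every block replicated on all three hosts, each host's weight
    // equals the unique total -- the condition the first test loop waits for.
    System.out.println(d.weightByHost.get("host1"));     // 2048
    System.out.println(d.uniqueBlocksTotalWeight);       // 2048
  }
}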