Example 41 with StoreFile

Use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.

From class TestCompactedHFilesDischarger, method testCompactedHFilesCleaner.

@Test
public void testCompactedHFilesCleaner() throws Exception {
    // Create the cleaner object
    CompactedHFilesDischarger cleaner = new CompactedHFilesDischarger(1000, (Stoppable) null, rss, false);
    // Add some data to the region and do some flushes
    for (int i = 1; i < 10; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        region.put(p);
    }
    // flush them
    region.flush(true);
    for (int i = 11; i < 20; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        region.put(p);
    }
    // flush them
    region.flush(true);
    for (int i = 21; i < 30; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        region.put(p);
    }
    // flush them
    region.flush(true);
    Store store = region.getStore(fam);
    assertEquals(3, store.getStorefilesCount());
    Collection<StoreFile> storefiles = store.getStorefiles();
    Collection<StoreFile> compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
    // None of the files should be in compacted state.
    for (StoreFile file : storefiles) {
        assertFalse(file.isCompactedAway());
    }
    // Try to run the cleaner without compaction; there should not be any change
    cleaner.chore();
    storefiles = store.getStorefiles();
    // None of the files should be in compacted state.
    for (StoreFile file : storefiles) {
        assertFalse(file.isCompactedAway());
    }
    // now do some compaction
    region.compact(true);
    // The flushed files should still be present until the cleaner runs, but their
    // state should now be COMPACTED
    assertEquals(1, store.getStorefilesCount());
    assertEquals(3, ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles().size());
    // Run the cleaner
    cleaner.chore();
    assertEquals(1, store.getStorefilesCount());
    storefiles = store.getStorefiles();
    for (StoreFile file : storefiles) {
        // Should not be in compacted state
        assertFalse(file.isCompactedAway());
    }
    compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
    assertTrue(compactedfiles.isEmpty());
}
Also used: CompactedHFilesDischarger (org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger), StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile), HStore (org.apache.hadoop.hbase.regionserver.HStore), Store (org.apache.hadoop.hbase.regionserver.Store), Put (org.apache.hadoop.hbase.client.Put), Test (org.junit.Test)
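
The discharger tests in Examples 41 and 42 reference fixture fields (region, rss, fam, qual1, val) that are initialized elsewhere in TestCompactedHFilesDischarger. A minimal sketch of such a setup follows; the table name, column values, and the getOnlineRegions stubbing are illustrative assumptions, not the test's actual @Before method:

private HRegion region;
private RegionServerServices rss;
private final byte[] fam = Bytes.toBytes("cf");
private final byte[] qual1 = Bytes.toBytes("q1");
private final byte[] val = Bytes.toBytes("val");

@Before
public void setUp() throws Exception {
    // Hypothetical fixture: a single-family region backed by a local test dir.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testCompactedHFilesDischarger"));
    htd.addFamily(new HColumnDescriptor(fam));
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    Path path = new Path("/tmp/testCompactedHFilesDischarger");
    region = HBaseTestingUtility.createRegionAndWAL(info, path, HBaseConfiguration.create(), htd);
    rss = mock(RegionServerServices.class);
    // Let the discharger find the test region; the exact getOnlineRegions(...)
    // signature varies across HBase versions, so this stubbing is an assumption.
    when(rss.getOnlineRegions(any(TableName.class)))
        .thenReturn(Collections.singletonList((Region) region));
}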

Example 42 with StoreFile

Use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.

From class TestCompactedHFilesDischarger, method testCleanerWithParallelScannersAfterCompaction.

@Test
public void testCleanerWithParallelScannersAfterCompaction() throws Exception {
    // Create the cleaner object
    CompactedHFilesDischarger cleaner = new CompactedHFilesDischarger(1000, (Stoppable) null, rss, false);
    // Add some data to the region and do some flushes
    for (int i = 1; i < 10; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        region.put(p);
    }
    // flush them
    region.flush(true);
    for (int i = 11; i < 20; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        region.put(p);
    }
    // flush them
    region.flush(true);
    for (int i = 21; i < 30; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        region.put(p);
    }
    // flush them
    region.flush(true);
    Store store = region.getStore(fam);
    assertEquals(3, store.getStorefilesCount());
    Collection<StoreFile> storefiles = store.getStorefiles();
    Collection<StoreFile> compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
    // None of the files should be in compacted state.
    for (StoreFile file : storefiles) {
        assertFalse(file.isCompactedAway());
    }
    // Do compaction
    region.compact(true);
    startScannerThreads();
    storefiles = store.getStorefiles();
    int usedReaderCount = 0;
    int unusedReaderCount = 0;
    for (StoreFile file : storefiles) {
        if (file.getRefCount() == 3) {
            usedReaderCount++;
        }
    }
    compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
    for (StoreFile file : compactedfiles) {
        assertEquals("Refcount should be 3", 0, file.getRefCount());
        unusedReaderCount++;
    }
    // Though the compacted files are still present, they are no longer used for reads
    assertEquals("unused reader count should be 3", 3, unusedReaderCount);
    assertEquals("used reader count should be 1", 1, usedReaderCount);
    // now run the cleaner
    cleaner.chore();
    countDown();
    assertEquals(1, store.getStorefilesCount());
    storefiles = store.getStorefiles();
    for (StoreFile file : storefiles) {
        // Should not be in compacted state
        assertFalse(file.isCompactedAway());
    }
    compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
    assertTrue(compactedfiles.isEmpty());
}
Also used: CompactedHFilesDischarger (org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger), StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile), HStore (org.apache.hadoop.hbase.regionserver.HStore), Store (org.apache.hadoop.hbase.regionserver.Store), Put (org.apache.hadoop.hbase.client.Put), Test (org.junit.Test)
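
startScannerThreads() and countDown() are helpers defined elsewhere in the test class. A plausible sketch, assuming a CountDownLatch-based design in which each of three scanner threads holds its store-file readers open (which is what produces the refcount of 3 observed above) until the test releases them:

private final CountDownLatch latch = new CountDownLatch(1);

private void startScannerThreads() throws InterruptedException {
    // Hypothetical helper: open three region scanners on parallel threads.
    CountDownLatch started = new CountDownLatch(3);
    for (int i = 0; i < 3; i++) {
        new Thread(() -> {
            try {
                RegionScanner scanner = region.getScanner(new Scan());
                started.countDown();
                // Hold the scanner (and its file readers) open until countDown().
                latch.await();
                scanner.close();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }).start();
    }
    // Do not return until every scanner is open and holding references.
    started.await();
}

private void countDown() {
    // Release all waiting scanner threads so their readers close.
    latch.countDown();
}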

Example 43 with StoreFile

Use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.

From class TestDateTieredCompactor, method verify.

private void verify(KeyValue[] input, List<Long> boundaries, KeyValue[][] output, boolean allFiles) throws Exception {
    StoreFileWritersCapture writers = new StoreFileWritersCapture();
    StoreFile sf1 = createDummyStoreFile(1L);
    StoreFile sf2 = createDummyStoreFile(2L);
    DateTieredCompactor dtc = createCompactor(writers, input, Arrays.asList(sf1, sf2));
    List<Path> paths = dtc.compact(new CompactionRequest(Arrays.asList(sf1)), boundaries.subList(0, boundaries.size() - 1), NoLimitThroughputController.INSTANCE, null);
    writers.verifyKvs(output, allFiles, boundaries);
    if (allFiles) {
        assertEquals(output.length, paths.size());
    }
}
Also used: Path (org.apache.hadoop.fs.Path), TestCompactor.createDummyStoreFile (org.apache.hadoop.hbase.regionserver.compactions.TestCompactor.createDummyStoreFile), StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile), StoreFileWritersCapture (org.apache.hadoop.hbase.regionserver.compactions.TestCompactor.StoreFileWritersCapture)
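
For illustration, a hypothetical invocation of verify; the rows, timestamps, and boundary values below are assumptions rather than the test's actual fixtures. Two cells whose timestamps fall on either side of the 200L boundary would be expected to land in separate output writers:

KeyValue kvOld = new KeyValue(Bytes.toBytes("row1"), 100L);
KeyValue kvNew = new KeyValue(Bytes.toBytes("row2"), 300L);
// One input file, one boundary at 200L, two expected output windows.
verify(new KeyValue[] { kvOld, kvNew },
    Arrays.asList(100L, 200L, 400L),
    new KeyValue[][] { new KeyValue[] { kvOld }, new KeyValue[] { kvNew } },
    true);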

Example 44 with StoreFile

Use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.

From class TestStripeCompactionPolicy, method createStripes.

private static StripeInformationProvider createStripes(List<byte[]> boundaries, List<List<Long>> stripeSizes, List<Long> l0Sizes) throws Exception {
    List<List<StoreFile>> stripeFiles = new ArrayList<>(stripeSizes.size());
    for (List<Long> sizes : stripeSizes) {
        List<StoreFile> sfs = new ArrayList<>(sizes.size());
        for (Long size : sizes) {
            sfs.add(createFile(size));
        }
        stripeFiles.add(sfs);
    }
    List<StoreFile> l0Files = new ArrayList<>();
    for (Long size : l0Sizes) {
        l0Files.add(createFile(size));
    }
    return createStripesWithFiles(boundaries, stripeFiles, l0Files);
}
Also used: ArrayList (java.util.ArrayList), Matchers.anyLong (org.mockito.Matchers.anyLong), StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile), List (java.util.List), ImmutableList (com.google.common.collect.ImmutableList)
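
A hypothetical call, assuming sizes in the unit createFile expects (Example 45 stubs the same value as both entry count and file length): two stripes separated by the boundary "m", plus two single-unit L0 files.

// Stripe 1 holds files of size 5 and 3; stripe 2 holds 4, 2, and 1.
StripeInformationProvider si = createStripes(
    Arrays.asList(Bytes.toBytes("m")),
    Arrays.asList(Arrays.asList(5L, 3L), Arrays.asList(4L, 2L, 1L)),
    Arrays.asList(1L, 1L));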

Example 45 with StoreFile

Use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.

From class TestStripeCompactionPolicy, method createFile.

private static StoreFile createFile(long size) throws Exception {
    StoreFile sf = mock(StoreFile.class);
    when(sf.getPath()).thenReturn(new Path("moo"));
    StoreFileReader r = mock(StoreFileReader.class);
    when(r.getEntries()).thenReturn(size);
    when(r.length()).thenReturn(size);
    when(r.getBloomFilterType()).thenReturn(BloomType.NONE);
    when(r.getHFileReader()).thenReturn(mock(HFile.Reader.class));
    when(r.getStoreFileScanner(anyBoolean(), anyBoolean(), anyBoolean(), anyLong(), anyLong(), anyBoolean())).thenReturn(mock(StoreFileScanner.class));
    when(sf.getReader()).thenReturn(r);
    when(sf.createReader(anyBoolean())).thenReturn(r);
    when(sf.createReader()).thenReturn(r);
    when(sf.cloneForReader()).thenReturn(sf);
    return sf;
}
Also used: Path (org.apache.hadoop.fs.Path), StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile), StoreFileReader (org.apache.hadoop.hbase.regionserver.StoreFileReader), StoreFileScanner (org.apache.hadoop.hbase.regionserver.StoreFileScanner)
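
One detail worth noting in the mock above: getReader(), both createReader(...) overloads, and cloneForReader() all resolve to the same canned reader or mock, so whichever path the compaction policy takes to open or clone a file, it sees identical entry counts and lengths without ever touching a real HFile. The single size argument doubling as both entry count and reader length is what lets the stripe-policy tests (Example 44) describe stripe shapes purely as lists of longs.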

Aggregations

StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile): 52
ArrayList (java.util.ArrayList): 22
Path (org.apache.hadoop.fs.Path): 15
Test (org.junit.Test): 13
IOException (java.io.IOException): 10
Store (org.apache.hadoop.hbase.regionserver.Store): 6
StripeInformationProvider (org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider): 6
StoreFileReader (org.apache.hadoop.hbase.regionserver.StoreFileReader): 5
ImmutableList (com.google.common.collect.ImmutableList): 4
Configuration (org.apache.hadoop.conf.Configuration): 4
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 4
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 4
Put (org.apache.hadoop.hbase.client.Put): 4
StoreFileScanner (org.apache.hadoop.hbase.regionserver.StoreFileScanner): 4
FileStatus (org.apache.hadoop.fs.FileStatus): 3
Cell (org.apache.hadoop.hbase.Cell): 3
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 3
StoreFileWriter (org.apache.hadoop.hbase.regionserver.StoreFileWriter): 3
ConcatenatedLists (org.apache.hadoop.hbase.util.ConcatenatedLists): 3
FileNotFoundException (java.io.FileNotFoundException): 2