Search in sources :

Example 41 with HStoreFile

use of org.apache.hadoop.hbase.regionserver.HStoreFile in project hbase by apache.

From the class TestStripeCompactionPolicy, method createFile:

/**
 * Builds a fully stubbed mock {@code HStoreFile} for compaction-policy tests.
 *
 * <p>The mock reports {@code size} as both its entry count and its on-disk length,
 * carries a dummy path, has no bloom filter, an empty bulk-load timestamp, and a
 * reader whose max timestamp is {@code TimeRange.INITIAL_MAX_TIMESTAMP}. Scanner
 * creation on the reader yields a mock {@code StoreFileScanner}.
 *
 * @param size value returned for both {@code getEntries()} and {@code length()}
 * @return a mock store file suitable for exercising stripe compaction selection
 */
private static HStoreFile createFile(long size) throws Exception {
    // Stub the reader first; it backs most of the store file's observable state.
    StoreFileReader reader = mock(StoreFileReader.class);
    when(reader.getEntries()).thenReturn(size);
    when(reader.length()).thenReturn(size);
    when(reader.getBloomFilterType()).thenReturn(BloomType.NONE);
    when(reader.getHFileReader()).thenReturn(mock(HFile.Reader.class));
    when(reader.getMaxTimestamp()).thenReturn(TimeRange.INITIAL_MAX_TIMESTAMP);
    when(reader.getStoreFileScanner(anyBoolean(), anyBoolean(), anyBoolean(), anyLong(), anyLong(), anyBoolean())).thenReturn(mock(StoreFileScanner.class));

    // Wire the reader into the store-file mock along with the remaining stubs.
    HStoreFile storeFile = mock(HStoreFile.class);
    when(storeFile.getPath()).thenReturn(new Path("moo"));
    when(storeFile.getBulkLoadTimestamp()).thenReturn(OptionalLong.empty());
    when(storeFile.getReader()).thenReturn(reader);
    return storeFile;
}
Also used : Path(org.apache.hadoop.fs.Path) HStoreFile(org.apache.hadoop.hbase.regionserver.HStoreFile) StoreFileReader(org.apache.hadoop.hbase.regionserver.StoreFileReader) StoreFileScanner(org.apache.hadoop.hbase.regionserver.StoreFileScanner)

Example 42 with HStoreFile

use of org.apache.hadoop.hbase.regionserver.HStoreFile in project hbase by apache.

From the class TestCompactedHFilesDischarger, method testCleanerWithParallelScannersAfterCompaction:

/**
 * Verifies that the compacted-files discharger does not delete compacted store files
 * while parallel scanners still hold references to them, and that after the scanners
 * finish (countDown) the files are cleaned up.
 */
@Test
public void testCleanerWithParallelScannersAfterCompaction() throws Exception {
    // Create the cleaner object
    CompactedHFilesDischarger cleaner = new CompactedHFilesDischarger(1000, (Stoppable) null, rss, false);
    // Add some data to the region and do some flushes to get three store files.
    for (int i = 1; i < 10; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        region.put(p);
    }
    // flush them
    region.flush(true);
    for (int i = 11; i < 20; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        region.put(p);
    }
    // flush them
    region.flush(true);
    for (int i = 21; i < 30; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        region.put(p);
    }
    // flush them
    region.flush(true);
    HStore store = region.getStore(fam);
    assertEquals(3, store.getStorefilesCount());
    Collection<HStoreFile> storefiles = store.getStorefiles();
    Collection<HStoreFile> compactedfiles = store.getStoreEngine().getStoreFileManager().getCompactedfiles();
    // None of the files should be in compacted state.
    for (HStoreFile file : storefiles) {
        assertFalse(file.isCompactedAway());
    }
    // Do compaction, then start scanners that pin the resulting file.
    region.compact(true);
    startScannerThreads();
    storefiles = store.getStorefiles();
    int usedReaderCount = 0;
    int unusedReaderCount = 0;
    // The active (post-compaction) file should be referenced by the three scanner
    // threads started above; count files with that refcount.
    for (HStoreFile file : storefiles) {
        if (file.getRefCount() == 3) {
            usedReaderCount++;
        }
    }
    compactedfiles = store.getStoreEngine().getStoreFileManager().getCompactedfiles();
    for (HStoreFile file : compactedfiles) {
        // Compacted-away files are no longer read, so their refcount must be 0.
        assertEquals("Refcount should be 0", 0, file.getRefCount());
        unusedReaderCount++;
    }
    // Though there are files we are not using them for reads
    assertEquals("unused reader count should be 3", 3, unusedReaderCount);
    assertEquals("used reader count should be 1", 1, usedReaderCount);
    // now run the cleaner, then release the scanners
    cleaner.chore();
    countDown();
    assertEquals(1, store.getStorefilesCount());
    storefiles = store.getStorefiles();
    for (HStoreFile file : storefiles) {
        // Should not be in compacted state
        assertFalse(file.isCompactedAway());
    }
    // After cleaning, no compacted files should remain tracked.
    compactedfiles = store.getStoreEngine().getStoreFileManager().getCompactedfiles();
    assertTrue(compactedfiles.isEmpty());
}
Also used : CompactedHFilesDischarger(org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger) HStoreFile(org.apache.hadoop.hbase.regionserver.HStoreFile) HStore(org.apache.hadoop.hbase.regionserver.HStore) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test)

Example 43 with HStoreFile

use of org.apache.hadoop.hbase.regionserver.HStoreFile in project hbase by apache.

From the class TestCompactedHFilesDischarger, method testCompactedHFilesCleaner:

/**
 * Verifies the basic lifecycle handled by {@code CompactedHFilesDischarger}:
 * flushed files are untouched before compaction, compaction marks the old files
 * as compacted-away, and a subsequent cleaner run removes them from the store.
 */
@Test
public void testCompactedHFilesCleaner() throws Exception {
    // Create the cleaner object
    CompactedHFilesDischarger cleaner = new CompactedHFilesDischarger(1000, (Stoppable) null, rss, false);
    // Add some data to the region and do some flushes to get three store files.
    for (int i = 1; i < 10; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        region.put(p);
    }
    // flush them
    region.flush(true);
    for (int i = 11; i < 20; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        region.put(p);
    }
    // flush them
    region.flush(true);
    for (int i = 21; i < 30; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, qual1, val);
        region.put(p);
    }
    // flush them
    region.flush(true);
    HStore store = region.getStore(fam);
    assertEquals(3, store.getStorefilesCount());
    Collection<HStoreFile> storefiles = store.getStorefiles();
    Collection<HStoreFile> compactedfiles = store.getStoreEngine().getStoreFileManager().getCompactedfiles();
    // None of the files should be in compacted state.
    for (HStoreFile file : storefiles) {
        assertFalse(file.isCompactedAway());
    }
    // Try to run the cleaner without compaction. there should not be any change
    cleaner.chore();
    storefiles = store.getStorefiles();
    // None of the files should be in compacted state.
    for (HStoreFile file : storefiles) {
        assertFalse(file.isCompactedAway());
    }
    // now do some compaction
    region.compact(true);
    // The flushed files remain present until the cleaner runs, but they are now
    // tracked as compacted files (COMPACTED state).
    assertEquals(1, store.getStorefilesCount());
    assertEquals(3, store.getStoreEngine().getStoreFileManager().getCompactedfiles().size());
    // Run the cleaner
    cleaner.chore();
    assertEquals(1, store.getStorefilesCount());
    storefiles = store.getStorefiles();
    for (HStoreFile file : storefiles) {
        // Should not be in compacted state
        assertFalse(file.isCompactedAway());
    }
    // After cleaning, no compacted files should remain tracked.
    compactedfiles = store.getStoreEngine().getStoreFileManager().getCompactedfiles();
    assertTrue(compactedfiles.isEmpty());
}
Also used : CompactedHFilesDischarger(org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger) HStoreFile(org.apache.hadoop.hbase.regionserver.HStoreFile) HStore(org.apache.hadoop.hbase.regionserver.HStore) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test)

Example 44 with HStoreFile

use of org.apache.hadoop.hbase.regionserver.HStoreFile in project hbase by apache.

From the class TestDateTieredCompactor, method verify:

/**
 * Runs a date-tiered compaction over {@code input} and checks the captured writer
 * output against {@code output}, partitioned at the given {@code boundaries}.
 *
 * @param input      the cells fed to the compactor
 * @param boundaries tier boundaries; the last element is dropped before the call
 * @param output     expected cells per produced file
 * @param allFiles   when true, also assert that one path was produced per output file
 */
private void verify(KeyValue[] input, List<Long> boundaries, KeyValue[][] output, boolean allFiles) throws Exception {
    StoreFileWritersCapture capturedWriters = new StoreFileWritersCapture();
    HStoreFile dummyFile1 = createDummyStoreFile(1L);
    HStoreFile dummyFile2 = createDummyStoreFile(2L);
    DateTieredCompactor compactor = createCompactor(capturedWriters, input, Arrays.asList(dummyFile1, dummyFile2));
    // Only the first dummy file goes into the request; the trailing boundary is excluded.
    CompactionRequestImpl request = new CompactionRequestImpl(Arrays.asList(dummyFile1));
    List<Long> effectiveBoundaries = boundaries.subList(0, boundaries.size() - 1);
    List<Path> producedPaths = compactor.compact(request, effectiveBoundaries, new HashMap<Long, String>(), NoLimitThroughputController.INSTANCE, null);
    capturedWriters.verifyKvs(output, allFiles, boundaries);
    if (allFiles) {
        assertEquals(output.length, producedPaths.size());
    }
}
Also used : Path(org.apache.hadoop.fs.Path) OptionalLong(java.util.OptionalLong) HStoreFile(org.apache.hadoop.hbase.regionserver.HStoreFile) StoreFileWritersCapture(org.apache.hadoop.hbase.regionserver.compactions.TestCompactor.StoreFileWritersCapture)

Aggregations

HStoreFile (org.apache.hadoop.hbase.regionserver.HStoreFile)44 ArrayList (java.util.ArrayList)18 Test (org.junit.Test)16 Path (org.apache.hadoop.fs.Path)11 Configuration (org.apache.hadoop.conf.Configuration)8 HStore (org.apache.hadoop.hbase.regionserver.HStore)8 StripeInformationProvider (org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider)8 IOException (java.io.IOException)6 OptionalLong (java.util.OptionalLong)6 TableName (org.apache.hadoop.hbase.TableName)5 Put (org.apache.hadoop.hbase.client.Put)5 TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)5 FileSystem (org.apache.hadoop.fs.FileSystem)4 HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration)4 StoreFileReader (org.apache.hadoop.hbase.regionserver.StoreFileReader)4 ImmutableList (org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList)4 InterruptedIOException (java.io.InterruptedIOException)3 ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor)3 ManualEnvironmentEdge (org.apache.hadoop.hbase.util.ManualEnvironmentEdge)3 FileNotFoundException (java.io.FileNotFoundException)2