Use of org.apache.hadoop.hbase.regionserver.HStoreFile in project hbase by apache.
The class TestHFileArchiving, method testArchiveStoreFilesDifferentFileSystems.
private void testArchiveStoreFilesDifferentFileSystems(String walDir, String expectedBase,
    ArchivingFunction<Configuration, FileSystem, RegionInfo, Path, byte[],
      Collection<HStoreFile>> archivingFunction) throws IOException {
  FileSystem mockedFileSystem = mock(FileSystem.class);
  Configuration conf = new Configuration(UTIL.getConfiguration());
  if (walDir != null) {
    conf.set(CommonFSUtils.HBASE_WAL_DIR, walDir);
  }
  Path filePath = new Path("/mockDir/wals/mockFile");
  when(mockedFileSystem.getScheme()).thenReturn("mockFS");
  when(mockedFileSystem.mkdirs(any())).thenReturn(true);
  when(mockedFileSystem.exists(any())).thenReturn(true);
  RegionInfo mockedRegion = mock(RegionInfo.class);
  TableName tableName = TableName.valueOf("mockTable");
  when(mockedRegion.getTable()).thenReturn(tableName);
  when(mockedRegion.getEncodedName()).thenReturn("mocked-region-encoded-name");
  Path tableDir = new Path("mockFS://mockDir/tabledir");
  byte[] family = Bytes.toBytes("testfamily");
  HStoreFile mockedFile = mock(HStoreFile.class);
  List<HStoreFile> list = new ArrayList<>();
  list.add(mockedFile);
  when(mockedFile.getPath()).thenReturn(filePath);
  when(mockedFileSystem.rename(any(), any())).thenReturn(true);
  archivingFunction.apply(conf, mockedFileSystem, mockedRegion, tableDir, family, list);
  ArgumentCaptor<Path> pathCaptor = ArgumentCaptor.forClass(Path.class);
  verify(mockedFileSystem, times(2)).rename(pathCaptor.capture(), any());
  String expectedDir = expectedBase
    + "archive/data/default/mockTable/mocked-region-encoded-name/testfamily/mockFile";
  assertEquals(expectedDir, pathCaptor.getAllValues().get(0).toString());
}
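The six-parameter ArchivingFunction the test accepts is a functional interface declared in the test class itself; its shape can be inferred from the apply call above. A minimal sketch, assuming names taken from that call:

import java.io.IOException;

// Sketch of the callback the test is parameterized over. The type parameters are
// deliberately generic so one test body can drive different archiving entry points,
// e.g. a method reference such as HFileArchiver::archiveStoreFiles.
@FunctionalInterface
interface ArchivingFunction<Configuration, FS, Region, Dir, Family, Files> {
  void apply(Configuration config, FS fs, Region region, Dir dir, Family family, Files files)
    throws IOException;
}

Because the mocked exists(any()) always returns true, the archiver issues two renames (presumably backing up the pre-existing archive file before moving the new one in), which is why the test verifies rename twice but only asserts on the first captured path.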
Use of org.apache.hadoop.hbase.regionserver.HStoreFile in project hbase by apache.
The class HFileArchiver, method archive.
private static void archive(FileSystem fs, RegionInfo regionInfo, byte[] family,
    Collection<HStoreFile> compactedFiles, Path storeArchiveDir) throws IOException {
  // Sometimes in testing we don't have rss (region server services), so check for that.
  if (fs == null) {
    LOG.warn("Passed filesystem is null, so just deleting files without archiving for {},"
      + " family={}", Bytes.toString(regionInfo.getRegionName()), Bytes.toString(family));
    deleteStoreFilesWithoutArchiving(compactedFiles);
    return;
  }
  // Short circuit if we don't have any files to delete.
  if (compactedFiles.isEmpty()) {
    LOG.debug("No files to dispose of, done!");
    return;
  }
  // Build the archive path.
  if (regionInfo == null || family == null) {
    throw new IOException("Need to have a region and a family to archive from.");
  }
  // Make sure the archive directory exists before we try to archive into it.
  if (!fs.mkdirs(storeArchiveDir)) {
    throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
      + Bytes.toString(family) + ", deleting compacted files instead.");
  }
  // Otherwise we attempt to archive the store files.
  LOG.debug("Archiving compacted files.");
  // Wrap each store file into a File.
  StoreToFile getStorePath = new StoreToFile(fs);
  Collection<File> storeFiles =
    compactedFiles.stream().map(getStorePath).collect(Collectors.toList());
  // Do the actual archiving.
  List<File> failedArchive =
    resolveAndArchive(fs, storeArchiveDir, storeFiles, EnvironmentEdgeManager.currentTime());
  if (!failedArchive.isEmpty()) {
    throw new FailedArchiveException("Failed to archive/delete all the files for region:"
      + Bytes.toString(regionInfo.getRegionName()) + ", family:" + Bytes.toString(family)
      + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.",
      failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList()));
  }
}
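For context, a caller typically reaches this private helper through the public HFileArchiver entry points. A hedged sketch of such a call, with the surrounding state (configuration, filesystem, region, store files) assumed to exist:

import java.io.IOException;
import java.util.Collection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.regionserver.HStoreFile;

final class StoreArchiveExample {
  // Illustrative only: archive one store's compacted files. archiveStoreFiles
  // resolves the per-store archive directory and funnels into the private
  // archive() shown above, raising FailedArchiveException if any file fails.
  static void archiveCompacted(Configuration conf, FileSystem fs, RegionInfo region,
      Path tableDir, byte[] family, Collection<HStoreFile> compactedFiles) throws IOException {
    HFileArchiver.archiveStoreFiles(conf, fs, region, tableDir, family, compactedFiles);
  }
}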
Use of org.apache.hadoop.hbase.regionserver.HStoreFile in project hbase by apache.
The class TestMobFile, method testReadKeyValue.
@Test
public void testReadKeyValue() throws Exception {
  Path testDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = testDir.getFileSystem(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
    .withOutputDir(testDir).withFileContext(meta).build();
  String caseName = testName.getMethodName();
  MobTestUtil.writeStoreFile(writer, caseName);
  MobFile mobFile =
    new MobFile(new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true));
  byte[] family = Bytes.toBytes(caseName);
  byte[] qualify = Bytes.toBytes(caseName);
  // Test the start key.
  byte[] startKey = Bytes.toBytes("aa");
  KeyValue expectedKey =
    new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
  KeyValue seekKey = expectedKey.createKeyOnly(false);
  Cell cell = mobFile.readCell(seekKey, false).getCell();
  MobTestUtil.assertCellEquals(expectedKey, cell);
  // Test the end key.
  byte[] endKey = Bytes.toBytes("zz");
  expectedKey = new KeyValue(endKey, family, qualify, Long.MAX_VALUE, Type.Put, endKey);
  seekKey = expectedKey.createKeyOnly(false);
  cell = mobFile.readCell(seekKey, false).getCell();
  MobTestUtil.assertCellEquals(expectedKey, cell);
  // Test a random key within the range.
  byte[] randomKey = Bytes.toBytes(MobTestUtil.generateRandomString(2));
  expectedKey = new KeyValue(randomKey, family, qualify, Long.MAX_VALUE, Type.Put, randomKey);
  seekKey = expectedKey.createKeyOnly(false);
  cell = mobFile.readCell(seekKey, false).getCell();
  MobTestUtil.assertCellEquals(expectedKey, cell);
  // Test a key less than the start key: "a1" sorts before "aa",
  // so the seek lands on the first cell of the file.
  byte[] lowerKey = Bytes.toBytes("a1");
  expectedKey = new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
  seekKey = new KeyValue(lowerKey, family, qualify, Long.MAX_VALUE, Type.Put, lowerKey);
  cell = mobFile.readCell(seekKey, false).getCell();
  MobTestUtil.assertCellEquals(expectedKey, cell);
  // Test a key greater than the end key: '{' is the byte after 'z' in ASCII,
  // so "z{" sorts after "zz" and the read finds nothing.
  byte[] upperKey = Bytes.toBytes("z{");
  seekKey = new KeyValue(upperKey, family, qualify, Long.MAX_VALUE, Type.Put, upperKey);
  assertNull(mobFile.readCell(seekKey, false));
}
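A note on the constructor used here: the trailing boolean in new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true) marks the file as being opened on behalf of a primary replica (an assumption based on the usual primaryReplica parameter in that position), and readCell returns null once the seek key sorts past the last cell in the file, which is exactly what the final assertion relies on.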
Use of org.apache.hadoop.hbase.regionserver.HStoreFile in project hbase by apache.
The class TestMobStoreCompaction, method countMobCellsInMetadata.
private long countMobCellsInMetadata() throws IOException {
  long mobCellsCount = 0;
  Path mobDirPath = MobUtils.getMobFamilyPath(conf, tableDescriptor.getTableName(),
    familyDescriptor.getNameAsString());
  Configuration copyOfConf = new Configuration(conf);
  copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
  CacheConfig cacheConfig = new CacheConfig(copyOfConf);
  if (fs.exists(mobDirPath)) {
    FileStatus[] files = UTIL.getTestFileSystem().listStatus(mobDirPath);
    for (FileStatus file : files) {
      HStoreFile sf = new HStoreFile(fs, file.getPath(), conf, cacheConfig, BloomType.NONE, true);
      sf.initReader();
      Map<byte[], byte[]> fileInfo = sf.getReader().loadFileInfo();
      byte[] count = fileInfo.get(MOB_CELLS_COUNT);
      assertNotNull(count);
      mobCellsCount += Bytes.toLong(count);
    }
  }
  return mobCellsCount;
}
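MOB_CELLS_COUNT here is a file-info key (a constant defined on HStoreFile and evidently statically imported by this test) under which the MOB store file writer records how many MOB cells the file holds, so the count can be read back from file metadata without scanning any data blocks.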
Use of org.apache.hadoop.hbase.regionserver.HStoreFile in project hbase by apache.
The class TestMobFile, method testGetScanner.
@Test
public void testGetScanner() throws Exception {
  Path testDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = testDir.getFileSystem(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
    .withOutputDir(testDir).withFileContext(meta).build();
  MobTestUtil.writeStoreFile(writer, testName.getMethodName());
  MobFile mobFile =
    new MobFile(new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true));
  assertNotNull(mobFile.getScanner());
  assertTrue(mobFile.getScanner() instanceof StoreFileScanner);
}