Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
From the class TestCachedMobFile, the method testCompare:
@Test
public void testCompare() throws Exception {
  String caseName = getName();
  Path testDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = testDir.getFileSystem(conf);
  Path outputDir1 = new Path(testDir, FAMILY1);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  StoreFileWriter writer1 = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(outputDir1).withFileContext(meta).build();
  MobTestUtil.writeStoreFile(writer1, caseName);
  CachedMobFile cachedMobFile1 = CachedMobFile.create(fs, writer1.getPath(), conf, cacheConf);
  Path outputDir2 = new Path(testDir, FAMILY2);
  StoreFileWriter writer2 = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(outputDir2).withFileContext(meta).build();
  MobTestUtil.writeStoreFile(writer2, caseName);
  CachedMobFile cachedMobFile2 = CachedMobFile.create(fs, writer2.getPath(), conf, cacheConf);
  cachedMobFile1.access(1);
  cachedMobFile2.access(2);
  Assert.assertEquals(cachedMobFile1.compareTo(cachedMobFile2), 1);
  Assert.assertEquals(cachedMobFile2.compareTo(cachedMobFile1), -1);
  Assert.assertEquals(cachedMobFile1.compareTo(cachedMobFile1), 0);
}
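The test above sets only a block size on the HFileContext. For reference, the same builder can carry the other options used elsewhere in this listing; the sketch below is illustrative only (the chosen values are arbitrary, not required by the test) and assumes the usual HFileContextBuilder options (withCompression, withDataBlockEncoding, withIncludesMvcc, withIncludesTags) are available in the HBase version at hand.

// Illustrative only: an HFileContext carrying compression and encoding settings
// in addition to the block size used by the test above.
HFileContext richerMeta = new HFileContextBuilder()
    .withBlockSize(8 * 1024)
    .withCompression(Compression.Algorithm.NONE)
    .withDataBlockEncoding(DataBlockEncoding.FAST_DIFF)
    .withIncludesMvcc(true)
    .withIncludesTags(false)
    .build();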
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
From the class DataBlockEncodingTool, the method checkStatistics:
/**
 * Check statistics for a given HFile for different data block encoders.
 * @param scanner scanner over the file that will be compressed
 * @param kvLimit maximum number of KeyValues to process
 * @throws IOException thrown if the scanner is invalid
 */
public void checkStatistics(final KeyValueScanner scanner, final int kvLimit) throws IOException {
  scanner.seek(KeyValue.LOWESTKEY);
  KeyValue currentKV;
  byte[] previousKey = null;
  byte[] currentKey;
  DataBlockEncoding[] encodings = DataBlockEncoding.values();
  ByteArrayOutputStream uncompressedOutputStream = new ByteArrayOutputStream();
  int j = 0;
  while ((currentKV = KeyValueUtil.ensureKeyValue(scanner.next())) != null && j < kvLimit) {
    // Iterates through key/value pairs
    j++;
    currentKey = currentKV.getKey();
    if (previousKey != null) {
      for (int i = 0; i < previousKey.length && i < currentKey.length
          && previousKey[i] == currentKey[i]; ++i) {
        totalKeyRedundancyLength++;
      }
    }
    uncompressedOutputStream.write(currentKV.getBuffer(), currentKV.getOffset(),
        currentKV.getLength());
    previousKey = currentKey;
    int kLen = currentKV.getKeyLength();
    int vLen = currentKV.getValueLength();
    int cfLen = currentKV.getFamilyLength(currentKV.getFamilyOffset());
    int restLen = currentKV.getLength() - kLen - vLen;
    totalKeyLength += kLen;
    totalValueLength += vLen;
    totalPrefixLength += restLen;
    totalCFLength += cfLen;
  }
  rawKVs = uncompressedOutputStream.toByteArray();
  boolean useTag = (currentKV.getTagsLength() > 0);
  for (DataBlockEncoding encoding : encodings) {
    if (encoding == DataBlockEncoding.NONE) {
      continue;
    }
    DataBlockEncoder d = encoding.getEncoder();
    HFileContext meta = new HFileContextBuilder().withCompression(Compression.Algorithm.NONE)
        .withIncludesMvcc(includesMemstoreTS).withIncludesTags(useTag).build();
    codecs.add(new EncodedDataBlock(d, encoding, rawKVs, meta));
  }
}
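The loop above accumulates totalKeyLength, totalValueLength, totalPrefixLength, totalCFLength and totalKeyRedundancyLength, which the tool later reports for each encoder. As a hedged illustration (this helper is hypothetical, not part of DataBlockEncodingTool), the shared-prefix counter can be turned into a ratio like this:

// Hypothetical helper: fraction of raw key bytes shared with the preceding key,
// computed from the counters accumulated in the loop above.
private static double keyRedundancyRatio(long totalKeyLength, long totalKeyRedundancyLength) {
  return totalKeyLength == 0 ? 0.0 : (double) totalKeyRedundancyLength / totalKeyLength;
}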
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
From the class TestFSErrorsExposed, the method testStoreFileScannerThrowsErrors:
/**
* Injects errors into the pread calls of an on-disk file, and makes
* sure those bubble up to the StoreFileScanner
*/
@Test
public void testStoreFileScannerThrowsErrors() throws IOException {
  Path hfilePath = new Path(new Path(util.getDataTestDir("internalScannerExposesErrors"),
      "regionname"), "familyname");
  HFileSystem hfs = (HFileSystem) util.getTestFileSystem();
  FaultyFileSystem faultyfs = new FaultyFileSystem(hfs.getBackingFs());
  HFileSystem fs = new HFileSystem(faultyfs);
  CacheConfig cacheConf = new CacheConfig(util.getConfiguration());
  HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(util.getConfiguration(), cacheConf, hfs)
      .withOutputDir(hfilePath).withFileContext(meta).build();
  TestStoreFile.writeStoreFile(writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
  StoreFile sf = new StoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf,
      BloomType.NONE);
  List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(
      Collections.singletonList(sf), false, true, false, false,
      // 0 is passed as readpoint because this test operates on StoreFile directly
      0);
  KeyValueScanner scanner = scanners.get(0);
  FaultyInputStream inStream = faultyfs.inStreams.get(0).get();
  assertNotNull(inStream);
  scanner.seek(KeyValue.LOWESTKEY);
  // Do at least one successful read
  assertNotNull(scanner.next());
  faultyfs.startFaults();
  try {
    int scanned = 0;
    while (scanner.next() != null) {
      scanned++;
    }
    fail("Scanner didn't throw after faults injected");
  } catch (IOException ioe) {
    LOG.info("Got expected exception", ioe);
    assertTrue(ioe.getMessage().contains("Could not iterate"));
  }
  scanner.close();
}
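FaultyFileSystem and FaultyInputStream are helpers from the HBase test suite; the snippet above only shows how they are driven. A minimal, hypothetical sketch of the underlying idea, a stream wrapper that reads normally until faults are switched on and then throws, could look like this (class and method names are made up for illustration, not taken from HBase):

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;

// Hypothetical sketch of stream-level fault injection: reads succeed until
// startFaults() is called, after which every read throws an IOException.
class FaultToggleInputStream extends FilterInputStream {
  private volatile boolean faultsEnabled = false;

  FaultToggleInputStream(InputStream in) {
    super(in);
  }

  void startFaults() {
    faultsEnabled = true;
  }

  @Override
  public int read() throws IOException {
    checkFault();
    return super.read();
  }

  @Override
  public int read(byte[] b, int off, int len) throws IOException {
    checkFault();
    return super.read(b, off, len);
  }

  private void checkFault() throws IOException {
    if (faultsEnabled) {
      throw new IOException("Fault injected: could not read");
    }
  }
}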
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
From the class TestHRegionReplayEvents, the method createHFileForFamilies:
private String createHFileForFamilies(Path testPath, byte[] family, byte[] valueBytes)
    throws IOException {
  HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(TEST_UTIL.getConfiguration());
  // TODO We need a way to do this without creating files
  Path testFile = new Path(testPath, UUID.randomUUID().toString());
  FSDataOutputStream out = TEST_UTIL.getTestFileSystem().create(testFile);
  try {
    hFileFactory.withOutputStream(out);
    hFileFactory.withFileContext(new HFileContext());
    HFile.Writer writer = hFileFactory.create();
    try {
      writer.append(new KeyValue(CellUtil.createCell(valueBytes, family, valueBytes, 0L,
          KeyValue.Type.Put.getCode(), valueBytes)));
    } finally {
      writer.close();
    }
  } finally {
    out.close();
  }
  return testFile.toString();
}
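This is the only example in the listing that instantiates HFileContext directly; the other examples obtain it through HFileContextBuilder. Assuming the default settings are acceptable, the builder form used elsewhere in this listing would be the equivalent:

// Equivalent default context obtained through the builder API used in the other examples.
hFileFactory.withFileContext(new HFileContextBuilder().build());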
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
From the class TestMobStoreCompaction, the method createHFile:
/**
 * Create an HFile at the given path containing a single cell filled with the given dummy data.
 */
private void createHFile(Path path, int rowIdx, byte[] dummyData) throws IOException {
  HFileContext meta = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf)).withPath(fs, path)
      .withFileContext(meta).create();
  long now = System.currentTimeMillis();
  try {
    KeyValue kv = new KeyValue(Bytes.add(STARTROW, Bytes.toBytes(rowIdx)), COLUMN_FAMILY,
        Bytes.toBytes("colX"), now, dummyData);
    writer.append(kv);
  } finally {
    writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
    writer.close();
  }
}
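As a hedged usage sketch (the surrounding loop and the hfileDir variable are hypothetical, not taken from the test), the helper would typically be called once per HFile to be produced:

// Hypothetical call site: write a handful of small HFiles with dummy payloads.
byte[] dummyData = new byte[1024];
for (int i = 0; i < 3; i++) {
  createHFile(new Path(hfileDir, "hfile_" + i), i, dummyData);
}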