Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
The class TestDataBlockEncoders, method testAlgorithm.
private void testAlgorithm(byte[] encodedData, ByteBuffer unencodedDataBuf,
    DataBlockEncoder encoder) throws IOException {
  // decode
  ByteArrayInputStream bais = new ByteArrayInputStream(encodedData, ENCODED_DATA_OFFSET,
      encodedData.length - ENCODED_DATA_OFFSET);
  DataInputStream dis = new DataInputStream(bais);
  ByteBuffer actualDataset;
  HFileContext meta = new HFileContextBuilder()
      .withHBaseCheckSum(false)
      .withIncludesMvcc(includesMemstoreTS)
      .withIncludesTags(includesTags)
      .withCompression(Compression.Algorithm.NONE)
      .build();
  actualDataset = encoder.decodeKeyValues(dis, encoder.newDataBlockDecodingContext(meta));
  actualDataset.rewind();
  // In the prefix-tree case the decoded stream will not have the MVCC in it.
  assertEquals("Encoding -> decoding gives different results for " + encoder,
      Bytes.toStringBinary(unencodedDataBuf), Bytes.toStringBinary(actualDataset));
}
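The HFileContextBuilder chain above is the usage this page indexes. As a minimal, self-contained sketch, assuming the HBase client libraries are on the classpath (the flag values here are illustrative stand-ins for the test's parameterized fields, not prescribed settings):

import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

public class HFileContextSketch {
  public static void main(String[] args) {
    // Illustrative values only; the test derives these from its own fields.
    HFileContext meta = new HFileContextBuilder()
        .withHBaseCheckSum(false)                     // data stream carries no HBase checksums
        .withIncludesMvcc(true)                       // cells keep their MVCC (memstore) timestamps
        .withIncludesTags(false)                      // no cell tags in this sketch
        .withCompression(Compression.Algorithm.NONE)  // no on-disk compression
        .build();
    System.out.println(meta);
  }
}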
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
The class TestDataBlockEncoders, method getEncodingContext.
private HFileBlockEncodingContext getEncodingContext(Compression.Algorithm algo,
    DataBlockEncoding encoding) {
  DataBlockEncoder encoder = encoding.getEncoder();
  HFileContext meta = new HFileContextBuilder()
      .withHBaseCheckSum(false)
      .withIncludesMvcc(includesMemstoreTS)
      .withIncludesTags(includesTags)
      .withCompression(algo)
      .build();
  if (encoder != null) {
    return encoder.newDataBlockEncodingContext(encoding, HFILEBLOCK_DUMMY_HEADER, meta);
  } else {
    return new HFileBlockDefaultEncodingContext(encoding, HFILEBLOCK_DUMMY_HEADER, meta);
  }
}
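A hedged usage sketch of the two branches: assuming DataBlockEncoding.NONE exposes no encoder (getEncoder() returns null for it), a call with NONE falls through to the default context, while an encoding such as DIFF goes through its encoder. Both call sites below are hypothetical:

// Hypothetical calls; the NONE case is assumed to hit the
// HFileBlockDefaultEncodingContext branch, DIFF the encoder branch.
HFileBlockEncodingContext plain =
    getEncodingContext(Compression.Algorithm.NONE, DataBlockEncoding.NONE);
HFileBlockEncodingContext diff =
    getEncodingContext(Compression.Algorithm.GZ, DataBlockEncoding.DIFF);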
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
The class TestDataBlockEncoders, method testNextOnSample.
@Test
public void testNextOnSample() throws IOException {
  List<KeyValue> sampleKv = generator.generateTestKeyValues(NUMBER_OF_KV, includesTags);
  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    // TODO remove this once support is added. HBASE-12298
    if (this.useOffheapData && encoding == DataBlockEncoding.PREFIX_TREE) {
      continue;
    }
    if (encoding.getEncoder() == null) {
      continue;
    }
    DataBlockEncoder encoder = encoding.getEncoder();
    ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv,
        getEncodingContext(Compression.Algorithm.NONE, encoding), this.useOffheapData);
    HFileContext meta = new HFileContextBuilder()
        .withHBaseCheckSum(false)
        .withIncludesMvcc(includesMemstoreTS)
        .withIncludesTags(includesTags)
        .withCompression(Compression.Algorithm.NONE)
        .build();
    DataBlockEncoder.EncodedSeeker seeker =
        encoder.createSeeker(CellComparator.COMPARATOR, encoder.newDataBlockDecodingContext(meta));
    seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
    int i = 0;
    do {
      KeyValue expectedKeyValue = sampleKv.get(i);
      Cell cell = seeker.getCell();
      if (CellComparator.COMPARATOR.compareKeyIgnoresMvcc(expectedKeyValue, cell) != 0) {
        int commonPrefix = CellUtil.findCommonPrefixInFlatKey(expectedKeyValue, cell, false, true);
        fail(String.format(
            "next() produces wrong results encoder: %s i: %d commonPrefix: %d"
                + "\n expected %s\n actual %s",
            encoder.toString(), i, commonPrefix,
            Bytes.toStringBinary(expectedKeyValue.getBuffer(), expectedKeyValue.getKeyOffset(),
                expectedKeyValue.getKeyLength()),
            CellUtil.toString(cell, false)));
      }
      i++;
    } while (seeker.next());
  }
}
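The do/while in this test is the core EncodedSeeker traversal pattern. Condensed to its skeleton, with the names (seeker, encodedBuffer) assumed from the test above:

// Position the seeker on an encoded block, then walk cells until
// next() returns false at the end of the block.
seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
do {
  Cell cell = seeker.getCell();   // cell currently under the seeker
  // ... compare or consume the cell ...
} while (seeker.next());          // false once the block is exhausted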
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
The class TestPartitionedMobCompactor, method createMobFile.
/**
 * Creates multiple partition files.
 */
private void createMobFile(Path basePath) throws IOException {
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  MobFileName mobFileName = null;
  int ii = 0;
  Date today = new Date();
  for (byte k0 : KEYS) {
    byte[] startRow = Bytes.toBytes(ii++);
    mobFileName = MobFileName.create(startRow, MobUtils.formatDate(today), mobSuffix);
    StoreFileWriter mobFileWriter = new StoreFileWriter.Builder(conf, cacheConf, fs)
        .withFileContext(meta)
        .withFilePath(new Path(basePath, mobFileName.getFileName()))
        .build();
    long now = System.currentTimeMillis();
    try {
      for (int i = 0; i < 10; i++) {
        byte[] key = Bytes.add(Bytes.toBytes(k0), Bytes.toBytes(i));
        byte[] dummyData = new byte[5000];
        new Random().nextBytes(dummyData);
        mobFileWriter.append(
            new KeyValue(key, Bytes.toBytes(family), Bytes.toBytes(qf), now, Type.Put, dummyData));
      }
    } finally {
      mobFileWriter.close();
    }
  }
}
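For reference, the file name written above is assembled by MobFileName.create from three parts: the start row, a formatted date, and a suffix. A brief usage sketch, with a hypothetical suffix in place of the test's UUID-derived one:

// Hypothetical suffix; the tests use a UUID-derived string instead.
MobFileName name = MobFileName.create(Bytes.toBytes(1), MobUtils.formatDate(new Date()), "0ab1c2d3");
String fileName = name.getFileName();  // combines the (encoded) start key, date, and suffix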
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
The class TestPartitionedMobCompactor, method createStoreFiles.
private void createStoreFiles(Path basePath, String family, String qualifier, int count,
    Type type, boolean sameStartKey, final Date date) throws IOException {
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  String startKey = "row_";
  MobFileName mobFileName = null;
  for (int i = 0; i < count; i++) {
    byte[] startRow;
    if (sameStartKey) {
      // When creating multiple files under one partition, the suffix needs to be different.
      startRow = Bytes.toBytes(startKey);
      mobSuffix = UUID.randomUUID().toString().replaceAll("-", "");
      delSuffix = UUID.randomUUID().toString().replaceAll("-", "") + "_del";
    } else {
      startRow = Bytes.toBytes(startKey + i);
    }
    if (type.equals(Type.Delete)) {
      mobFileName = MobFileName.create(startRow, MobUtils.formatDate(date), delSuffix);
    }
    if (type.equals(Type.Put)) {
      mobFileName = MobFileName.create(startRow, MobUtils.formatDate(date), mobSuffix);
    }
    StoreFileWriter mobFileWriter = new StoreFileWriter.Builder(conf, cacheConf, fs)
        .withFileContext(meta)
        .withFilePath(new Path(basePath, mobFileName.getFileName()))
        .build();
    writeStoreFile(mobFileWriter, startRow, Bytes.toBytes(family), Bytes.toBytes(qualifier),
        type, (i + 1) * 1000);
  }
}
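Both helpers above follow the same StoreFileWriter lifecycle: build the writer with an HFileContext and a target path, append cells, then close. A hedged sketch of that pattern, assuming conf, cacheConf, fs, path, and kv are already in scope as in the tests:

// Sketch of the writer lifecycle shared by both helpers; all variables
// (conf, cacheConf, fs, path, kv) are assumed to exist in the caller.
HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
    .withFileContext(meta)
    .withFilePath(path)
    .build();
try {
  writer.append(kv);  // one or more cells, appended in sorted order
} finally {
  writer.close();     // flushes and finalizes the store file
}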