use of org.apache.hadoop.hbase.KeyValue in project hbase by apache.
the class TestSeekToBlockWithEncoders method testSeekToBlockWithDiffFamilyAndQualifer.
@Test
public void testSeekToBlockWithDiffFamilyAndQualifer() throws IOException {
  List<KeyValue> sampleKv = new ArrayList<>();
  KeyValue kv1 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("fam1"), Bytes.toBytes("q1"),
      Bytes.toBytes("val"));
  sampleKv.add(kv1);
  KeyValue kv2 = new KeyValue(Bytes.toBytes("aab"), Bytes.toBytes("fam1"), Bytes.toBytes("q1"),
      Bytes.toBytes("val"));
  sampleKv.add(kv2);
  KeyValue kv4 = new KeyValue(Bytes.toBytes("aac"), Bytes.toBytes("fam1"), Bytes.toBytes("q1"),
      Bytes.toBytes("val"));
  sampleKv.add(kv4);
  KeyValue kv5 = new KeyValue(Bytes.toBytes("aac"), Bytes.toBytes("fam1"), Bytes.toBytes("q2"),
      Bytes.toBytes("val"));
  sampleKv.add(kv5);
  KeyValue toSeek = new KeyValue(Bytes.toBytes("aac"), Bytes.toBytes("fam2"), Bytes.toBytes("q2"),
      Bytes.toBytes("val"));
  seekToTheKey(kv5, sampleKv, toSeek);
}
use of org.apache.hadoop.hbase.KeyValue in project hbase by apache.
the class TestSeekToBlockWithEncoders method testSeekToBlockWithNonMatchingSeekKey.
/**
* Test seeking while the file is encoded.
*/
@Test
public void testSeekToBlockWithNonMatchingSeekKey() throws IOException {
  List<KeyValue> sampleKv = new ArrayList<>();
  KeyValue kv1 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("q1"),
      Bytes.toBytes("val"));
  sampleKv.add(kv1);
  KeyValue kv2 = new KeyValue(Bytes.toBytes("aab"), Bytes.toBytes("f1"), Bytes.toBytes("q1"),
      Bytes.toBytes("val"));
  sampleKv.add(kv2);
  KeyValue kv3 = new KeyValue(Bytes.toBytes("aac"), Bytes.toBytes("f1"), Bytes.toBytes("q1"),
      Bytes.toBytes("val"));
  sampleKv.add(kv3);
  KeyValue kv4 = new KeyValue(Bytes.toBytes("aad"), Bytes.toBytes("f1"), Bytes.toBytes("q1"),
      Bytes.toBytes("val"));
  sampleKv.add(kv4);
  KeyValue kv5 = new KeyValue(Bytes.toBytes("bba"), Bytes.toBytes("f1"), Bytes.toBytes("q1"),
      Bytes.toBytes("val"));
  sampleKv.add(kv5);
  KeyValue toSeek = new KeyValue(Bytes.toBytes("aae"), Bytes.toBytes("f1"), Bytes.toBytes("q1"),
      Bytes.toBytes("val"));
  seekToTheKey(kv4, sampleKv, toSeek);
}
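Both tests above delegate to a seekToTheKey helper that is not shown on this page. Its contract: after seeking to toSeek in a block built from sampleKv, the seeker must be positioned on the expected cell, i.e. the last cell that sorts at or before the seek key. Below is a minimal sketch of that contract only, not the real helper (which additionally round-trips the cells through every DataBlockEncoding and an encoded seeker); the comparator accessor is an assumption that varies by HBase line (CellComparator.COMPARATOR here, CellComparator.getInstance() on newer 2.x).
// Sketch only: a linear scan standing in for the encoded-block seek the
// real helper performs. The positioned cell is the greatest cell <= toSeek.
private void assertSeekLandsOn(KeyValue expected, List<KeyValue> sampleKv, KeyValue toSeek) {
  KeyValue positioned = null;
  for (KeyValue kv : sampleKv) {
    if (CellComparator.COMPARATOR.compare(kv, toSeek) <= 0) {
      positioned = kv; // sampleKv is sorted, so the last match wins
    }
  }
  Assert.assertEquals(expected, positioned);
}
This is why the second test expects kv4 for seek key "aae": kv5 ("bba") sorts after the seek key, so the seeker lands on the last cell before it.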
use of org.apache.hadoop.hbase.KeyValue in project hbase by apache.
the class TestKeyValueCompression method createKV.
private KeyValue createKV(int noOfTags) {
  byte[] row = Bytes.toBytes("myRow");
  byte[] cf = Bytes.toBytes("myCF");
  byte[] q = Bytes.toBytes("myQualifier");
  byte[] value = Bytes.toBytes("myValue");
  List<Tag> tags = new ArrayList<>(noOfTags);
  for (int i = 1; i <= noOfTags; i++) {
    tags.add(new ArrayBackedTag((byte) i, Bytes.toBytes("tagValue" + i)));
  }
  return new KeyValue(row, cf, q, HConstants.LATEST_TIMESTAMP, value, tags);
}
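A hedged usage sketch for the helper above (illustrative, not taken from the test class): a KeyValue built this way serializes its tags inline in the backing array, which the Cell interface exposes without decoding them.
// Illustrative only: confirm the requested tags were serialized into the cell.
KeyValue kv = createKV(3);
Assert.assertTrue("expected inline tags", kv.getTagsLength() > 0);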
use of org.apache.hadoop.hbase.KeyValue in project hbase by apache.
the class TestLogRollAbort method testLogRollAfterSplitStart.
/**
* Tests the case where a RegionServer enters a GC pause and comes back online
* after the master has declared it dead and started splitting its logs.
* Log rolling after the split has started should fail. See HBASE-2312.
*/
@Test(timeout = 300000)
public void testLogRollAfterSplitStart() throws IOException {
  LOG.info("Verify wal roll after split starts will fail.");
  String logName = ServerName.valueOf("testLogRollAfterSplitStart", 16010,
      System.currentTimeMillis()).toString();
  Path thisTestsDir = new Path(HBASELOGDIR, AbstractFSWALProvider.getWALDirectoryName(logName));
  final WALFactory wals = new WALFactory(conf, null, logName);
  try {
    // put some entries in a WAL
    TableName tableName = TableName.valueOf(this.getClass().getName());
    HRegionInfo regioninfo = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_END_ROW);
    final WAL log = wals.getWAL(regioninfo.getEncodedNameAsBytes(),
        regioninfo.getTable().getNamespace());
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
    final int total = 20;
    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
      HTableDescriptor htd = new HTableDescriptor(tableName);
      htd.addFamily(new HColumnDescriptor("column"));
      NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
      for (byte[] fam : htd.getFamiliesKeys()) {
        scopes.put(fam, 0);
      }
      log.append(regioninfo, new WALKey(regioninfo.getEncodedNameAsBytes(), tableName,
          System.currentTimeMillis(), mvcc, scopes), kvs, true);
    }
    // Send the data to HDFS datanodes and close the HDFS writer
    log.sync();
    ((AbstractFSWAL<?>) log).replaceWriter(((FSHLog) log).getOldPath(), null, null);
    /* code taken from MasterFileSystem.getLogDirs(), which is called from
     * MasterFileSystem.splitLog() and handles RS shutdowns (as observed by the
     * splitting process)
     */
    // rename the directory so a rogue RS doesn't create more WALs
    Path rsSplitDir = thisTestsDir.suffix(AbstractFSWALProvider.SPLITTING_EXT);
    if (!fs.rename(thisTestsDir, rsSplitDir)) {
      throw new IOException("Failed fs.rename for log split: " + thisTestsDir);
    }
    LOG.debug("Renamed region directory: " + rsSplitDir);
    LOG.debug("Processing the old log files.");
    WALSplitter.split(HBASELOGDIR, rsSplitDir, OLDLOGDIR, fs, conf, wals);
    LOG.debug("Trying to roll the WAL.");
    try {
      log.rollWriter();
      Assert.fail("rollWriter() did not throw any exception.");
    } catch (IOException ioe) {
      if (ioe.getCause() instanceof FileNotFoundException) {
        LOG.info("Got the expected exception: ", ioe.getCause());
      } else {
        Assert.fail("Unexpected exception: " + ioe);
      }
    }
  } finally {
    wals.close();
    if (fs.exists(thisTestsDir)) {
      fs.delete(thisTestsDir, true);
    }
  }
}
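The failure mechanism hinges on the "-splitting" rename convention used above: once the master renames the RegionServer's WAL directory, the WAL writer's home path no longer exists, so the next roll fails with a FileNotFoundException cause. A minimal sketch of that interaction, with a hypothetical directory name (real WAL dirs are named after the ServerName):
// Hypothetical path, for illustration only.
Path walDir = new Path("/hbase/WALs/rs1,16010,1490000000000");
Path splitting = walDir.suffix(AbstractFSWALProvider.SPLITTING_EXT); // "...-splitting"
if (fs.rename(walDir, splitting)) {
  // Any WAL still rooted at walDir can no longer create files there,
  // so its next rollWriter() is expected to fail.
}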
use of org.apache.hadoop.hbase.KeyValue in project hbase by apache.
the class TestUserScanQueryMatcher method testMatch_Wildcard.
@Test
public void testMatch_Wildcard() throws IOException {
  // Moving up from the Tracker by using Gets and List<KeyValue> instead
  // of just byte[]
  // Expected result
  List<MatchCode> expected = new ArrayList<>(6);
  expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
  expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
  expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
  expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
  expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
  expected.add(ScanQueryMatcher.MatchCode.DONE);
  long now = EnvironmentEdgeManager.currentTime();
  UserScanQueryMatcher qm = UserScanQueryMatcher.create(scan,
      new ScanInfo(this.conf, fam2, 0, 1, ttl, KeepDeletedCells.FALSE, 0, rowComparator),
      null, now - ttl, now, null);
  List<KeyValue> memstore = new ArrayList<>(6);
  memstore.add(new KeyValue(row1, fam2, col1, 1, data));
  memstore.add(new KeyValue(row1, fam2, col2, 1, data));
  memstore.add(new KeyValue(row1, fam2, col3, 1, data));
  memstore.add(new KeyValue(row1, fam2, col4, 1, data));
  memstore.add(new KeyValue(row1, fam2, col5, 1, data));
  memstore.add(new KeyValue(row2, fam1, col1, 1, data));
  List<ScanQueryMatcher.MatchCode> actual = new ArrayList<>(memstore.size());
  KeyValue k = memstore.get(0);
  qm.setToNewRow(k);
  for (KeyValue kv : memstore) {
    actual.add(qm.match(kv));
  }
  assertEquals(expected.size(), actual.size());
  for (int i = 0; i < expected.size(); i++) {
    LOG.debug("expected " + expected.get(i) + ", actual " + actual.get(i));
    assertEquals(expected.get(i), actual.get(i));
  }
}
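Why the sixth expectation is DONE rather than INCLUDE: the sixth cell (row2/fam1) starts a new row, and a wildcard matcher positioned on row1 via setToNewRow() signals DONE as soon as it leaves that row. A hedged sketch of that per-cell decision, covering only the two outcomes this test exercises (the real matcher also checks TTL, version counts, and filters):
// Sketch only, not the real implementation.
private ScanQueryMatcher.MatchCode sketchMatch(Cell cell, Cell currentRow) {
  if (!CellUtil.matchingRows(cell, currentRow)) {
    return ScanQueryMatcher.MatchCode.DONE; // left the row set via setToNewRow()
  }
  return ScanQueryMatcher.MatchCode.INCLUDE; // wildcard: every column qualifies
}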