Use of org.apache.hadoop.hbase.CellScanner in project hbase by apache.
From the class TestTags, method testFlushAndCompactionwithCombinations:
@Test
public void testFlushAndCompactionwithCombinations() throws Exception {
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  byte[] fam = Bytes.toBytes("info");
  byte[] row = Bytes.toBytes("rowa");
  // column names
  byte[] qual = Bytes.toBytes("qual");
  byte[] row1 = Bytes.toBytes("rowb");
  byte[] row2 = Bytes.toBytes("rowc");
  byte[] rowd = Bytes.toBytes("rowd");
  byte[] rowe = Bytes.toBytes("rowe");
  Table table = null;
  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor colDesc = new HColumnDescriptor(fam);
    colDesc.setBlockCacheEnabled(true);
    colDesc.setDataBlockEncoding(encoding);
    desc.addFamily(colDesc);
    Admin admin = TEST_UTIL.getAdmin();
    admin.createTable(desc);
    try {
      table = TEST_UTIL.getConnection().getTable(tableName);
      Put put = new Put(row);
      byte[] value = Bytes.toBytes("value");
      put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
      int bigTagLen = Short.MAX_VALUE - 5;
      put.setAttribute("visibility", new byte[bigTagLen]);
      table.put(put);
      Put put1 = new Put(row1);
      byte[] value1 = Bytes.toBytes("1000dfsdf");
      put1.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
      table.put(put1);
      admin.flush(tableName);
      // We are lacking an API for confirming flush request compaction.
      // Just sleep for a short time. We won't be able to confirm flush
      // completion but the test won't hang now or in the future if
      // default compaction policy causes compaction between flush and
      // when we go to confirm it.
      Thread.sleep(1000);
      put1 = new Put(row2);
      value1 = Bytes.toBytes("1000dfsdf");
      put1.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
      table.put(put1);
      admin.flush(tableName);
      Thread.sleep(1000);
      Put put2 = new Put(rowd);
      byte[] value2 = Bytes.toBytes("1000dfsdf");
      put2.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
      table.put(put2);
      put2 = new Put(rowe);
      value2 = Bytes.toBytes("1000dfsddfdf");
      put2.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
      put.setAttribute("visibility", Bytes.toBytes("ram"));
      table.put(put2);
      admin.flush(tableName);
      Thread.sleep(1000);
      TestCoprocessorForTags.checkTagPresence = true;
      Scan s = new Scan(row);
      s.setCaching(1);
      ResultScanner scanner = table.getScanner(s);
      try {
        Result next = null;
        while ((next = scanner.next()) != null) {
          CellScanner cellScanner = next.cellScanner();
          cellScanner.advance();
          Cell current = cellScanner.current();
          if (CellUtil.matchingRow(current, row)) {
            assertEquals(1, TestCoprocessorForTags.tags.size());
            Tag tag = TestCoprocessorForTags.tags.get(0);
            assertEquals(bigTagLen, tag.getValueLength());
          } else {
            assertEquals(0, TestCoprocessorForTags.tags.size());
          }
        }
      } finally {
        if (scanner != null) {
          scanner.close();
        }
        TestCoprocessorForTags.checkTagPresence = false;
      }
      while (admin.getCompactionState(tableName) != CompactionState.NONE) {
        Thread.sleep(10);
      }
      TestCoprocessorForTags.checkTagPresence = true;
      scanner = table.getScanner(s);
      try {
        Result next = null;
        while ((next = scanner.next()) != null) {
          CellScanner cellScanner = next.cellScanner();
          cellScanner.advance();
          Cell current = cellScanner.current();
          if (CellUtil.matchingRow(current, row)) {
            assertEquals(1, TestCoprocessorForTags.tags.size());
            Tag tag = TestCoprocessorForTags.tags.get(0);
            assertEquals(bigTagLen, tag.getValueLength());
          } else {
            assertEquals(0, TestCoprocessorForTags.tags.size());
          }
        }
      } finally {
        if (scanner != null) {
          scanner.close();
        }
        TestCoprocessorForTags.checkTagPresence = false;
      }
    } finally {
      if (table != null) {
        table.close();
      }
      // delete the table
      admin.disableTable(tableName);
      admin.deleteTable(tableName);
    }
  }
}
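Each row in the test above carries a single column, so the test calls advance() once per Result and inspects current(). For reference, a minimal sketch of walking every cell of a Result with CellScanner follows; it is a hypothetical helper, not part of the test, and assumes the usual org.apache.hadoop.hbase imports (Cell, CellScanner, CellUtil, client.Result, util.Bytes).

// Hypothetical helper (illustrative only): print every cell of a Result via CellScanner.
private static void dumpCells(Result result) throws IOException {
  CellScanner cellScanner = result.cellScanner();
  // advance() positions the scanner on the next cell and returns false when no cells remain;
  // it must be called before the first current().
  while (cellScanner.advance()) {
    Cell cell = cellScanner.current();
    System.out.println(Bytes.toString(CellUtil.cloneRow(cell)) + "/"
        + Bytes.toString(CellUtil.cloneQualifier(cell)) + " = "
        + Bytes.toString(CellUtil.cloneValue(cell)));
  }
}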
Use of org.apache.hadoop.hbase.CellScanner in project hbase by apache.
From the class TestTags, method testFlushAndCompactionWithoutTags:
@Test
public void testFlushAndCompactionWithoutTags() throws Exception {
  Table table = null;
  try {
    TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    byte[] fam = Bytes.toBytes("info");
    byte[] row = Bytes.toBytes("rowa");
    // column names
    byte[] qual = Bytes.toBytes("qual");
    byte[] row1 = Bytes.toBytes("rowb");
    byte[] row2 = Bytes.toBytes("rowc");
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor colDesc = new HColumnDescriptor(fam);
    colDesc.setBlockCacheEnabled(true);
    // colDesc.setDataBlockEncoding(DataBlockEncoding.NONE);
    colDesc.setDataBlockEncoding(DataBlockEncoding.PREFIX_TREE);
    desc.addFamily(colDesc);
    Admin admin = TEST_UTIL.getAdmin();
    admin.createTable(desc);
    table = TEST_UTIL.getConnection().getTable(tableName);
    Put put = new Put(row);
    byte[] value = Bytes.toBytes("value");
    put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
    table.put(put);
    admin.flush(tableName);
    // We are lacking an API for confirming flush request compaction.
    // Just sleep for a short time. We won't be able to confirm flush
    // completion but the test won't hang now or in the future if
    // default compaction policy causes compaction between flush and
    // when we go to confirm it.
    Thread.sleep(1000);
    Put put1 = new Put(row1);
    byte[] value1 = Bytes.toBytes("1000dfsdf");
    put1.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
    table.put(put1);
    admin.flush(tableName);
    Thread.sleep(1000);
    Put put2 = new Put(row2);
    byte[] value2 = Bytes.toBytes("1000dfsdf");
    put2.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
    table.put(put2);
    admin.flush(tableName);
    Thread.sleep(1000);
    Scan s = new Scan(row);
    ResultScanner scanner = table.getScanner(s);
    try {
      Result[] next = scanner.next(3);
      for (Result result : next) {
        CellScanner cellScanner = result.cellScanner();
        cellScanner.advance();
        Cell current = cellScanner.current();
        assertEquals(0, current.getTagsLength());
      }
    } finally {
      if (scanner != null) {
        scanner.close();
      }
    }
    admin.compact(tableName);
    while (admin.getCompactionState(tableName) != CompactionState.NONE) {
      Thread.sleep(10);
    }
    s = new Scan(row);
    scanner = table.getScanner(s);
    try {
      Result[] next = scanner.next(3);
      for (Result result : next) {
        CellScanner cellScanner = result.cellScanner();
        cellScanner.advance();
        Cell current = cellScanner.current();
        assertEquals(0, current.getTagsLength());
      }
    } finally {
      if (scanner != null) {
        scanner.close();
      }
    }
  } finally {
    if (table != null) {
      table.close();
    }
  }
}
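Both tests wait for compactions by polling Admin.getCompactionState in a bare loop. A variant with a timeout could look like the following sketch; the helper name and timeoutMs parameter are illustrative, not from the HBase test.

// Hypothetical helper: poll Admin.getCompactionState until no compaction is running,
// failing if the table does not settle within timeoutMs.
private static void waitForCompactionToFinish(Admin admin, TableName tableName, long timeoutMs)
    throws IOException, InterruptedException {
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (admin.getCompactionState(tableName) != CompactionState.NONE) {
    if (System.currentTimeMillis() > deadline) {
      throw new IOException("Compaction still running after " + timeoutMs + " ms on " + tableName);
    }
    Thread.sleep(10);
  }
}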
Use of org.apache.hadoop.hbase.CellScanner in project hbase by apache.
From the class TestVisibilityLabelsWithDeletes, method testVisibilityLabelsWithDeleteColumnsWithMultipleVersions:
@Test
public void testVisibilityLabelsWithDeleteColumnsWithMultipleVersions() throws Exception {
  setAuths();
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = doPuts(tableName)) {
    TEST_UTIL.getAdmin().flush(tableName);
    PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          Delete d = new Delete(row1);
          d.setCellVisibility(new CellVisibility(
              "(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + SECRET + "&" + TOPSECRET + ")"));
          d.addColumns(fam, qual, 125l);
          table.delete(d);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(actiona);
    TEST_UTIL.getAdmin().flush(tableName);
    Scan s = new Scan();
    s.setMaxVersions(5);
    s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));
    ResultScanner scanner = table.getScanner(s);
    Result[] next = scanner.next(3);
    assertTrue(next.length == 2);
    CellScanner cellScanner = next[0].cellScanner();
    cellScanner.advance();
    Cell current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(),
        row1, 0, row1.length));
    assertEquals(current.getTimestamp(), 127l);
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(),
        row1, 0, row1.length));
    assertEquals(current.getTimestamp(), 126l);
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(),
        row1, 0, row1.length));
    assertEquals(current.getTimestamp(), 125l);
    cellScanner = next[1].cellScanner();
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(),
        row2, 0, row2.length));
  }
}
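The assertions above step the CellScanner cell by cell and check each timestamp in turn. A hypothetical helper that gathers all timestamps of a Result first (illustrative only, not part of the test; assumes java.util.List and java.util.ArrayList) could be:

// Hypothetical helper: collect the timestamps of every cell in a Result, in scan order.
private static List<Long> timestampsOf(Result result) throws IOException {
  List<Long> timestamps = new ArrayList<>();
  CellScanner cellScanner = result.cellScanner();
  while (cellScanner.advance()) {
    timestamps.add(cellScanner.current().getTimestamp());
  }
  return timestamps;
}
// For the first row in the test above, this would allow a single assertion, e.g.:
// assertEquals(Arrays.asList(127L, 126L, 125L), timestampsOf(next[0]));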
Use of org.apache.hadoop.hbase.CellScanner in project hbase by apache.
From the class TestVisibilityLabelsWithDeletes, method testDeleteFamilyLatestTimeStampWithMulipleVersions:
@Test
public void testDeleteFamilyLatestTimeStampWithMulipleVersions() throws Exception {
  setAuths();
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = doPuts(tableName)) {
    TEST_UTIL.getAdmin().flush(tableName);
    PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          Delete d = new Delete(row1);
          d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET));
          d.addFamily(fam);
          table.delete(d);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(actiona);
    TEST_UTIL.getAdmin().flush(tableName);
    Scan s = new Scan();
    s.setMaxVersions(5);
    s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));
    ResultScanner scanner = table.getScanner(s);
    Result[] next = scanner.next(3);
    assertTrue(next.length == 2);
    CellScanner cellScanner = next[0].cellScanner();
    cellScanner.advance();
    Cell current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(),
        row1, 0, row1.length));
    assertEquals(current.getTimestamp(), 127l);
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(),
        row1, 0, row1.length));
    assertEquals(current.getTimestamp(), 126l);
    cellScanner = next[1].cellScanner();
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(),
        row2, 0, row2.length));
  }
}
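The row checks in these visibility tests spell out Bytes.equals over the cell's backing array; the TestTags example earlier uses CellUtil.matchingRow for the same comparison, so an equivalent, shorter form of each assertion would be:

// Equivalent row check using CellUtil (same behavior as the Bytes.equals form above):
assertTrue(CellUtil.matchingRow(current, row1));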
Use of org.apache.hadoop.hbase.CellScanner in project hbase by apache.
From the class TestVisibilityLabelsWithDeletes, method testVisibilityLabelsWithDeleteFamily:
@Test
public void testVisibilityLabelsWithDeleteFamily() throws Exception {
  setAuths();
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = createTableAndWriteDataWithLabels(tableName, SECRET,
      CONFIDENTIAL + "|" + TOPSECRET)) {
    PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          Delete d = new Delete(row2);
          d.setCellVisibility(new CellVisibility(TOPSECRET + "|" + CONFIDENTIAL));
          d.addFamily(fam);
          table.delete(d);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(actiona);
    TEST_UTIL.getAdmin().flush(tableName);
    Scan s = new Scan();
    s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL));
    ResultScanner scanner = table.getScanner(s);
    Result[] next = scanner.next(3);
    assertTrue(next.length == 1);
    CellScanner cellScanner = next[0].cellScanner();
    cellScanner.advance();
    Cell current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(),
        row1, 0, row1.length));
  }
}
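As an alternative to CellScanner, Result also exposes its cells as an array through rawCells(); a minimal sketch of the same first-cell row check (illustrative only, not from the test) would be:

// Alternative to CellScanner: inspect the first cell via Result.rawCells().
Cell[] cells = next[0].rawCells();
assertTrue(cells.length > 0);
assertTrue(CellUtil.matchingRow(cells[0], row1));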