use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.
the class ModifyTableProcedure method updateReplicaColumnsIfNeeded.
/**
 * Update replica column families if necessary.
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private void updateReplicaColumnsIfNeeded(final MasterProcedureEnv env,
    final HTableDescriptor oldHTableDescriptor,
    final HTableDescriptor newHTableDescriptor) throws IOException {
  final int oldReplicaCount = oldHTableDescriptor.getRegionReplication();
  final int newReplicaCount = newHTableDescriptor.getRegionReplication();
  if (newReplicaCount < oldReplicaCount) {
    Set<byte[]> tableRows = new HashSet<>();
    Connection connection = env.getMasterServices().getConnection();
    Scan scan = MetaTableAccessor.getScanForTableName(connection, getTableName());
    scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    // Close both the meta table and the scanner when done.
    try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME);
        ResultScanner resScanner = metaTable.getScanner(scan)) {
      for (Result result : resScanner) {
        tableRows.add(result.getRow());
      }
      MetaTableAccessor.removeRegionReplicasFromMeta(tableRows, newReplicaCount,
          oldReplicaCount - newReplicaCount, connection);
    }
  }
  // Set up replication for region replicas if needed
  if (newReplicaCount > 1 && oldReplicaCount <= 1) {
    ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration());
  }
}
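The scan-and-collect pattern above works against any table, not just hbase:meta. A minimal standalone sketch (the Connection named conn, the table name "t1", and the family "f" are assumptions for illustration):

// ResultScanner implements Iterable<Result>, so it can drive a for-each loop
// directly; try-with-resources releases the server-side scanner lease.
try (Table table = conn.getTable(TableName.valueOf("t1"));
    ResultScanner rs = table.getScanner(new Scan().addFamily(Bytes.toBytes("f")))) {
  for (Result result : rs) {
    System.out.println(Bytes.toStringBinary(result.getRow()));
  }
}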
use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.
the class TableNamespaceManager method list.
public synchronized NavigableSet<NamespaceDescriptor> list() throws IOException {
  NavigableSet<NamespaceDescriptor> ret =
      Sets.newTreeSet(NamespaceDescriptor.NAMESPACE_DESCRIPTOR_COMPARATOR);
  ResultScanner scanner =
      getNamespaceTable().getScanner(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES);
  try {
    for (Result r : scanner) {
      byte[] val = CellUtil.cloneValue(r.getColumnLatestCell(
          HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES,
          HTableDescriptor.NAMESPACE_COL_DESC_BYTES));
      ret.add(ProtobufUtil.toNamespaceDescriptor(
          HBaseProtos.NamespaceDescriptor.parseFrom(val)));
    }
  } finally {
    scanner.close();
  }
  return ret;
}
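For illustration, a caller could dump the known namespaces like this (a minimal sketch; the TableNamespaceManager instance named nsManager is an assumption):

// list() returns a NavigableSet sorted by NAMESPACE_DESCRIPTOR_COMPARATOR,
// so the names print in a deterministic order.
for (NamespaceDescriptor ns : nsManager.list()) {
  System.out.println(ns.getName());
}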
use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.
the class TestPartialResultsFromClientSide method testEquivalenceOfScanResults.
public void testEquivalenceOfScanResults(Table table, Scan scan1, Scan scan2) throws Exception {
  ResultScanner scanner1 = table.getScanner(scan1);
  ResultScanner scanner2 = table.getScanner(scan2);
  Result r1 = null;
  Result r2 = null;
  int count = 0;
  while ((r1 = scanner1.next()) != null) {
    r2 = scanner2.next();
    assertTrue(r2 != null);
    compareResults(r1, r2, "Comparing result #" + count);
    count++;
  }
  r2 = scanner2.next();
  assertTrue("r2: " + r2 + " Should be null", r2 == null);
  scanner1.close();
  scanner2.close();
}
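This helper is typically driven with two scans that should surface identical cells by different routes, e.g. an unrestricted scan against one forced to fetch tiny batches. A sketch (the table reference and family name are assumptions):

// With allowPartialResults left at its default of false, the client stitches
// partial Results back into whole rows, so the two scanners should match.
Scan unrestricted = new Scan().addFamily(Bytes.toBytes("testFamily"));
Scan tinyBatches = new Scan().addFamily(Bytes.toBytes("testFamily")).setMaxResultSize(1);
testEquivalenceOfScanResults(table, unrestricted, tinyBatches);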
use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.
the class TestPartialResultsFromClientSide method testReadPointAndPartialResults.
@Test
public void testReadPointAndPartialResults() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  int numRows = 5;
  int numFamilies = 5;
  int numQualifiers = 5;
  byte[][] rows = HTestConst.makeNAscii(Bytes.toBytes("testRow"), numRows);
  byte[][] families = HTestConst.makeNAscii(Bytes.toBytes("testFamily"), numFamilies);
  byte[][] qualifiers = HTestConst.makeNAscii(Bytes.toBytes("testQualifier"), numQualifiers);
  byte[] value = Bytes.createMaxByteArray(100);
  Table tmpTable = createTestTable(tableName, rows, families, qualifiers, value);
  // Open scanner before deletes
  ResultScanner scanner =
      tmpTable.getScanner(new Scan().setMaxResultSize(1).setAllowPartialResults(true));
  // openScanner is now executed lazily, i.e., the scanner is only opened on the
  // first next() call, so make one next() call here to actually open it. The
  // maxResultSize limit ensures we do not fetch all the data at once, so the
  // test still works.
  int scannerCount = scanner.next().rawCells().length;
  Delete delete1 = new Delete(rows[0]);
  delete1.addColumn(families[0], qualifiers[0], 0);
  tmpTable.delete(delete1);
  Delete delete2 = new Delete(rows[1]);
  delete2.addColumn(families[1], qualifiers[1], 1);
  tmpTable.delete(delete2);
  // Should see all cells because the scanner was opened prior to the deletes
  scannerCount += countCellsFromScanner(scanner);
  int expectedCount = numRows * numFamilies * numQualifiers;
  assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount,
      scannerCount == expectedCount);
  // Minus 2 for the two cells that were deleted
  scanner = tmpTable.getScanner(new Scan().setMaxResultSize(1).setAllowPartialResults(true));
  scannerCount = countCellsFromScanner(scanner);
  expectedCount = numRows * numFamilies * numQualifiers - 2;
  assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount,
      scannerCount == expectedCount);
  scanner = tmpTable.getScanner(new Scan().setMaxResultSize(1).setAllowPartialResults(true));
  scannerCount = scanner.next().rawCells().length;
  // Put in 2 new cells. Their timestamps differ from those of the deleted cells
  Put put1 = new Put(rows[0]);
  put1.add(new KeyValue(rows[0], families[0], qualifiers[0], 1, value));
  tmpTable.put(put1);
  Put put2 = new Put(rows[1]);
  put2.add(new KeyValue(rows[1], families[1], qualifiers[1], 2, value));
  tmpTable.put(put2);
  // Scanner opened prior to the puts. Cell count shouldn't have changed
  scannerCount += countCellsFromScanner(scanner);
  expectedCount = numRows * numFamilies * numQualifiers - 2;
  assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount,
      scannerCount == expectedCount);
  // Now the scanner should see the cells that were added by the puts
  scanner = tmpTable.getScanner(new Scan().setMaxResultSize(1).setAllowPartialResults(true));
  scannerCount = countCellsFromScanner(scanner);
  expectedCount = numRows * numFamilies * numQualifiers;
  assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount,
      scannerCount == expectedCount);
  TEST_UTIL.deleteTable(tableName);
}
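The countCellsFromScanner helper is referenced but not shown above; a plausible implementation (an assumption, not necessarily the project's exact code) simply drains the scanner, sums the cells, and closes it:

private int countCellsFromScanner(ResultScanner scanner) throws Exception {
  // Drain every Result, partial or complete, counting the raw cells.
  Result result = null;
  int numCells = 0;
  while ((result = scanner.next()) != null) {
    numCells += result.rawCells().length;
  }
  scanner.close();
  return numCells;
}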
use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.
the class TestMultiVersions method testScanMultipleVersions.
/**
 * Port of the old TestScanMultipleVersions test, moved here so it can better
 * utilize the spun-up cluster by running more than just a single test. The
 * old test's craziness is kept.
 *
 * <p>Tests five cases of scans and timestamps.
 * @throws Exception
 */
@Test
public void testScanMultipleVersions() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
  final byte[][] rows = new byte[][] { Bytes.toBytes("row_0200"), Bytes.toBytes("row_0800") };
  final byte[][] splitRows = new byte[][] { Bytes.toBytes("row_0500") };
  final long[] timestamp = new long[] { 100L, 1000L };
  this.admin.createTable(desc, splitRows);
  Table table = UTIL.getConnection().getTable(tableName);
  // Assert we got the region layout wanted.
  Pair<byte[][], byte[][]> keys =
      UTIL.getConnection().getRegionLocator(tableName).getStartEndKeys();
  assertEquals(2, keys.getFirst().length);
  byte[][] startKeys = keys.getFirst();
  byte[][] endKeys = keys.getSecond();
  for (int i = 0; i < startKeys.length; i++) {
    if (i == 0) {
      assertTrue(Bytes.equals(HConstants.EMPTY_START_ROW, startKeys[i]));
      assertTrue(Bytes.equals(endKeys[i], splitRows[0]));
    } else if (i == 1) {
      assertTrue(Bytes.equals(splitRows[0], startKeys[i]));
      assertTrue(Bytes.equals(endKeys[i], HConstants.EMPTY_END_ROW));
    }
  }
  // Insert data
  List<Put> puts = new ArrayList<>();
  for (int i = 0; i < startKeys.length; i++) {
    for (int j = 0; j < timestamp.length; j++) {
      Put put = new Put(rows[i], timestamp[j]);
      put.addColumn(HConstants.CATALOG_FAMILY, null, timestamp[j], Bytes.toBytes(timestamp[j]));
      puts.add(put);
    }
  }
  table.put(puts);
  // There are 5 cases we have to test. Each is described below.
  for (int i = 0; i < rows.length; i++) {
    for (int j = 0; j < timestamp.length; j++) {
      Get get = new Get(rows[i]);
      get.addFamily(HConstants.CATALOG_FAMILY);
      get.setTimeStamp(timestamp[j]);
      Result result = table.get(get);
      int cellCount = 0;
      for (@SuppressWarnings("unused") Cell kv : result.listCells()) {
        cellCount++;
      }
      assertTrue(cellCount == 1);
    }
  }
  // Case 1: scan with LATEST_TIMESTAMP. Should get two rows
  int count = 0;
  Scan scan = new Scan();
  scan.addFamily(HConstants.CATALOG_FAMILY);
  ResultScanner s = table.getScanner(scan);
  try {
    for (Result rr = null; (rr = s.next()) != null; ) {
      System.out.println(rr.toString());
      count += 1;
    }
    assertEquals("Number of rows should be 2", 2, count);
  } finally {
    s.close();
  }
  // Case 2: Scan with a time range whose lower bound is the most recent
  // timestamp (in this case [1000, LATEST_TIMESTAMP)). Should get 2 rows.
  count = 0;
  scan = new Scan();
  scan.setTimeRange(1000L, Long.MAX_VALUE);
  scan.addFamily(HConstants.CATALOG_FAMILY);
  s = table.getScanner(scan);
  try {
    while (s.next() != null) {
      count += 1;
    }
    assertEquals("Number of rows should be 2", 2, count);
  } finally {
    s.close();
  }
  // Case 3: scan with timestamp equal to most recent timestamp
  // (in this case == 1000). Should get 2 rows.
  count = 0;
  scan = new Scan();
  scan.setTimeStamp(1000L);
  scan.addFamily(HConstants.CATALOG_FAMILY);
  s = table.getScanner(scan);
  try {
    while (s.next() != null) {
      count += 1;
    }
    assertEquals("Number of rows should be 2", 2, count);
  } finally {
    s.close();
  }
  // Case 4: scan with a time range covering the first timestamp but excluding
  // the second (100 <= timestamp < 1000). Should get 2 rows.
  count = 0;
  scan = new Scan();
  scan.setTimeRange(100L, 1000L);
  scan.addFamily(HConstants.CATALOG_FAMILY);
  s = table.getScanner(scan);
  try {
    while (s.next() != null) {
      count += 1;
    }
    assertEquals("Number of rows should be 2", 2, count);
  } finally {
    s.close();
  }
  // Case 5: scan with timestamp equal to first timestamp (100).
  // Should get 2 rows.
  count = 0;
  scan = new Scan();
  scan.setTimeStamp(100L);
  scan.addFamily(HConstants.CATALOG_FAMILY);
  s = table.getScanner(scan);
  try {
    while (s.next() != null) {
      count += 1;
    }
    assertEquals("Number of rows should be 2", 2, count);
  } finally {
    s.close();
  }
}
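Cases 2 through 5 hinge on TimeRange semantics: setTimeRange(min, max) matches cells with min <= ts < max, and setTimeStamp(ts) is shorthand for setTimeRange(ts, ts + 1). A minimal sketch of the distinction:

// Range scan: lower bound inclusive, upper bound exclusive.
Scan rangeScan = new Scan();
rangeScan.setTimeRange(100L, 1000L);  // sees the ts=100 cells, not the ts=1000 cells
// Point scan: exactly one timestamp.
Scan pointScan = new Scan();
pointScan.setTimeStamp(1000L);        // sees only the ts=1000 cells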