Example usage of org.apache.hadoop.hbase.client.TableDescriptor in the Apache HBase project, taken from the class TestDeleteMobTable, method testDeleteNonMobTable.
@Test
public void testDeleteNonMobTable() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
TableDescriptor tableDescriptor = createTableDescriptor(tableName, false);
String family = tableDescriptor.getColumnFamily(FAMILY).getNameAsString();
Table table = createTableWithOneFile(tableDescriptor);
try {
// A non-mob table must not produce any mob files or a mob directory.
Assert.assertEquals(0, countMobFiles(tableName, family));
Assert.assertEquals(0, countArchiveMobFiles(tableName, family));
Assert.assertFalse(mobTableDirExist(tableName));
} finally {
table.close();
TEST_UTIL.deleteTable(tableName);
}
// After the table is dropped there is still no mob state left behind.
Assert.assertFalse(TEST_UTIL.getAdmin().tableExists(tableName));
Assert.assertEquals(0, countMobFiles(tableName, family));
Assert.assertEquals(0, countArchiveMobFiles(tableName, family));
Assert.assertFalse(mobTableDirExist(tableName));
}
Example usage of org.apache.hadoop.hbase.client.TableDescriptor in the Apache HBase project, taken from the class TestDeleteMobTable, method testMobFamilyDelete.
@Test
public void testMobFamilyDelete() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
TableDescriptor htd = createTableDescriptor(tableName, true);
String mobFamily = htd.getColumnFamily(FAMILY).getNameAsString();
// Add a second family so the table remains valid after the mob family is dropped.
htd = TableDescriptorBuilder.newBuilder(htd)
  .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("family2")))
  .build();
Table table = createTableWithOneFile(htd);
try {
// Exactly one mob file exists and nothing has been archived yet.
Assert.assertEquals(1, countMobFiles(tableName, mobFamily));
Assert.assertEquals(0, countArchiveMobFiles(tableName, mobFamily));
String fileName = assertHasOneMobRow(table, tableName, mobFamily);
Assert.assertFalse(mobArchiveExist(tableName, mobFamily, fileName));
Assert.assertTrue(mobTableDirExist(tableName));
// Dropping the mob family should move its mob file into the archive.
TEST_UTIL.getAdmin().deleteColumnFamily(tableName, FAMILY);
Assert.assertEquals(0, countMobFiles(tableName, mobFamily));
Assert.assertEquals(1, countArchiveMobFiles(tableName, mobFamily));
Assert.assertTrue(mobArchiveExist(tableName, mobFamily, fileName));
Assert.assertFalse(mobColumnFamilyDirExist(tableName, mobFamily));
} finally {
table.close();
TEST_UTIL.deleteTable(tableName);
}
}
Example usage of org.apache.hadoop.hbase.client.TableDescriptor in the Apache HBase project, taken from the class TestHRegion, method testGetWithFilter.
@Test
public void testGetWithFilter() throws IOException, InterruptedException {
byte[] row1 = Bytes.toBytes("row1");
byte[] fam1 = Bytes.toBytes("fam1");
byte[] col1 = Bytes.toBytes("col1");
byte[] value1 = Bytes.toBytes("value1");
byte[] value2 = Bytes.toBytes("value2");
final int maxVersions = 3;
// Single family capped at 3 versions, so writing 4 versions pushes the
// oldest one out of the user-visible window.
TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TableName.valueOf("testFilterAndColumnTracker")).setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam1).setMaxVersions(maxVersions).build()).build();
ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();
Path logDir = TEST_UTIL.getDataTestDirOnTestFS(method + ".log");
final WAL wal = HBaseTestingUtil.createWal(TEST_UTIL.getConfiguration(), logDir, info);
this.region = TEST_UTIL.createLocalHRegion(info, CONF, tableDescriptor, wal);
// Put 4 versions to memstore: at ts/ts+3 values that match the filter
// substring "value", at ts+1/ts+2 values that do not.
long ts = 0;
Put put = new Put(row1, ts);
put.addColumn(fam1, col1, value1);
region.put(put);
put = new Put(row1, ts + 1);
put.addColumn(fam1, col1, Bytes.toBytes("filter1"));
region.put(put);
put = new Put(row1, ts + 2);
put.addColumn(fam1, col1, Bytes.toBytes("filter2"));
region.put(put);
put = new Put(row1, ts + 3);
put.addColumn(fam1, col1, value2);
region.put(put);
Get get = new Get(row1);
get.readAllVersions();
Result res = region.get(get);
// Get 3 versions, the oldest version (ts=0, value1) has gone from user view
assertEquals(maxVersions, res.size());
get.setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value")));
res = region.get(get);
// When the value filter is applied, the oldest version should still be gone
// from the user view, so only the ts+3 cell ("value2") matches — one key value.
assertEquals(1, res.size());
assertTrue(CellUtil.matchingValue(new KeyValue(row1, fam1, col1, value2), res.rawCells()[0]));
assertEquals(ts + 3, res.rawCells()[0].getTimestamp());
region.flush(true);
region.compact(true);
// NOTE(review): fixed 1s sleep after compaction looks like it waits for
// background cleanup — confirm whether it is still required.
Thread.sleep(1000);
res = region.get(get);
// After flush and compact, the result should be consistent with previous result
assertEquals(1, res.size());
assertTrue(CellUtil.matchingValue(new KeyValue(row1, fam1, col1, value2), res.rawCells()[0]));
}
Example usage of org.apache.hadoop.hbase.client.TableDescriptor in the Apache HBase project, taken from the class TestRegionReplicaReplication, method testRegionReplicaWithoutMemstoreReplication.
@Test
public void testRegionReplicaWithoutMemstoreReplication() throws Exception {
int regionReplication = 3;
// Memstore replication is disabled, so secondary replicas should only see
// data once a flush makes it durable in store files.
TableDescriptor htd = HTU.createModifyableTableDescriptor(name.getMethodName())
  .setRegionReplication(regionReplication)
  .setRegionMemStoreReplication(false)
  .build();
createOrEnableTableWithRetries(htd, true);
final TableName tableName = htd.getTableName();
// try-with-resources: the previous try/finally acquired the connection and
// table before entering the try block, leaking the connection if getTable()
// threw. Resources close in reverse order (table, then connection), matching
// the original finally block.
try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  Table table = connection.getTable(tableName)) {
// write data to the primary. The replicas should not receive the data
final int STEP = 100;
for (int i = 0; i < 3; ++i) {
final int startRow = i * STEP;
final int endRow = (i + 1) * STEP;
LOG.info("Writing data from " + startRow + " to " + endRow);
HTU.loadNumericRows(table, HBaseTestingUtil.fam1, startRow, endRow);
verifyReplication(tableName, regionReplication, startRow, endRow, false);
// Flush the table, now the data should show up in the replicas
LOG.info("flushing table");
HTU.flush(tableName);
verifyReplication(tableName, regionReplication, 0, endRow, true);
}
}
}
Example usage of org.apache.hadoop.hbase.client.TableDescriptor in the Apache HBase project, taken from the class TestRegionReplicaReplication, method testRegionReplicaReplication.
/**
 * Tests region replica replication: creates a table with the given number of
 * replicas, writes data to the primary, and verifies it is replicated to the
 * secondary regions, while a second table without replicas receives none of it.
 */
private void testRegionReplicaReplication(int regionReplication) throws Exception {
// test region replica replication. Create a table with single region, write some data
// ensure that data is replicated to the secondary region
TableName tableName =
  TableName.valueOf("testRegionReplicaReplicationWithReplicas_" + regionReplication);
// Pass tableName directly; the previous TableName.valueOf(tableName.toString())
// round-trip was redundant.
TableDescriptor htd = HTU
  .createModifyableTableDescriptor(tableName,
    ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER,
    ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED)
  .setRegionReplication(regionReplication).build();
createOrEnableTableWithRetries(htd, true);
TableName tableNameNoReplicas =
  TableName.valueOf("testRegionReplicaReplicationWithReplicas_NO_REPLICAS");
HTU.deleteTableIfAny(tableNameNoReplicas);
HTU.createTable(tableNameNoReplicas, HBaseTestingUtil.fam1);
try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  Table table = connection.getTable(tableName);
  Table tableNoReplicas = connection.getTable(tableNameNoReplicas)) {
// load some data to the non-replicated table; its rows must not be replicated
HTU.loadNumericRows(tableNoReplicas, HBaseTestingUtil.fam1, 6000, 7000);
// load the data to the replicated table and verify the secondaries see it
HTU.loadNumericRows(table, HBaseTestingUtil.fam1, 0, 1000);
verifyReplication(tableName, regionReplication, 0, 1000);
} finally {
HTU.deleteTableIfAny(tableNameNoReplicas);
}
}
Aggregations