Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
In class TestNamespaceAuditor, method testRestoreSnapshot.
@Test
public void testRestoreSnapshot() throws Exception {
  String nsp = prefix + "_testRestoreSnapshot";
  NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp)
    .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "10").build();
  ADMIN.createNamespace(nspDesc);
  assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(nsp));
  TableName tableName1 = TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1");
  ColumnFamilyDescriptor columnFamilyDescriptor =
    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")).build();
  TableDescriptorBuilder tableDescOne = TableDescriptorBuilder.newBuilder(tableName1);
  tableDescOne.setColumnFamily(columnFamilyDescriptor);
  ADMIN.createTable(tableDescOne.build(), Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4);
  NamespaceTableAndRegionInfo nstate = getNamespaceState(nsp);
  assertEquals("Initial region count should be 4.", 4, nstate.getRegionCount());
  String snapshot = "snapshot_testRestoreSnapshot";
  ADMIN.snapshot(snapshot, tableName1);
  List<RegionInfo> regions = ADMIN.getRegions(tableName1);
  Collections.sort(regions, RegionInfo.COMPARATOR);
  ADMIN.split(tableName1, Bytes.toBytes("JJJ"));
  Thread.sleep(2000);
  assertEquals("Total regions count should be 5.", 5, nstate.getRegionCount());
  ADMIN.disableTable(tableName1);
  ADMIN.restoreSnapshot(snapshot);
  assertEquals("Total regions count should be 4 after restore.", 4, nstate.getRegionCount());
  ADMIN.enableTable(tableName1);
  ADMIN.deleteSnapshot(snapshot);
}
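The snapshot/restore flow in this test generalizes to ordinary client code. Below is a minimal, self-contained sketch of the same lifecycle, assuming a cluster reachable through the default configuration; the class name, table name, and snapshot name are hypothetical, not taken from the test above.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SnapshotRestoreSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("demo_table"); // hypothetical table name
      // Build an immutable column family descriptor and attach it to the table descriptor.
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.of("fam1");
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(table).setColumnFamily(cf).build();
      admin.createTable(desc);
      admin.snapshot("demo_snapshot", table); // capture the table's current state
      admin.disableTable(table);              // restore requires the table to be disabled
      admin.restoreSnapshot("demo_snapshot"); // roll region layout and data back
      admin.enableTable(table);
      admin.deleteSnapshot("demo_snapshot");
    }
  }
}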
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
In class TestNamespaceAuditor, method testTableOperations.
@Test
public void testTableOperations() throws Exception {
  String nsp = prefix + "_np2";
  NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp)
    .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "5")
    .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
  ADMIN.createNamespace(nspDesc);
  assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(nsp));
  assertEquals(3, ADMIN.listNamespaceDescriptors().length);
  ColumnFamilyDescriptor columnFamilyDescriptor =
    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")).build();
  TableDescriptorBuilder tableDescOne = TableDescriptorBuilder
    .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1"));
  tableDescOne.setColumnFamily(columnFamilyDescriptor);
  TableDescriptorBuilder tableDescTwo = TableDescriptorBuilder
    .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2"));
  tableDescTwo.setColumnFamily(columnFamilyDescriptor);
  TableDescriptorBuilder tableDescThree = TableDescriptorBuilder
    .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table3"));
  tableDescThree.setColumnFamily(columnFamilyDescriptor);
  ADMIN.createTable(tableDescOne.build());
  boolean constraintViolated = false;
  try {
    ADMIN.createTable(tableDescTwo.build(), Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 5);
  } catch (Exception exp) {
    assertTrue(exp instanceof IOException);
    constraintViolated = true;
  } finally {
    assertTrue("Constraint not violated for table " + tableDescTwo.build().getTableName(),
      constraintViolated);
  }
  ADMIN.createTable(tableDescTwo.build(), Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4);
  NamespaceTableAndRegionInfo nspState = getQuotaManager().getState(nsp);
  assertNotNull(nspState);
  assertTrue(nspState.getTables().size() == 2);
  assertTrue(nspState.getRegionCount() == 5);
  constraintViolated = false;
  try {
    ADMIN.createTable(tableDescThree.build());
  } catch (Exception exp) {
    assertTrue(exp instanceof IOException);
    constraintViolated = true;
  } finally {
    assertTrue("Constraint not violated for table " + tableDescThree.build().getTableName(),
      constraintViolated);
  }
}
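Note that the test reuses a single ColumnFamilyDescriptor across three TableDescriptorBuilders, which is safe because the built descriptor is immutable. The quota setup it exercises reduces to the following sketch, assuming an existing Admin handle; the class and namespace names are hypothetical, and the string keys are the values behind TableNamespaceManager.KEY_MAX_TABLES and KEY_MAX_REGIONS used above.

import java.io.IOException;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;

class QuotaNamespaceSketch {
  static void createQuotaLimitedNamespace(Admin admin) throws IOException {
    // These config keys correspond to TableNamespaceManager.KEY_MAX_TABLES
    // and TableNamespaceManager.KEY_MAX_REGIONS in the test above.
    NamespaceDescriptor nsDesc = NamespaceDescriptor.create("quota_ns") // hypothetical name
      .addConfiguration("hbase.namespace.quota.maxtables", "2")
      .addConfiguration("hbase.namespace.quota.maxregions", "5")
      .build();
    admin.createNamespace(nsDesc);
    // A third table, or a table pushing the region total past 5, now fails with an
    // IOException, which is exactly what the try/catch blocks above assert.
  }
}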
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
In class TestNamespaceAuditor, method testExceedTableQuotaInNamespace.
@Test(expected = QuotaExceededException.class)
public void testExceedTableQuotaInNamespace() throws Exception {
  String nsp = prefix + "_testExceedTableQuotaInNamespace";
  NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp)
    .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "1").build();
  ADMIN.createNamespace(nspDesc);
  assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(nsp));
  assertEquals(3, ADMIN.listNamespaceDescriptors().length);
  ColumnFamilyDescriptor columnFamilyDescriptor =
    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")).build();
  TableDescriptorBuilder tableDescOne = TableDescriptorBuilder
    .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1"));
  tableDescOne.setColumnFamily(columnFamilyDescriptor);
  TableDescriptorBuilder tableDescTwo = TableDescriptorBuilder
    .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2"));
  tableDescTwo.setColumnFamily(columnFamilyDescriptor);
  ADMIN.createTable(tableDescOne.build());
  ADMIN.createTable(tableDescTwo.build(), Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4);
}
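Here JUnit's expected-exception attribute verifies that the second createTable fails once the one-table quota is reached. Outside a test, client code would catch the exception itself; a minimal sketch, with a hypothetical class and helper name:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.quotas.QuotaExceededException;

class QuotaAwareCreate {
  /** Returns true if the table was created, false if the namespace quota blocked it. */
  static boolean tryCreate(Admin admin, TableDescriptor desc) throws IOException {
    try {
      admin.createTable(desc);
      return true;
    } catch (QuotaExceededException e) {
      // The namespace is at its configured table or region limit; retrying will keep
      // failing until the quota or the namespace contents change.
      return false;
    }
  }
}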
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
In class TestStripeCompactor, method createCompactor.
private StripeCompactor createCompactor(StoreFileWritersCapture writers, KeyValue[] input)
    throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setBoolean("hbase.regionserver.compaction.private.readers", usePrivateReaders);
  final Scanner scanner = new Scanner(input);
  // Create store mock that is satisfactory for compactor.
  ColumnFamilyDescriptor familyDescriptor = ColumnFamilyDescriptorBuilder.of(NAME_OF_THINGS);
  ScanInfo si = new ScanInfo(conf, familyDescriptor, Long.MAX_VALUE, 0, CellComparatorImpl.COMPARATOR);
  HStore store = mock(HStore.class);
  when(store.getColumnFamilyDescriptor()).thenReturn(familyDescriptor);
  when(store.getScanInfo()).thenReturn(si);
  when(store.areWritesEnabled()).thenReturn(true);
  when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
  when(store.getRegionInfo()).thenReturn(RegionInfoBuilder.newBuilder(TABLE_NAME).build());
  StoreEngine storeEngine = mock(StoreEngine.class);
  when(storeEngine.createWriter(any(CreateStoreFileWriterParams.class))).thenAnswer(writers);
  when(store.getStoreEngine()).thenReturn(storeEngine);
  when(store.getComparator()).thenReturn(CellComparatorImpl.COMPARATOR);
  return new StripeCompactor(conf, store) {

    @Override
    protected InternalScanner createScanner(HStore store, ScanInfo scanInfo,
        List<StoreFileScanner> scanners, long smallestReadPoint, long earliestPutTs,
        byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
      return scanner;
    }

    @Override
    protected InternalScanner createScanner(HStore store, ScanInfo scanInfo,
        List<StoreFileScanner> scanners, ScanType scanType, long smallestReadPoint,
        long earliestPutTs) throws IOException {
      return scanner;
    }
  };
}
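This mock setup builds its descriptor with ColumnFamilyDescriptorBuilder.of(NAME_OF_THINGS), the one-call shorthand for a family with every setting left at its default. The two forms in the sketch below are equivalent; the class name and family name are hypothetical.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

class DescriptorShorthand {
  static void demo() {
    // Shorthand: name only, all other settings at their defaults.
    ColumnFamilyDescriptor viaOf = ColumnFamilyDescriptorBuilder.of("fam1");
    // Equivalent builder form, useful when further settings will be chained before build().
    ColumnFamilyDescriptor viaBuilder =
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")).build();
  }
}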
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
In class TestBulkLoadHFiles, method testSplitStoreFileWithDifferentEncoding.
private void testSplitStoreFileWithDifferentEncoding(DataBlockEncoding bulkloadEncoding,
    DataBlockEncoding cfEncoding) throws IOException {
  Path dir = util.getDataTestDirOnTestFS("testSplitHFileWithDifferentEncoding");
  FileSystem fs = util.getTestFileSystem();
  Path testIn = new Path(dir, "testhfile");
  ColumnFamilyDescriptor familyDesc =
    ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setDataBlockEncoding(cfEncoding).build();
  HFileTestUtil.createHFileWithDataBlockEncoding(util.getConfiguration(), fs, testIn,
    bulkloadEncoding, FAMILY, QUALIFIER, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);
  Path bottomOut = new Path(dir, "bottom.out");
  Path topOut = new Path(dir, "top.out");
  BulkLoadHFilesTool.splitStoreFile(util.getConfiguration(), testIn, familyDesc,
    Bytes.toBytes("ggg"), bottomOut, topOut);
  int rowCount = verifyHFile(bottomOut);
  rowCount += verifyHFile(topOut);
  assertEquals(1000, rowCount);
}
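The test drives setDataBlockEncoding with a parameterized encoding. A concrete sketch of the same builder call with a fixed encoding follows, with hypothetical class and family names; FAST_DIFF is one of the DataBlockEncoding values, alongside NONE, PREFIX, DIFF, and ROW_INDEX_V1.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;

class EncodedFamilySketch {
  static ColumnFamilyDescriptor fastDiffFamily() {
    // FAST_DIFF encodes each cell as a delta against the previous one,
    // trading some CPU for smaller data blocks on disk and in cache.
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")) // hypothetical family
        .setDataBlockEncoding(DataBlockEncoding.FAST_DIFF)
        .build();
  }
}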