use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
the class TestZooKeeperTableArchiveClient method testArchivingOnSingleTable.
@Test(timeout = 300000)
public void testArchivingOnSingleTable() throws Exception {
  createArchiveDirectory();
  FileSystem fs = UTIL.getTestFileSystem();
  Path archiveDir = getArchiveDir();
  Path tableDir = getTableDir(STRING_TABLE_NAME);
  toCleanup.add(archiveDir);
  toCleanup.add(tableDir);
  Configuration conf = UTIL.getConfiguration();
  // set up the delegate
  Stoppable stop = new StoppableImplementation();
  HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
  List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
  final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
  // create the region
  HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
  HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
  List<Region> regions = new ArrayList<>();
  regions.add(region);
  when(rss.getOnlineRegions()).thenReturn(regions);
  final CompactedHFilesDischarger compactionCleaner =
      new CompactedHFilesDischarger(100, stop, rss, false);
  loadFlushAndCompact(region, TEST_FAM);
  compactionCleaner.chore();
  // get the current hfiles in the archive directory
  List<Path> files = getAllFiles(fs, archiveDir);
  if (files == null) {
    FSUtils.logFileSystemState(fs, UTIL.getDataTestDir(), LOG);
    throw new RuntimeException("Didn't archive any files!");
  }
  CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());
  runCleaner(cleaner, finished, stop);
  // we know the cleaner ran, so now check all the files again to make sure they are still there
  List<Path> archivedFiles = getAllFiles(fs, archiveDir);
  assertEquals("Archived files changed after running archive cleaner.", files, archivedFiles);
  // but we still have the archive directory
  assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
}
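The HColumnDescriptor in this example only defines the single column family for the test region. A minimal sketch of just that setup, assuming the HBaseTestingUtility harness (the table name and family bytes below are illustrative stand-ins, not names from the test):

// Sketch with assumed names: create a one-family test region the same way
// the test above does, via HBaseTestingUtility.createTestRegion.
HBaseTestingUtility util = new HBaseTestingUtility();
byte[] family = Bytes.toBytes("fam");
HColumnDescriptor hcd = new HColumnDescriptor(family);
HRegion region = util.createTestRegion("archiveDemo", hcd);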
use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
the class TestAdmin1 method testSplitAndMergeWithReplicaTable.
@Test
public void testSplitAndMergeWithReplicaTable() throws Exception {
  // The test tries to directly split replica regions and directly merge replica regions. These
  // are not allowed. The test validates that. Then the test does a valid split/merge of allowed
  // regions.
  // Set up a table with 3 regions and replication set to 3
  final TableName tableName = TableName.valueOf(name.getMethodName());
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setRegionReplication(3);
  byte[] cf = "f".getBytes();
  HColumnDescriptor hcd = new HColumnDescriptor(cf);
  desc.addFamily(hcd);
  byte[][] splitRows = new byte[2][];
  splitRows[0] = new byte[] { (byte) '4' };
  splitRows[1] = new byte[] { (byte) '7' };
  TEST_UTIL.getAdmin().createTable(desc, splitRows);
  List<HRegion> oldRegions;
  do {
    oldRegions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
    Thread.sleep(10);
  } while (oldRegions.size() != 9); // 3 regions * 3 replicas
  // write some data to the table
  Table ht = TEST_UTIL.getConnection().getTable(tableName);
  List<Put> puts = new ArrayList<>();
  byte[] qualifier = "c".getBytes();
  Put put = new Put(new byte[] { (byte) '1' });
  put.addColumn(cf, qualifier, "100".getBytes());
  puts.add(put);
  put = new Put(new byte[] { (byte) '6' });
  put.addColumn(cf, qualifier, "100".getBytes());
  puts.add(put);
  put = new Put(new byte[] { (byte) '8' });
  put.addColumn(cf, qualifier, "100".getBytes());
  puts.add(put);
  ht.put(puts);
  ht.close();
  List<Pair<HRegionInfo, ServerName>> regions =
      MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), tableName);
  boolean gotException = false;
  // the element at index 1 would be a replica (since the metareader gives us ordered
  // regions). Try splitting that region via the split API. Should fail
  try {
    TEST_UTIL.getAdmin().splitRegion(regions.get(1).getFirst().getRegionName());
  } catch (IllegalArgumentException ex) {
    gotException = true;
  }
  assertTrue(gotException);
  gotException = false;
  // the element at index 1 would be a replica (since the metareader gives us ordered
  // regions). Try splitting that region via a different split API (the difference is
  // this API goes direct to the regionserver skipping any checks in the admin). Should fail
  try {
    TEST_UTIL.getHBaseAdmin().split(regions.get(1).getSecond(), regions.get(1).getFirst(),
        new byte[] { (byte) '1' });
  } catch (IOException ex) {
    gotException = true;
  }
  assertTrue(gotException);
  gotException = false;
  // Try merging a replica with another. Should fail.
  try {
    TEST_UTIL.getHBaseAdmin().mergeRegionsSync(regions.get(1).getFirst().getEncodedNameAsBytes(),
        regions.get(2).getFirst().getEncodedNameAsBytes(), true);
  } catch (IllegalArgumentException m) {
    gotException = true;
  }
  assertTrue(gotException);
  // Try going to the master directly (that will skip the check in admin)
  try {
    byte[][] nameofRegionsToMerge = new byte[2][];
    nameofRegionsToMerge[0] = regions.get(1).getFirst().getEncodedNameAsBytes();
    nameofRegionsToMerge[1] = regions.get(2).getFirst().getEncodedNameAsBytes();
    MergeTableRegionsRequest request = RequestConverter.buildMergeTableRegionsRequest(
        nameofRegionsToMerge, true, HConstants.NO_NONCE, HConstants.NO_NONCE);
    ((ClusterConnection) TEST_UTIL.getAdmin().getConnection()).getMaster()
        .mergeTableRegions(null, request);
  } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException m) {
    Throwable t = m.getCause();
    do {
      if (t instanceof MergeRegionException) {
        gotException = true;
        break;
      }
      t = t.getCause();
    } while (t != null);
  }
  assertTrue(gotException);
}
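Stripped of the failure-path assertions, the table setup that drives this test is region replication on the descriptor plus explicit split keys at creation time. A minimal sketch, assuming an Admin handle named admin and an illustrative table name:

// Sketch with assumed names: a pre-split table with 3 replicas per region,
// yielding 3 regions x 3 replicas = 9 region instances in the cluster.
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("replicaDemo"));
desc.setRegionReplication(3);
desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));
byte[][] splitRows = new byte[][] { Bytes.toBytes("4"), Bytes.toBytes("7") };
admin.createTable(desc, splitRows);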
use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
the class TestAdmin1 method testHFileReplication.
/*
 * Test DFS replication for column families, where one CF has default replication (3) and the
 * other is set to 1.
 */
@Test(timeout = 300000)
public void testHFileReplication() throws Exception {
  final TableName tableName = TableName.valueOf(this.name.getMethodName());
  String fn1 = "rep1";
  HColumnDescriptor hcd1 = new HColumnDescriptor(fn1);
  hcd1.setDFSReplication((short) 1);
  String fn = "defaultRep";
  HColumnDescriptor hcd = new HColumnDescriptor(fn);
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(hcd);
  htd.addFamily(hcd1);
  Table table = TEST_UTIL.createTable(htd, null);
  TEST_UTIL.waitTableAvailable(tableName);
  Put p = new Put(Bytes.toBytes("defaultRep_rk"));
  byte[] q1 = Bytes.toBytes("q1");
  byte[] v1 = Bytes.toBytes("v1");
  p.addColumn(Bytes.toBytes(fn), q1, v1);
  List<Put> puts = new ArrayList<>(2);
  puts.add(p);
  p = new Put(Bytes.toBytes("rep1_rk"));
  p.addColumn(Bytes.toBytes(fn1), q1, v1);
  puts.add(p);
  try {
    table.put(puts);
    admin.flush(tableName);
    List<HRegion> regions = TEST_UTIL.getMiniHBaseCluster().getRegions(tableName);
    for (HRegion r : regions) {
      Store store = r.getStore(Bytes.toBytes(fn));
      for (StoreFile sf : store.getStorefiles()) {
        assertTrue(sf.toString().contains(fn));
        assertTrue("Column family " + fn + " should have 3 copies",
            FSUtils.getDefaultReplication(TEST_UTIL.getTestFileSystem(), sf.getPath())
                == (sf.getFileInfo().getFileStatus().getReplication()));
      }
      store = r.getStore(Bytes.toBytes(fn1));
      for (StoreFile sf : store.getStorefiles()) {
        assertTrue(sf.toString().contains(fn1));
        assertTrue("Column family " + fn1 + " should have only 1 copy",
            1 == sf.getFileInfo().getFileStatus().getReplication());
      }
    }
  } finally {
    if (admin.isTableEnabled(tableName)) {
      this.admin.disableTable(tableName);
      this.admin.deleteTable(tableName);
    }
  }
}
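The HColumnDescriptor call at the heart of this test is setDFSReplication, which pins the HDFS replication factor of a single column family's store files. A minimal sketch of the descriptor setup, with an illustrative table name:

// Sketch with assumed names: one family left at the HDFS default
// replication (typically 3), one pinned to a single copy.
HColumnDescriptor lowRep = new HColumnDescriptor("rep1");
lowRep.setDFSReplication((short) 1);
HColumnDescriptor defaultRep = new HColumnDescriptor("defaultRep");
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("replicationDemo"));
htd.addFamily(defaultRep);
htd.addFamily(lowRep);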
use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
the class TestAdmin1 method testTableAvailableWithRandomSplitKeys.
@Test(timeout = 300000)
public void testTableAvailableWithRandomSplitKeys() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor("col"));
  byte[][] splitKeys = new byte[][] { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 } };
  admin.createTable(desc);
  boolean tableAvailable = admin.isTableAvailable(tableName, splitKeys);
  assertFalse("Table should be created with 1 row in META", tableAvailable);
}
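The assertion holds because isTableAvailable(tableName, splitKeys) verifies that hbase:meta contains a region for each supplied split key; the table was created without splits, so only one region row exists and the check returns false. The call in isolation, with an assumed table name:

// Sketch with assumed names: availability check against split keys the
// table was never created with; expected to return false.
byte[][] expectedSplits = new byte[][] { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 } };
boolean available = admin.isTableAvailable(TableName.valueOf("demo"), expectedSplits);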
use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
the class TestAdmin1 method testOnlineChangeTableSchema.
/**
 * Verify schema modification takes.
 * @throws IOException
 * @throws InterruptedException
 */
@Test(timeout = 300000)
public void testOnlineChangeTableSchema() throws IOException, InterruptedException {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  HTableDescriptor[] tables = admin.listTables();
  int numTables = tables.length;
  TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
  tables = this.admin.listTables();
  assertEquals(numTables + 1, tables.length);
  // FIRST, do htabledescriptor changes.
  HTableDescriptor htd = this.admin.getTableDescriptor(tableName);
  // Make a copy and assert copy is good.
  HTableDescriptor copy = new HTableDescriptor(htd);
  assertTrue(htd.equals(copy));
  // Now amend the copy. Introduce differences.
  long newFlushSize = htd.getMemStoreFlushSize() / 2;
  if (newFlushSize <= 0) {
    newFlushSize = HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE / 2;
  }
  copy.setMemStoreFlushSize(newFlushSize);
  final String key = "anyoldkey";
  assertTrue(htd.getValue(key) == null);
  copy.setValue(key, key);
  boolean expectedException = false;
  try {
    admin.modifyTable(tableName, copy);
  } catch (TableNotDisabledException re) {
    expectedException = true;
  }
  assertFalse(expectedException);
  HTableDescriptor modifiedHtd = this.admin.getTableDescriptor(tableName);
  assertFalse(htd.equals(modifiedHtd));
  assertTrue(copy.equals(modifiedHtd));
  assertEquals(newFlushSize, modifiedHtd.getMemStoreFlushSize());
  assertEquals(key, modifiedHtd.getValue(key));
  // Now work on column family changes.
  int countOfFamilies = modifiedHtd.getFamilies().size();
  assertTrue(countOfFamilies > 0);
  HColumnDescriptor hcd = modifiedHtd.getFamilies().iterator().next();
  int maxversions = hcd.getMaxVersions();
  final int newMaxVersions = maxversions + 1;
  hcd.setMaxVersions(newMaxVersions);
  final byte[] hcdName = hcd.getName();
  expectedException = false;
  try {
    this.admin.modifyColumnFamily(tableName, hcd);
  } catch (TableNotDisabledException re) {
    expectedException = true;
  }
  assertFalse(expectedException);
  modifiedHtd = this.admin.getTableDescriptor(tableName);
  HColumnDescriptor modifiedHcd = modifiedHtd.getFamily(hcdName);
  assertEquals(newMaxVersions, modifiedHcd.getMaxVersions());
  // Try adding a column
  assertFalse(this.admin.isTableDisabled(tableName));
  final String xtracolName = "xtracol";
  HColumnDescriptor xtracol = new HColumnDescriptor(xtracolName);
  xtracol.setValue(xtracolName, xtracolName);
  expectedException = false;
  try {
    this.admin.addColumnFamily(tableName, xtracol);
  } catch (TableNotDisabledException re) {
    expectedException = true;
  }
  // Add column should work even if the table is enabled
  assertFalse(expectedException);
  modifiedHtd = this.admin.getTableDescriptor(tableName);
  hcd = modifiedHtd.getFamily(xtracol.getName());
  assertTrue(hcd != null);
  assertTrue(hcd.getValue(xtracolName).equals(xtracolName));
  // Delete the just-added column.
  this.admin.deleteColumnFamily(tableName, xtracol.getName());
  modifiedHtd = this.admin.getTableDescriptor(tableName);
  hcd = modifiedHtd.getFamily(xtracol.getName());
  assertTrue(hcd == null);
  // Delete the table
  this.admin.disableTable(tableName);
  this.admin.deleteTable(tableName);
  this.admin.listTables();
  assertFalse(this.admin.tableExists(tableName));
}
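The pattern this test exercises, online schema changes against an enabled table, reduces to a handful of Admin calls. A minimal sketch, assuming an Admin handle named admin and an illustrative table name:

// Sketch with assumed names: modify, add, and delete a column family
// without disabling the table first.
TableName tn = TableName.valueOf("schemaDemo");
HColumnDescriptor hcd = admin.getTableDescriptor(tn).getFamilies().iterator().next();
hcd.setMaxVersions(hcd.getMaxVersions() + 1);
admin.modifyColumnFamily(tn, hcd); // no disableTable() needed
admin.addColumnFamily(tn, new HColumnDescriptor("extra"));
admin.deleteColumnFamily(tn, Bytes.toBytes("extra"));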