use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
the class TestZstdDictionarySplitMerge method test.
@Test
public void test() throws Exception {
  // Create the table
  final TableName tableName = TableName.valueOf("TestZstdDictionarySplitMerge");
  final byte[] cfName = Bytes.toBytes("info");
  final String dictionaryPath = DictionaryCache.RESOURCE_SCHEME + "zstd.test.dict";
  final TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfName)
      .setCompressionType(Compression.Algorithm.ZSTD)
      .setConfiguration(ZstdCodec.ZSTD_DICTIONARY_KEY, dictionaryPath).build())
    .build();
  final Admin admin = TEST_UTIL.getAdmin();
  admin.createTable(td, new byte[][] { Bytes.toBytes(1) });
  TEST_UTIL.waitTableAvailable(tableName);
  // Load some data
  Table t = ConnectionFactory.createConnection(conf).getTable(tableName);
  TEST_UTIL.loadNumericRows(t, cfName, 0, 100_000);
  admin.flush(tableName);
  assertTrue("Dictionary was not loaded", DictionaryCache.contains(dictionaryPath));
  TEST_UTIL.verifyNumericRows(t, cfName, 0, 100_000, 0);
  // Test split procedure
  admin.split(tableName, Bytes.toBytes(50_000));
  TEST_UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return TEST_UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 3;
    }

    @Override
    public String explainFailure() throws Exception {
      return "Split has not finished yet";
    }
  });
  TEST_UTIL.waitUntilNoRegionsInTransition();
  TEST_UTIL.verifyNumericRows(t, cfName, 0, 100_000, 0);
  // Test merge procedure
  RegionInfo regionA = null;
  RegionInfo regionB = null;
  for (RegionInfo region : admin.getRegions(tableName)) {
    if (region.getStartKey().length == 0) {
      regionA = region;
    } else if (Bytes.equals(region.getStartKey(), Bytes.toBytes(1))) {
      regionB = region;
    }
  }
  assertNotNull(regionA);
  assertNotNull(regionB);
  admin.mergeRegionsAsync(new byte[][] { regionA.getRegionName(), regionB.getRegionName() }, false)
    .get(30, TimeUnit.SECONDS);
  assertEquals(2, admin.getRegions(tableName).size());
  ServerName expected = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName();
  assertEquals(expected, TEST_UTIL.getConnection().getRegionLocator(tableName)
    .getRegionLocation(Bytes.toBytes(1), true).getServerName());
  try (AsyncConnection asyncConn = ConnectionFactory.createAsyncConnection(conf).get()) {
    assertEquals(expected, asyncConn.getRegionLocator(tableName)
      .getRegionLocation(Bytes.toBytes(1), true).get().getServerName());
  }
  TEST_UTIL.verifyNumericRows(t, cfName, 0, 100_000, 0);
}
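The test works because the dictionary path travels with the column family configuration, so it survives the split and merge procedures. As a minimal sketch of reading that setting back (reusing admin, tableName, cfName, and dictionaryPath from the test above; the assertion lines are an illustration, not part of the original test):

  // Read the live descriptor back and confirm the ZSTD dictionary setting is still there.
  TableDescriptor current = admin.getDescriptor(tableName);
  ColumnFamilyDescriptor cf = current.getColumnFamily(cfName);
  assertEquals(Compression.Algorithm.ZSTD, cf.getCompressionType());
  assertEquals(dictionaryPath, cf.getConfigurationValue(ZstdCodec.ZSTD_DICTIONARY_KEY));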
use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
the class SchemaResource method replace.
private Response replace(final TableName name, final TableSchemaModel model,
    final UriInfo uriInfo, final Admin admin) {
  if (servlet.isReadOnly()) {
    return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT)
      .entity("Forbidden" + CRLF).build();
  }
  try {
    TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(name);
    for (Map.Entry<QName, Object> e : model.getAny().entrySet()) {
      tableDescriptorBuilder.setValue(e.getKey().getLocalPart(), e.getValue().toString());
    }
    for (ColumnSchemaModel family : model.getColumns()) {
      ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder =
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family.getName()));
      for (Map.Entry<QName, Object> e : family.getAny().entrySet()) {
        columnFamilyDescriptorBuilder.setValue(e.getKey().getLocalPart(), e.getValue().toString());
      }
      tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptorBuilder.build());
    }
    TableDescriptor tableDescriptor = tableDescriptorBuilder.build();
    if (admin.tableExists(name)) {
      admin.disableTable(name);
      admin.modifyTable(tableDescriptor);
      admin.enableTable(name);
      servlet.getMetrics().incrementSucessfulPutRequests(1);
    } else {
      try {
        admin.createTable(tableDescriptor);
        servlet.getMetrics().incrementSucessfulPutRequests(1);
      } catch (TableExistsException e) {
        // race, someone else created a table with the same name
        return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT)
          .entity("Not modified" + CRLF).build();
      }
    }
    return Response.created(uriInfo.getAbsolutePath()).build();
  } catch (Exception e) {
    LOG.info("Caught exception", e);
    servlet.getMetrics().incrementFailedPutRequests(1);
    return processException(e);
  }
}
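The TableSchemaModel consumed by replace() is normally deserialized from the body of a REST PUT request, but it can also be built programmatically. A minimal sketch, assuming a table named "example" with one family "cf" (both names illustrative):

  // Build the schema model that replace() consumes, as a deserialized
  // REST payload would look after parsing.
  TableSchemaModel model = new TableSchemaModel();
  model.setName("example");
  ColumnSchemaModel family = new ColumnSchemaModel();
  family.setName("cf");
  model.addColumnFamily(family);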
use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
the class SchemaResource method update.
private Response update(final TableName name, final TableSchemaModel model,
    final UriInfo uriInfo, final Admin admin) {
  if (servlet.isReadOnly()) {
    return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT)
      .entity("Forbidden" + CRLF).build();
  }
  try {
    TableDescriptorBuilder tableDescriptorBuilder =
      TableDescriptorBuilder.newBuilder(admin.getDescriptor(name));
    admin.disableTable(name);
    try {
      // The builder is seeded from the existing descriptor and never mutated,
      // so it can be materialized once, outside the loop.
      TableDescriptor tableDescriptor = tableDescriptorBuilder.build();
      for (ColumnSchemaModel family : model.getColumns()) {
        ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder =
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family.getName()));
        for (Map.Entry<QName, Object> e : family.getAny().entrySet()) {
          columnFamilyDescriptorBuilder.setValue(e.getKey().getLocalPart(),
            e.getValue().toString());
        }
        ColumnFamilyDescriptor columnFamilyDescriptor = columnFamilyDescriptorBuilder.build();
        if (tableDescriptor.hasColumnFamily(columnFamilyDescriptor.getName())) {
          admin.modifyColumnFamily(name, columnFamilyDescriptor);
        } else {
          admin.addColumnFamily(name, columnFamilyDescriptor);
        }
      }
    } catch (IOException e) {
      return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT)
        .entity("Unavailable" + CRLF).build();
    } finally {
      admin.enableTable(TableName.valueOf(tableResource.getName()));
    }
    servlet.getMetrics().incrementSucessfulPutRequests(1);
    return Response.ok().build();
  } catch (Exception e) {
    servlet.getMetrics().incrementFailedPutRequests(1);
    return processException(e);
  }
}
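The core of update() is the add-versus-modify decision: a family named in the request is modified if the table already has it, added otherwise. A minimal standalone sketch of the same decision against a live table, assuming an open Admin `admin` and a TableName `name` (the family name and max-versions value are illustrative):

  // Decide between modifying an existing family and adding a new one.
  ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
    .newBuilder(Bytes.toBytes("cf"))
    .setMaxVersions(3)
    .build();
  if (admin.getDescriptor(name).hasColumnFamily(cf.getName())) {
    admin.modifyColumnFamily(name, cf); // family exists: change its settings
  } else {
    admin.addColumnFamily(name, cf); // family is new: add it to the table
  }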
use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
the class RestoreTool method incrementalRestoreTable.
/**
 * During an incremental backup operation, calls WalPlayer to replay the WALs in the backup image.
 * Currently tableNames and newTableNames only contain a single table; this will be expanded to
 * multiple tables in the future.
 * @param conn HBase connection
 * @param tableBackupPath backup path
 * @param logDirs incremental backup folders, which contain the WALs
 * @param tableNames source table names (the tables that were backed up)
 * @param newTableNames target table names (the tables to be restored to)
 * @param incrBackupId incremental backup Id
 * @throws IOException exception
 */
public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[] logDirs,
    TableName[] tableNames, TableName[] newTableNames, String incrBackupId) throws IOException {
  try (Admin admin = conn.getAdmin()) {
    if (tableNames.length != newTableNames.length) {
      throw new IOException("Number of source tables and target tables does not match!");
    }
    FileSystem fileSys = tableBackupPath.getFileSystem(this.conf);
    // For an incremental backup image the target tables must already exist,
    // e.g. created by the user or by restoring a full backup. Check that all
    // new tables exist.
    for (TableName tableName : newTableNames) {
      if (!admin.tableExists(tableName)) {
        throw new IOException("HBase table " + tableName
          + " does not exist. Create the table first, e.g. by restoring a full backup.");
      }
    }
    // adjust table schema
    for (int i = 0; i < tableNames.length; i++) {
      TableName tableName = tableNames[i];
      TableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, incrBackupId);
      if (tableDescriptor == null) {
        throw new IOException("Can't find " + tableName + "'s descriptor.");
      }
      LOG.debug("Found descriptor " + tableDescriptor + " through " + incrBackupId);
      TableName newTableName = newTableNames[i];
      TableDescriptor newTableDescriptor = admin.getDescriptor(newTableName);
      List<ColumnFamilyDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies());
      List<ColumnFamilyDescriptor> existingFamilies =
        Arrays.asList(newTableDescriptor.getColumnFamilies());
      TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(newTableDescriptor);
      boolean schemaChangeNeeded = false;
      // Add families present in the backup image but missing from the target table.
      for (ColumnFamilyDescriptor family : families) {
        if (!existingFamilies.contains(family)) {
          builder.setColumnFamily(family);
          schemaChangeNeeded = true;
        }
      }
      // Remove families present in the target table but absent from the backup image.
      for (ColumnFamilyDescriptor family : existingFamilies) {
        if (!families.contains(family)) {
          builder.removeColumnFamily(family.getName());
          schemaChangeNeeded = true;
        }
      }
      if (schemaChangeNeeded) {
        TableDescriptor modified = builder.build();
        modifyTableSync(conn, modified);
        // Log the descriptor that was actually applied, not the pre-change one.
        LOG.info("Changed " + modified.getTableName() + " to: " + modified);
      }
    }
    RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf);
    restoreService.run(logDirs, tableNames, newTableNames, false);
  }
}
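modifyTableSync is a helper defined elsewhere in RestoreTool. A hedged sketch of what such a helper plausibly looks like with the current Admin API, where modifyTable already blocks until the modify-table procedure completes (this is an assumption about the helper's shape, not the real implementation):

  // Apply the new descriptor and return only once the modification is done.
  static void modifyTableSync(Connection conn, TableDescriptor desc) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      admin.modifyTable(desc); // blocks until the modify-table procedure finishes
    }
  }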
use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
the class RestoreTool method getTableDesc.
/**
 * Get the table descriptor saved in the backup image.
 * @param tableName the table that was backed up
 * @return the {@link TableDescriptor} saved in the table's backup image
 * @throws IOException if the descriptor cannot be read or does not match the table
 */
TableDescriptor getTableDesc(TableName tableName) throws IOException {
  Path tableInfoPath = this.getTableInfoPath(tableName);
  SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableInfoPath);
  SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath, desc);
  TableDescriptor tableDescriptor = manifest.getTableDescriptor();
  if (!tableDescriptor.getTableName().equals(tableName)) {
    LOG.error("Couldn't find table descriptor for table: " + tableName + " under tableInfoPath: "
      + tableInfoPath.toString());
    LOG.error("tableDescriptor.getNameAsString() = "
      + tableDescriptor.getTableName().getNameAsString());
    throw new FileNotFoundException("Couldn't find table descriptor for table: " + tableName
      + " under tableInfoPath: " + tableInfoPath.toString());
  }
  return tableDescriptor;
}
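A hedged usage sketch (the restoreTool and admin handles, and the drift check itself, are illustrative assumptions): fetch the descriptor preserved in the backup image and compare it against the live table's schema before replaying WALs:

  // Compare the descriptor from the backup image with the live table's schema.
  TableDescriptor backedUp = restoreTool.getTableDesc(tableName);
  TableDescriptor live = admin.getDescriptor(tableName);
  if (!backedUp.equals(live)) {
    LOG.warn("Schema of " + tableName + " differs between backup image and live table");
  }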