use of org.apache.hadoop.hbase.client.TableDescriptorBuilder in project hbase by apache.
the class TestClassLoading method testClassLoadingFromLocalFS.
@Test
// HBASE-3516: Test CP Class loading from local file system
public void testClassLoadingFromLocalFS() throws Exception {
  File jarFile = buildCoprocessorJar(cpName3);

  // create a table that references the jar
  TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(TableName.valueOf(cpName3));
  tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")).build());
  tdb.setValue("COPROCESSOR$1",
    getLocalPath(jarFile) + "|" + cpName3 + "|" + Coprocessor.PRIORITY_USER);
  TableDescriptor tableDescriptor = tdb.build();
  Admin admin = TEST_UTIL.getAdmin();
  admin.createTable(tableDescriptor);
  waitForTable(tableDescriptor.getTableName());

  // verify that the coprocessor was loaded
  boolean found = false;
  SingleProcessHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
  for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
    if (region.getRegionInfo().getRegionNameAsString().startsWith(cpName3)) {
      found = (region.getCoprocessorHost().findCoprocessor(cpName3) != null);
    }
  }
  assertTrue("Class " + cpName3 + " was missing on a region", found);
}
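The test above attaches the coprocessor through the raw "COPROCESSOR$1" table attribute in the "path|class|priority" format. As a point of comparison, here is a minimal sketch of the same setup using the CoprocessorDescriptorBuilder API from org.apache.hadoop.hbase.client; the table name "cp_table" and the className/jarPath parameters are illustrative placeholders, not values taken from the test.

import java.io.IOException;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorTableExample {
  // Builds a table descriptor equivalent to setValue("COPROCESSOR$1", path|class|priority).
  static TableDescriptor buildTable(String className, String jarPath) throws IOException {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("cp_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("test"))
        .setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(className)
            .setJarPath(jarPath)                    // file:/... or hdfs:/... location of the jar
            .setPriority(Coprocessor.PRIORITY_USER)
            .build())
        .build();
  }
}

The builder form avoids hand-assembling the pipe-delimited attribute string and keeps the jar path, class name, and priority as separate, typed fields.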
use of org.apache.hadoop.hbase.client.TableDescriptorBuilder in project hbase by apache.
the class TestClassLoading method loadingClassFromLibDirInJar.
void loadingClassFromLibDirInJar(String libPrefix) throws Exception {
  FileSystem fs = cluster.getFileSystem();
  File innerJarFile1 = buildCoprocessorJar(cpName1);
  File innerJarFile2 = buildCoprocessorJar(cpName2);
  File outerJarFile = new File(TEST_UTIL.getDataTestDir().toString(), "outer.jar");
  ClassLoaderTestHelper.addJarFilesToJar(outerJarFile, libPrefix, innerJarFile1, innerJarFile2);

  // copy the jars into dfs
  fs.copyFromLocalFile(new Path(outerJarFile.getPath()),
    new Path(fs.getUri().toString() + Path.SEPARATOR));
  String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR + outerJarFile.getName();
  assertTrue("Copy jar file to HDFS failed.", fs.exists(new Path(jarFileOnHDFS)));
  LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS);

  // create a table that references the coprocessors
  TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(tableName);
  tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")).build());
  // without configuration values
  tdb.setValue("COPROCESSOR$1", jarFileOnHDFS + "|" + cpName1 + "|" + Coprocessor.PRIORITY_USER);
  // with configuration values
  tdb.setValue("COPROCESSOR$2",
    jarFileOnHDFS + "|" + cpName2 + "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
  Admin admin = TEST_UTIL.getAdmin();
  if (admin.tableExists(tableName)) {
    if (admin.isTableEnabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.deleteTable(tableName);
  }
  TableDescriptor tableDescriptor = tdb.build();
  admin.createTable(tableDescriptor);
  waitForTable(tableDescriptor.getTableName());

  // verify that the coprocessors were loaded
  boolean found1 = false, found2 = false, found2_k1 = false, found2_k2 = false, found2_k3 = false;
  SingleProcessHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
  for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
    if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) {
      CoprocessorEnvironment env;
      env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
      if (env != null) {
        found1 = true;
      }
      env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName2);
      if (env != null) {
        found2 = true;
        Configuration conf = env.getConfiguration();
        found2_k1 = conf.get("k1") != null;
        found2_k2 = conf.get("k2") != null;
        found2_k3 = conf.get("k3") != null;
      }
    }
  }
  assertTrue("Class " + cpName1 + " was missing on a region", found1);
  assertTrue("Class " + cpName2 + " was missing on a region", found2);
  assertTrue("Configuration key 'k1' was missing on a region", found2_k1);
  assertTrue("Configuration key 'k2' was missing on a region", found2_k2);
  assertTrue("Configuration key 'k3' was missing on a region", found2_k3);
}
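The "|k1=v1,k2=v2,k3=v3" suffix of the COPROCESSOR$2 value becomes per-table coprocessor configuration, which is what the test reads back through env.getConfiguration(). A minimal, hypothetical coprocessor sketch showing where those keys surface on the server side; the class name and field are illustrative and are not the classes produced by buildCoprocessorJar().

import java.io.IOException;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;

public class ConfigAwareCoprocessor implements RegionCoprocessor {
  private String k1;

  @Override
  public void start(CoprocessorEnvironment env) throws IOException {
    // The key=value pairs from the coprocessor attribute are merged into this
    // environment's Configuration, so per-table settings are available at load time.
    k1 = env.getConfiguration().get("k1");
  }
}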
use of org.apache.hadoop.hbase.client.TableDescriptorBuilder in project hbase by apache.
the class SchemaResource method replace.
private Response replace(final TableName name, final TableSchemaModel model,
    final UriInfo uriInfo, final Admin admin) {
  if (servlet.isReadOnly()) {
    return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT)
      .entity("Forbidden" + CRLF).build();
  }
  try {
    TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(name);
    for (Map.Entry<QName, Object> e : model.getAny().entrySet()) {
      tableDescriptorBuilder.setValue(e.getKey().getLocalPart(), e.getValue().toString());
    }
    for (ColumnSchemaModel family : model.getColumns()) {
      ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder =
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family.getName()));
      for (Map.Entry<QName, Object> e : family.getAny().entrySet()) {
        columnFamilyDescriptorBuilder.setValue(e.getKey().getLocalPart(), e.getValue().toString());
      }
      tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptorBuilder.build());
    }
    TableDescriptor tableDescriptor = tableDescriptorBuilder.build();
    if (admin.tableExists(name)) {
      admin.disableTable(name);
      admin.modifyTable(tableDescriptor);
      admin.enableTable(name);
      servlet.getMetrics().incrementSucessfulPutRequests(1);
    } else {
      try {
        admin.createTable(tableDescriptor);
        servlet.getMetrics().incrementSucessfulPutRequests(1);
      } catch (TableExistsException e) {
        // race, someone else created a table with the same name
        return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT)
          .entity("Not modified" + CRLF).build();
      }
    }
    return Response.created(uriInfo.getAbsolutePath()).build();
  } catch (Exception e) {
    LOG.info("Caught exception", e);
    servlet.getMetrics().incrementFailedPutRequests(1);
    return processException(e);
  }
}
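This handler backs the REST server's schema endpoint, where a PUT replaces the whole table schema. A hedged client-side sketch using java.net.http follows; the host, port, table name, and the exact JSON layout of the schema document are assumptions based on the HBase REST documentation, not values from this snippet.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class SchemaPutExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical REST endpoint; adjust host, port, and table name for your deployment.
    String url = "http://localhost:8080/mytable/schema";
    // Minimal schema document: one column family named "cf".
    String body = "{\"name\":\"mytable\",\"ColumnSchema\":[{\"name\":\"cf\"}]}";
    HttpRequest request = HttpRequest.newBuilder(URI.create(url))
        .header("Content-Type", "application/json")
        .PUT(HttpRequest.BodyPublishers.ofString(body))
        .build();
    HttpResponse<String> response =
        HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println(response.statusCode());
  }
}

A 201 status corresponds to the Response.created(...) branch above; 403 corresponds to the read-only check at the top of the method.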
use of org.apache.hadoop.hbase.client.TableDescriptorBuilder in project hbase by apache.
the class SchemaResource method update.
private Response update(final TableName name, final TableSchemaModel model,
    final UriInfo uriInfo, final Admin admin) {
  if (servlet.isReadOnly()) {
    return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT)
      .entity("Forbidden" + CRLF).build();
  }
  try {
    TableDescriptorBuilder tableDescriptorBuilder =
      TableDescriptorBuilder.newBuilder(admin.getDescriptor(name));
    admin.disableTable(name);
    try {
      for (ColumnSchemaModel family : model.getColumns()) {
        ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder =
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family.getName()));
        for (Map.Entry<QName, Object> e : family.getAny().entrySet()) {
          columnFamilyDescriptorBuilder.setValue(e.getKey().getLocalPart(),
            e.getValue().toString());
        }
        TableDescriptor tableDescriptor = tableDescriptorBuilder.build();
        ColumnFamilyDescriptor columnFamilyDescriptor = columnFamilyDescriptorBuilder.build();
        if (tableDescriptor.hasColumnFamily(columnFamilyDescriptor.getName())) {
          admin.modifyColumnFamily(name, columnFamilyDescriptor);
        } else {
          admin.addColumnFamily(name, columnFamilyDescriptor);
        }
      }
    } catch (IOException e) {
      return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT)
        .entity("Unavailable" + CRLF).build();
    } finally {
      admin.enableTable(TableName.valueOf(tableResource.getName()));
    }
    servlet.getMetrics().incrementSucessfulPutRequests(1);
    return Response.ok().build();
  } catch (Exception e) {
    servlet.getMetrics().incrementFailedPutRequests(1);
    return processException(e);
  }
}
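The per-family branch above reduces to "modify the family if the table already has it, otherwise add it". Here is a standalone sketch of that decision made directly against Admin; the helper name upsertFamily is illustrative, and unlike update() it re-reads the live descriptor on each call rather than reusing one fetched up front.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;

public final class ColumnFamilyUpsert {
  private ColumnFamilyUpsert() {
  }

  /** Adds the family if the table does not have it yet, otherwise modifies it in place. */
  static void upsertFamily(Admin admin, TableName table, ColumnFamilyDescriptor family)
      throws IOException {
    if (admin.getDescriptor(table).hasColumnFamily(family.getName())) {
      admin.modifyColumnFamily(table, family);
    } else {
      admin.addColumnFamily(table, family);
    }
  }
}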
use of org.apache.hadoop.hbase.client.TableDescriptorBuilder in project hbase by apache.
the class RestoreTool method incrementalRestoreTable.
/**
 * During an incremental backup operation, calls WalPlayer to replay the WAL in the backup image.
 * Currently tableNames and newTableNames only contain a single table; this will be expanded to
 * multiple tables in the future.
 * @param conn HBase connection
 * @param tableBackupPath backup path
 * @param logDirs incremental backup folders, which contain the WALs
 * @param tableNames source table names (the tables that were backed up)
 * @param newTableNames target table names (the tables to restore to)
 * @param incrBackupId incremental backup id
 * @throws IOException exception
 */
public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[] logDirs,
    TableName[] tableNames, TableName[] newTableNames, String incrBackupId) throws IOException {
  try (Admin admin = conn.getAdmin()) {
    if (tableNames.length != newTableNames.length) {
      throw new IOException("Number of source tables and target tables does not match!");
    }
    FileSystem fileSys = tableBackupPath.getFileSystem(this.conf);
    // full backup. Here, check that all new tables exist
    for (TableName tableName : newTableNames) {
      if (!admin.tableExists(tableName)) {
        throw new IOException("HBase table " + tableName
          + " does not exist. Create the table first, e.g. by restoring a full backup.");
      }
    }
    // adjust table schema
    for (int i = 0; i < tableNames.length; i++) {
      TableName tableName = tableNames[i];
      TableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, incrBackupId);
      if (tableDescriptor == null) {
        throw new IOException("Can't find " + tableName + "'s descriptor.");
      }
      LOG.debug("Found descriptor " + tableDescriptor + " through " + incrBackupId);
      TableName newTableName = newTableNames[i];
      TableDescriptor newTableDescriptor = admin.getDescriptor(newTableName);
      List<ColumnFamilyDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies());
      List<ColumnFamilyDescriptor> existingFamilies =
        Arrays.asList(newTableDescriptor.getColumnFamilies());
      TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(newTableDescriptor);
      boolean schemaChangeNeeded = false;
      for (ColumnFamilyDescriptor family : families) {
        if (!existingFamilies.contains(family)) {
          builder.setColumnFamily(family);
          schemaChangeNeeded = true;
        }
      }
      for (ColumnFamilyDescriptor family : existingFamilies) {
        if (!families.contains(family)) {
          builder.removeColumnFamily(family.getName());
          schemaChangeNeeded = true;
        }
      }
      if (schemaChangeNeeded) {
        modifyTableSync(conn, builder.build());
        LOG.info("Changed " + newTableDescriptor.getTableName() + " to: " + newTableDescriptor);
      }
    }
    RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf);
    restoreService.run(logDirs, tableNames, newTableNames, false);
  }
}
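The schema-adjustment loop above computes a column-family diff between the backed-up descriptor and the live target table, then applies it in one modifyTableSync call. A simplified, standalone sketch of that diff follows; the helper name alignFamilies is illustrative, and it compares families by name only, whereas the snippet above relies on ColumnFamilyDescriptor equality and therefore also picks up attribute-only changes.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class SchemaDiff {
  private SchemaDiff() {
  }

  /**
   * Returns a descriptor for the target table whose column-family set matches the source
   * by family name, or null when no change is needed.
   */
  static TableDescriptor alignFamilies(TableDescriptor source, TableDescriptor target) {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(target);
    boolean changed = false;
    for (ColumnFamilyDescriptor family : source.getColumnFamilies()) {
      if (!target.hasColumnFamily(family.getName())) {
        builder.setColumnFamily(family); // family missing on the target: add it
        changed = true;
      }
    }
    for (ColumnFamilyDescriptor family : target.getColumnFamilies()) {
      if (!source.hasColumnFamily(family.getName())) {
        builder.removeColumnFamily(family.getName()); // family gone from the source: drop it
        changed = true;
      }
    }
    return changed ? builder.build() : null;
  }
}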