Example 6 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class TestZstdDictionarySplitMerge method test.

@Test
public void test() throws Exception {
    // Create the table
    final TableName tableName = TableName.valueOf("TestZstdDictionarySplitMerge");
    final byte[] cfName = Bytes.toBytes("info");
    final String dictionaryPath = DictionaryCache.RESOURCE_SCHEME + "zstd.test.dict";
    final TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfName)
            .setCompressionType(Compression.Algorithm.ZSTD)
            .setConfiguration(ZstdCodec.ZSTD_DICTIONARY_KEY, dictionaryPath)
            .build())
        .build();
    final Admin admin = TEST_UTIL.getAdmin();
    admin.createTable(td, new byte[][] { Bytes.toBytes(1) });
    TEST_UTIL.waitTableAvailable(tableName);
    // Load some data (conf and TEST_UTIL are fields of the enclosing test class)
    Table t = ConnectionFactory.createConnection(conf).getTable(tableName);
    TEST_UTIL.loadNumericRows(t, cfName, 0, 100_000);
    admin.flush(tableName);
    assertTrue("Dictionary was not loaded", DictionaryCache.contains(dictionaryPath));
    TEST_UTIL.verifyNumericRows(t, cfName, 0, 100_000, 0);
    // Test split procedure
    admin.split(tableName, Bytes.toBytes(50_000));
    TEST_UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            return TEST_UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 3;
        }

        @Override
        public String explainFailure() throws Exception {
            return "Split has not finished yet";
        }
    });
    TEST_UTIL.waitUntilNoRegionsInTransition();
    TEST_UTIL.verifyNumericRows(t, cfName, 0, 100_000, 0);
    // Test merge procedure
    RegionInfo regionA = null;
    RegionInfo regionB = null;
    for (RegionInfo region : admin.getRegions(tableName)) {
        if (region.getStartKey().length == 0) {
            regionA = region;
        } else if (Bytes.equals(region.getStartKey(), Bytes.toBytes(1))) {
            regionB = region;
        }
    }
    assertNotNull(regionA);
    assertNotNull(regionB);
    admin.mergeRegionsAsync(new byte[][] { regionA.getRegionName(), regionB.getRegionName() }, false).get(30, TimeUnit.SECONDS);
    assertEquals(2, admin.getRegions(tableName).size());
    ServerName expected = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName();
    assertEquals(expected, TEST_UTIL.getConnection().getRegionLocator(tableName).getRegionLocation(Bytes.toBytes(1), true).getServerName());
    try (AsyncConnection asyncConn = ConnectionFactory.createAsyncConnection(conf).get()) {
        assertEquals(expected, asyncConn.getRegionLocator(tableName).getRegionLocation(Bytes.toBytes(1), true).get().getServerName());
    }
    TEST_UTIL.verifyNumericRows(t, cfName, 0, 100_000, 0);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) ServerName(org.apache.hadoop.hbase.ServerName) AsyncConnection(org.apache.hadoop.hbase.client.AsyncConnection) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) Admin(org.apache.hadoop.hbase.client.Admin) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Test(org.junit.Test)
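
The same builder pattern can point the dictionary at a filesystem location instead of a bundled classpath resource. A minimal sketch reusing the API calls from the test above; the HDFS path and table name are illustrative placeholders:

// Hedged sketch: identical builder calls to the test above, but with an
// illustrative HDFS path for the dictionary instead of a classpath resource.
ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
    .setCompressionType(Compression.Algorithm.ZSTD)
    // any location readable by the region servers should work here
    .setConfiguration(ZstdCodec.ZSTD_DICTIONARY_KEY, "hdfs://namenode:8020/dicts/zstd.test.dict")
    .build();
TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
    .setColumnFamily(cf)
    .build();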

Example 7 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class SchemaResource method replace.

private Response replace(final TableName name, final TableSchemaModel model, final UriInfo uriInfo, final Admin admin) {
    if (servlet.isReadOnly()) {
        return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT).entity("Forbidden" + CRLF).build();
    }
    try {
        TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(name);
        for (Map.Entry<QName, Object> e : model.getAny().entrySet()) {
            tableDescriptorBuilder.setValue(e.getKey().getLocalPart(), e.getValue().toString());
        }
        for (ColumnSchemaModel family : model.getColumns()) {
            ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family.getName()));
            for (Map.Entry<QName, Object> e : family.getAny().entrySet()) {
                columnFamilyDescriptorBuilder.setValue(e.getKey().getLocalPart(), e.getValue().toString());
            }
            tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptorBuilder.build());
        }
        TableDescriptor tableDescriptor = tableDescriptorBuilder.build();
        if (admin.tableExists(name)) {
            admin.disableTable(name);
            admin.modifyTable(tableDescriptor);
            admin.enableTable(name);
            servlet.getMetrics().incrementSucessfulPutRequests(1);
        } else {
            try {
                admin.createTable(tableDescriptor);
                servlet.getMetrics().incrementSucessfulPutRequests(1);
            } catch (TableExistsException e) {
                // race, someone else created a table with the same name
                return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT).entity("Not modified" + CRLF).build();
            }
        }
        return Response.created(uriInfo.getAbsolutePath()).build();
    } catch (Exception e) {
        LOG.info("Caught exception", e);
        servlet.getMetrics().incrementFailedPutRequests(1);
        return processException(e);
    }
}
Also used : QName(javax.xml.namespace.QName) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) TableExistsException(org.apache.hadoop.hbase.TableExistsException) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) Map(java.util.Map) ColumnSchemaModel(org.apache.hadoop.hbase.rest.model.ColumnSchemaModel) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) TableNotEnabledException(org.apache.hadoop.hbase.TableNotEnabledException) TableExistsException(org.apache.hadoop.hbase.TableExistsException) WebApplicationException(org.apache.hbase.thirdparty.javax.ws.rs.WebApplicationException) IOException(java.io.IOException)
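
This replace path is reached with an HTTP PUT against the table's schema endpoint. A minimal client-side sketch using java.net.http, assuming a REST gateway on localhost:8080 and the XML form of TableSchemaModel; the host, port, table name, and column family are placeholders:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class SchemaPutSketch {
    public static void main(String[] args) throws Exception {
        // TableSchemaModel as XML: one table ("users") with one family ("info")
        String xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
            + "<TableSchema name=\"users\"><ColumnSchema name=\"info\"/></TableSchema>";
        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:8080/users/schema"))
            .header("Content-Type", "text/xml")
            .PUT(HttpRequest.BodyPublishers.ofString(xml))
            .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        // Expect 201 Created on success, 403 Forbidden when the gateway is read-only
        System.out.println(response.statusCode());
    }
}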

Example 8 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class SchemaResource method update.

private Response update(final TableName name, final TableSchemaModel model, final UriInfo uriInfo, final Admin admin) {
    if (servlet.isReadOnly()) {
        return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT).entity("Forbidden" + CRLF).build();
    }
    try {
        TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(admin.getDescriptor(name));
        admin.disableTable(name);
        try {
            for (ColumnSchemaModel family : model.getColumns()) {
                ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family.getName()));
                for (Map.Entry<QName, Object> e : family.getAny().entrySet()) {
                    columnFamilyDescriptorBuilder.setValue(e.getKey().getLocalPart(), e.getValue().toString());
                }
                TableDescriptor tableDescriptor = tableDescriptorBuilder.build();
                ColumnFamilyDescriptor columnFamilyDescriptor = columnFamilyDescriptorBuilder.build();
                if (tableDescriptor.hasColumnFamily(columnFamilyDescriptor.getName())) {
                    admin.modifyColumnFamily(name, columnFamilyDescriptor);
                } else {
                    admin.addColumnFamily(name, columnFamilyDescriptor);
                }
            }
        } catch (IOException e) {
            return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT).entity("Unavailable" + CRLF).build();
        } finally {
            admin.enableTable(TableName.valueOf(tableResource.getName()));
        }
        servlet.getMetrics().incrementSucessfulPutRequests(1);
        return Response.ok().build();
    } catch (Exception e) {
        servlet.getMetrics().incrementFailedPutRequests(1);
        return processException(e);
    }
}
Also used : QName(javax.xml.namespace.QName) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) IOException(java.io.IOException) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) ColumnSchemaModel(org.apache.hadoop.hbase.rest.model.ColumnSchemaModel) Map(java.util.Map) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) TableNotEnabledException(org.apache.hadoop.hbase.TableNotEnabledException) TableExistsException(org.apache.hadoop.hbase.TableExistsException) WebApplicationException(org.apache.hbase.thirdparty.javax.ws.rs.WebApplicationException) IOException(java.io.IOException)
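
The core decision in update(), modify an existing family in place or add a new one, can also be driven directly through the Admin API. A brief sketch, given the same admin and name as in the method above; the family name "newcf" is hypothetical:

// Add-or-modify branch from update(), as standalone Admin calls.
TableDescriptor current = admin.getDescriptor(name);
ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of("newcf"); // hypothetical family
if (current.hasColumnFamily(cfd.getName())) {
    admin.modifyColumnFamily(name, cfd); // family exists: change it in place
} else {
    admin.addColumnFamily(name, cfd); // family is new: add it to the table
}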

Example 9 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class RestoreTool method incrementalRestoreTable.

/**
 * During an incremental restore, calls WalPlayer to replay the WALs in the
 * backup image. Currently tableNames and newTableNames each contain a single
 * table; support for multiple tables may be added in the future.
 * @param conn HBase connection
 * @param tableBackupPath backup path
 * @param logDirs incremental backup folders containing the WALs
 * @param tableNames source table names (the tables that were backed up)
 * @param newTableNames target table names (the tables to restore into)
 * @param incrBackupId incremental backup id
 * @throws IOException if the restore fails
 */
public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[] logDirs, TableName[] tableNames, TableName[] newTableNames, String incrBackupId) throws IOException {
    try (Admin admin = conn.getAdmin()) {
        if (tableNames.length != newTableNames.length) {
            throw new IOException("Number of source tables and target tables does not match!");
        }
        FileSystem fileSys = tableBackupPath.getFileSystem(this.conf);
        // The target tables are expected to exist already, e.g. created by
        // restoring a full backup; verify that all of them do.
        for (TableName tableName : newTableNames) {
            if (!admin.tableExists(tableName)) {
                throw new IOException("HBase table " + tableName + " does not exist. Create the table first, e.g. by restoring a full backup.");
            }
        }
        // adjust table schema
        for (int i = 0; i < tableNames.length; i++) {
            TableName tableName = tableNames[i];
            TableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, incrBackupId);
            if (tableDescriptor == null) {
                throw new IOException("Can't find " + tableName + "'s descriptor.");
            }
            LOG.debug("Found descriptor " + tableDescriptor + " through " + incrBackupId);
            TableName newTableName = newTableNames[i];
            TableDescriptor newTableDescriptor = admin.getDescriptor(newTableName);
            List<ColumnFamilyDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies());
            List<ColumnFamilyDescriptor> existingFamilies = Arrays.asList(newTableDescriptor.getColumnFamilies());
            TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(newTableDescriptor);
            boolean schemaChangeNeeded = false;
            for (ColumnFamilyDescriptor family : families) {
                if (!existingFamilies.contains(family)) {
                    builder.setColumnFamily(family);
                    schemaChangeNeeded = true;
                }
            }
            for (ColumnFamilyDescriptor family : existingFamilies) {
                if (!families.contains(family)) {
                    builder.removeColumnFamily(family.getName());
                    schemaChangeNeeded = true;
                }
            }
            if (schemaChangeNeeded) {
                TableDescriptor modified = builder.build();
                modifyTableSync(conn, modified);
                // Log the descriptor that was actually applied, not the pre-change one
                LOG.info("Changed " + modified.getTableName() + " to: " + modified);
            }
        }
        RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf);
        restoreService.run(logDirs, tableNames, newTableNames, false);
    }
}
Also used : RestoreJob(org.apache.hadoop.hbase.backup.RestoreJob) TableName(org.apache.hadoop.hbase.TableName) FileSystem(org.apache.hadoop.fs.FileSystem) HBackupFileSystem(org.apache.hadoop.hbase.backup.HBackupFileSystem) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) IOException(java.io.IOException) Admin(org.apache.hadoop.hbase.client.Admin) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)
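
A hypothetical call site for the method above. Every path, table name, and backup id is a placeholder, and construction of the RestoreTool instance and the Connection is omitted:

// Hypothetical invocation; assumes an already-constructed RestoreTool `tool`
// and an open Connection `conn`. All paths and ids below are placeholders.
Path tableBackupPath = new Path("hdfs://namenode:8020/backup/backup_0002/default/t1");
Path[] logDirs = new Path[] { new Path("hdfs://namenode:8020/backup/backup_0002/WALs") };
TableName[] fromTables = new TableName[] { TableName.valueOf("t1") };
TableName[] toTables = new TableName[] { TableName.valueOf("t1_restored") };
tool.incrementalRestoreTable(conn, tableBackupPath, logDirs, fromTables, toTables, "backup_0002");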

Example 10 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class RestoreTool method getTableDesc.

/**
 * Get the table descriptor preserved in the backup image.
 * @param tableName the table that was backed up
 * @return the {@link TableDescriptor} saved in the table's backup image
 * @throws IOException if the descriptor cannot be read or does not match tableName
 */
TableDescriptor getTableDesc(TableName tableName) throws IOException {
    Path tableInfoPath = this.getTableInfoPath(tableName);
    SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableInfoPath);
    SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath, desc);
    TableDescriptor tableDescriptor = manifest.getTableDescriptor();
    if (!tableDescriptor.getTableName().equals(tableName)) {
        LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: " + tableInfoPath.toString());
        LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getTableName().getNameAsString());
        throw new FileNotFoundException("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: " + tableInfoPath.toString());
    }
    return tableDescriptor;
}
Also used : Path(org.apache.hadoop.fs.Path) SnapshotManifest(org.apache.hadoop.hbase.snapshot.SnapshotManifest) FileNotFoundException(java.io.FileNotFoundException) SnapshotDescription(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)
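
A hypothetical caller of getTableDesc. Note the method is package-private, so this only compiles from within the same package; the table name is a placeholder:

// Hypothetical: read the descriptor preserved in the backup image and list
// its column families, e.g. before deciding whether a schema change is needed.
TableDescriptor backedUp = restoreTool.getTableDesc(TableName.valueOf("t1"));
for (ColumnFamilyDescriptor family : backedUp.getColumnFamilies()) {
    System.out.println("backed-up family: " + family.getNameAsString());
}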

Aggregations

TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 639
Test (org.junit.Test): 356
TableName (org.apache.hadoop.hbase.TableName): 237
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 180
IOException (java.io.IOException): 151
Put (org.apache.hadoop.hbase.client.Put): 142
Admin (org.apache.hadoop.hbase.client.Admin): 136
Path (org.apache.hadoop.fs.Path): 124
Table (org.apache.hadoop.hbase.client.Table): 121
ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor): 96
Configuration (org.apache.hadoop.conf.Configuration): 91
TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder): 77
ArrayList (java.util.ArrayList): 75
FileSystem (org.apache.hadoop.fs.FileSystem): 66
Result (org.apache.hadoop.hbase.client.Result): 66
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 64
Connection (org.apache.hadoop.hbase.client.Connection): 59
Scan (org.apache.hadoop.hbase.client.Scan): 50
Get (org.apache.hadoop.hbase.client.Get): 49
List (java.util.List): 39