Example 41 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class TestImportExport method testTagsAddition.

/**
 *  Add cell tags to delete mutations, run the export and import tools, and
 *  verify that the tags are also present in the imported table.
 * @throws Throwable if the export or import fails
 */
@Test
public void testTagsAddition() throws Throwable {
    final TableName exportTable = TableName.valueOf(name.getMethodName());
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(exportTable)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA)
            .setMaxVersions(5)
            .setKeepDeletedCells(KeepDeletedCells.TRUE)
            .build())
        .setCoprocessor(MetadataController.class.getName())
        .build();
    UTIL.getAdmin().createTable(desc);
    Table exportT = UTIL.getConnection().getTable(exportTable);
    // Add first version of QUAL
    Put p = new Put(ROW1);
    p.addColumn(FAMILYA, QUAL, now, QUAL);
    exportT.put(p);
    // Add Delete family marker
    Delete d = new Delete(ROW1, now + 3);
    // Add test attribute to delete mutation.
    d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG));
    exportT.delete(d);
    // Run the export tool with KeyValueCodecWithTags as the codec so that tags
    // are encoded and decoded in the RPC calls.
    String[] args = new String[] {
        "-D" + ExportUtils.RAW_SCAN + "=true",
        "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags",
        exportTable.getNameAsString(), FQ_OUTPUT_DIR,
        // Max number of key versions per key to export.
        "1000" };
    assertTrue(runExport(args));
    // Assert tag exists in exportTable
    checkWhetherTagExists(exportTable, true);
    // Create an import table with MetadataController.
    final TableName importTable = TableName.valueOf("importWithTestTagsAddition");
    TableDescriptor importTableDesc = TableDescriptorBuilder.newBuilder(importTable)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA)
            .setMaxVersions(5)
            .setKeepDeletedCells(KeepDeletedCells.TRUE)
            .build())
        .setCoprocessor(MetadataController.class.getName())
        .build();
    UTIL.getAdmin().createTable(importTableDesc);
    // Run import tool.
    args = new String[] {
        // Ensure the codec encodes and decodes tags in the RPC calls.
        "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags",
        importTable.getNameAsString(), FQ_OUTPUT_DIR };
    assertTrue(runImport(args));
    // Make sure that tags exist in the imported table.
    checkWhetherTagExists(importTable, true);
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test)
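
The helper checkWhetherTagExists is not shown on this page. A minimal sketch of how such a check could work, assuming a raw scan with a tags-aware codec already configured; the scan and assertion details are illustrative rather than the test's actual helper:

    private void checkWhetherTagExists(TableName table, boolean tagExists) throws IOException {
        List<Cell> values = new ArrayList<>();
        try (Table t = UTIL.getConnection().getTable(table);
            ResultScanner scanner = t.getScanner(new Scan().setRaw(true))) {
            // A raw scan also returns delete markers, whose cells may carry tags.
            for (Result result : scanner) {
                values.addAll(result.listCells());
            }
        }
        boolean found = false;
        for (Cell cell : values) {
            if (!PrivateCellUtil.getTags(cell).isEmpty()) {
                found = true;
                break;
            }
        }
        assertEquals(tagExists, found);
    }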

Example 42 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class TestImportExport method testTagsWithEmptyCodec.

/**
 * Set hbase.client.rpc.codec and hbase.client.default.rpc.codec both to the empty string.
 * This means no codec will be used. Make sure that tags are not returned in the response.
 * @throws Exception if the table operations or the scan fail
 */
@Test
public void testTagsWithEmptyCodec() throws Exception {
    TableName tableName = TableName.valueOf(name.getMethodName());
    TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA)
            .setMaxVersions(5)
            .setKeepDeletedCells(KeepDeletedCells.TRUE)
            .build())
        .setCoprocessor(MetadataController.class.getName())
        .build();
    UTIL.getAdmin().createTable(tableDesc);
    Configuration conf = new Configuration(UTIL.getConfiguration());
    conf.set(RPC_CODEC_CONF_KEY, "");
    conf.set(DEFAULT_CODEC_CLASS, "");
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(tableName)) {
        // Add first version of QUAL
        Put p = new Put(ROW1);
        p.addColumn(FAMILYA, QUAL, now, QUAL);
        table.put(p);
        // Add Delete family marker
        Delete d = new Delete(ROW1, now + 3);
        // Add test attribute to delete mutation.
        d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG));
        table.delete(d);
        // Since RPC_CODEC_CONF_KEY and DEFAULT_CODEC_CLASS are set to empty, the client will use
        // an empty codec and shouldn't encode/decode tags.
        Scan scan = new Scan().withStartRow(ROW1).setRaw(true);
        ResultScanner scanner = table.getScanner(scan);
        int count = 0;
        Result result;
        while ((result = scanner.next()) != null) {
            List<Cell> cells = result.listCells();
            assertEquals(2, cells.size());
            Cell cell = cells.get(0);
            assertTrue(CellUtil.isDelete(cell));
            List<Tag> tags = PrivateCellUtil.getTags(cell);
            assertEquals(0, tags.size());
            count++;
        }
        assertEquals(1, count);
    } finally {
        UTIL.deleteTable(tableName);
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Configuration(org.apache.hadoop.conf.Configuration) Connection(org.apache.hadoop.hbase.client.Connection) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) TableName(org.apache.hadoop.hbase.TableName) Scan(org.apache.hadoop.hbase.client.Scan) ArrayBackedTag(org.apache.hadoop.hbase.ArrayBackedTag) Tag(org.apache.hadoop.hbase.Tag) Cell(org.apache.hadoop.hbase.Cell) MapReduceExtendedCell(org.apache.hadoop.hbase.util.MapReduceExtendedCell) Test(org.junit.Test)
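
For contrast, a minimal sketch of the opposite configuration, where the client codec is explicitly set to KeyValueCodecWithTags so that tags survive the RPC round trip. This assumes the MetadataController coprocessor attaches the tag server-side, as in the test above; the loop body is illustrative:

    Configuration tagConf = new Configuration(UTIL.getConfiguration());
    // Use a codec that encodes and decodes cell tags on the RPC channel.
    tagConf.set(RPC_CODEC_CONF_KEY, "org.apache.hadoop.hbase.codec.KeyValueCodecWithTags");
    try (Connection conn = ConnectionFactory.createConnection(tagConf);
        Table t = conn.getTable(tableName);
        ResultScanner scanner = t.getScanner(new Scan().withStartRow(ROW1).setRaw(true))) {
        for (Result r : scanner) {
            Cell cell = r.listCells().get(0);
            // With a tags-aware codec, the delete marker's tag is visible client-side.
            assertTrue(PrivateCellUtil.getTags(cell).size() > 0);
        }
    }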

Example 43 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class TestImportExport method testWithDeletes.

@Test
public void testWithDeletes() throws Throwable {
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA)
            .setMaxVersions(5)
            .setKeepDeletedCells(KeepDeletedCells.TRUE)
            .build())
        .build();
    UTIL.getAdmin().createTable(desc);
    try (Table t = UTIL.getConnection().getTable(desc.getTableName())) {
        Put p = new Put(ROW1);
        p.addColumn(FAMILYA, QUAL, now, QUAL);
        p.addColumn(FAMILYA, QUAL, now + 1, QUAL);
        p.addColumn(FAMILYA, QUAL, now + 2, QUAL);
        p.addColumn(FAMILYA, QUAL, now + 3, QUAL);
        p.addColumn(FAMILYA, QUAL, now + 4, QUAL);
        t.put(p);
        Delete d = new Delete(ROW1, now + 3);
        t.delete(d);
        d = new Delete(ROW1);
        d.addColumns(FAMILYA, QUAL, now + 2);
        t.delete(d);
    }
    String[] args = new String[] {
        "-D" + ExportUtils.RAW_SCAN + "=true", name.getMethodName(), FQ_OUTPUT_DIR,
        // Max number of key versions per key to export.
        "1000" };
    assertTrue(runExport(args));
    final String IMPORT_TABLE = name.getMethodName() + "import";
    desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(IMPORT_TABLE))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA)
            .setMaxVersions(5)
            .setKeepDeletedCells(KeepDeletedCells.TRUE)
            .build())
        .build();
    UTIL.getAdmin().createTable(desc);
    try (Table t = UTIL.getConnection().getTable(desc.getTableName())) {
        args = new String[] { IMPORT_TABLE, FQ_OUTPUT_DIR };
        assertTrue(runImport(args));
        Scan s = new Scan();
        s.readAllVersions();
        s.setRaw(true);
        ResultScanner scanner = t.getScanner(s);
        Result r = scanner.next();
        Cell[] res = r.rawCells();
        assertTrue(PrivateCellUtil.isDeleteFamily(res[0]));
        assertEquals(now + 4, res[1].getTimestamp());
        assertEquals(now + 3, res[2].getTimestamp());
        assertTrue(CellUtil.isDelete(res[3]));
        assertEquals(now + 2, res[4].getTimestamp());
        assertEquals(now + 1, res[5].getTimestamp());
        assertEquals(now, res[6].getTimestamp());
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) MapReduceExtendedCell(org.apache.hadoop.hbase.util.MapReduceExtendedCell) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
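
The assertions above rely on a raw scan returning delete markers inline with the puts: the family delete marker sorts first (it has an empty qualifier), then cells and column delete markers follow by descending timestamp, with a delete sorting before a put at the same timestamp. A small sketch that prints each raw cell's timestamp and type to make that ordering visible (Cell.getType() is assumed available, as in HBase 2.x clients):

    try (Table t = UTIL.getConnection().getTable(desc.getTableName());
        ResultScanner scanner = t.getScanner(new Scan().setRaw(true).readAllVersions())) {
        for (Result r : scanner) {
            for (Cell cell : r.rawCells()) {
                // Prints e.g. "DeleteFamily", "Put", "DeleteColumn", newest first per column.
                System.out.println(cell.getTimestamp() + " " + cell.getType());
            }
        }
    }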

Example 44 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class SnapshotManager method cloneSnapshot.

/**
 * Clone the specified snapshot.
 * The clone will fail if the destination table has a snapshot or restore in progress.
 *
 * @param reqSnapshot Snapshot Descriptor from request
 * @param tableName table to clone
 * @param snapshot Snapshot Descriptor
 * @param snapshotTableDesc Table Descriptor
 * @param nonceKey unique identifier to prevent duplicated RPC
 * @param restoreAcl true to also restore the snapshot's ACLs
 * @param customSFT the store file tracker implementation to use for the cloned table
 * @return procId the ID of the clone snapshot procedure
 * @throws IOException if the clone operation fails
 */
private long cloneSnapshot(final SnapshotDescription reqSnapshot, final TableName tableName,
        final SnapshotDescription snapshot, final TableDescriptor snapshotTableDesc,
        final NonceKey nonceKey, final boolean restoreAcl, final String customSFT)
        throws IOException {
    MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
    TableDescriptor htd = TableDescriptorBuilder.copy(tableName, snapshotTableDesc);
    org.apache.hadoop.hbase.client.SnapshotDescription snapshotPOJO = null;
    if (cpHost != null) {
        snapshotPOJO = ProtobufUtil.createSnapshotDesc(snapshot);
        cpHost.preCloneSnapshot(snapshotPOJO, htd);
    }
    long procId;
    try {
        procId = cloneSnapshot(snapshot, htd, nonceKey, restoreAcl, customSFT);
    } catch (IOException e) {
        LOG.error("Exception occurred while cloning the snapshot " + snapshot.getName() + " as table " + tableName.getNameAsString(), e);
        throw e;
    }
    LOG.info("Clone snapshot=" + snapshot.getName() + " as table=" + tableName);
    if (cpHost != null) {
        cpHost.postCloneSnapshot(snapshotPOJO, htd);
    }
    return procId;
}
Also used : MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost) IOException(java.io.IOException) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)
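
On the client side, this procedure is typically reached through the Admin API. A minimal sketch of triggering a clone from an existing snapshot; the snapshot and table names are placeholders:

    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
        // Fails if the destination table already exists or has a
        // snapshot/restore in progress, per the javadoc above.
        admin.cloneSnapshot("my-snapshot", TableName.valueOf("clonedTable"));
    }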

Example 45 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class SnapshotManager method restoreOrCloneSnapshot.

/**
 * Restore or clone the specified snapshot.
 * @param reqSnapshot the snapshot descriptor from the client request
 * @param nonceKey unique identifier to prevent duplicated RPC
 * @param restoreAcl true to also restore the snapshot's ACLs
 * @param customSFT the store file tracker implementation to use for the restored or cloned table
 * @return procId the ID of the restore/clone snapshot procedure
 * @throws IOException if the snapshot does not exist or the operation fails
 */
public long restoreOrCloneSnapshot(final SnapshotDescription reqSnapshot, final NonceKey nonceKey,
        final boolean restoreAcl, String customSFT) throws IOException {
    FileSystem fs = master.getMasterFileSystem().getFileSystem();
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(reqSnapshot, rootDir);
    // check if the snapshot exists
    if (!fs.exists(snapshotDir)) {
        LOG.error("A Snapshot named '" + reqSnapshot.getName() + "' does not exist.");
        throw new SnapshotDoesNotExistException(ProtobufUtil.createSnapshotDesc(reqSnapshot));
    }
    // Get snapshot info from the file system. The reqSnapshot is a "fake" snapshotInfo with
    // just the snapshot "name" and the table name to restore. It does not contain the "real"
    // snapshot information.
    SnapshotDescription snapshot = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
    SnapshotManifest manifest = SnapshotManifest.open(master.getConfiguration(), fs, snapshotDir, snapshot);
    TableDescriptor snapshotTableDesc = manifest.getTableDescriptor();
    TableName tableName = TableName.valueOf(reqSnapshot.getTable());
    // sanity check the new table descriptor
    TableDescriptorChecker.sanityCheck(master.getConfiguration(), snapshotTableDesc);
    // stop tracking "abandoned" handlers
    cleanupSentinels();
    // Verify snapshot validity
    SnapshotReferenceUtil.verifySnapshot(master.getConfiguration(), fs, manifest);
    // Execute the restore/clone operation
    long procId;
    if (master.getTableDescriptors().exists(tableName)) {
        procId = restoreSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceKey, restoreAcl);
    } else {
        procId = cloneSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceKey, restoreAcl, customSFT);
    }
    return procId;
}
Also used : Path(org.apache.hadoop.fs.Path) SnapshotManifest(org.apache.hadoop.hbase.snapshot.SnapshotManifest) TableName(org.apache.hadoop.hbase.TableName) FileSystem(org.apache.hadoop.fs.FileSystem) MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) SnapshotDescription(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription) SnapshotDoesNotExistException(org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)
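
From a client's perspective the same branching applies: if the destination table exists the snapshot is restored over it, otherwise it is cloned. A minimal sketch of that decision using the public Admin API; the names are placeholders, and the disable step reflects the requirement that a table be disabled before a restore:

    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Admin admin = conn.getAdmin()) {
        TableName table = TableName.valueOf("someTable");
        if (admin.tableExists(table)) {
            // restoreSnapshot targets the snapshot's own table, which must be disabled.
            if (admin.isTableEnabled(table)) {
                admin.disableTable(table);
            }
            admin.restoreSnapshot("my-snapshot");
        } else {
            admin.cloneSnapshot("my-snapshot", table);
        }
    }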

Aggregations

TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor) 639
Test (org.junit.Test) 356
TableName (org.apache.hadoop.hbase.TableName) 237
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo) 180
IOException (java.io.IOException) 151
Put (org.apache.hadoop.hbase.client.Put) 142
Admin (org.apache.hadoop.hbase.client.Admin) 136
Path (org.apache.hadoop.fs.Path) 124
Table (org.apache.hadoop.hbase.client.Table) 121
ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) 96
Configuration (org.apache.hadoop.conf.Configuration) 91
TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder) 77
ArrayList (java.util.ArrayList) 75
FileSystem (org.apache.hadoop.fs.FileSystem) 66
Result (org.apache.hadoop.hbase.client.Result) 66
HRegion (org.apache.hadoop.hbase.regionserver.HRegion) 64
Connection (org.apache.hadoop.hbase.client.Connection) 59
Scan (org.apache.hadoop.hbase.client.Scan) 50
Get (org.apache.hadoop.hbase.client.Get) 49
List (java.util.List) 39