Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
From class TestImportExport, method testTagsAddition.
/**
 * Add cell tags to delete mutations, run the export and import tools, and
 * verify that the tags are also present in the imported table.
 * @throws Throwable on any failure
 */
@Test
public void testTagsAddition() throws Throwable {
  final TableName exportTable = TableName.valueOf(name.getMethodName());
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(exportTable)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA)
      .setMaxVersions(5)
      .setKeepDeletedCells(KeepDeletedCells.TRUE)
      .build())
    .setCoprocessor(MetadataController.class.getName())
    .build();
  UTIL.getAdmin().createTable(desc);

  Table exportT = UTIL.getConnection().getTable(exportTable);
  // Add first version of QUAL
  Put p = new Put(ROW1);
  p.addColumn(FAMILYA, QUAL, now, QUAL);
  exportT.put(p);
  // Add a delete family marker
  Delete d = new Delete(ROW1, now + 3);
  // Add the test attribute to the delete mutation.
  d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG));
  exportT.delete(d);

  // Run the export tool with KeyValueCodecWithTags as the codec, so that tags
  // are encoded and decoded in the RPC calls.
  String[] args = new String[] {
    "-D" + ExportUtils.RAW_SCAN + "=true",
    "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags",
    exportTable.getNameAsString(),
    FQ_OUTPUT_DIR,
    // max number of key versions per key to export
    "1000" };
  assertTrue(runExport(args));
  // Assert the tag exists in exportTable
  checkWhetherTagExists(exportTable, true);

  // Create an import table with MetadataController.
  final TableName importTable = TableName.valueOf("importWithTestTagsAddition");
  TableDescriptor importTableDesc = TableDescriptorBuilder.newBuilder(importTable)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA)
      .setMaxVersions(5)
      .setKeepDeletedCells(KeepDeletedCells.TRUE)
      .build())
    .setCoprocessor(MetadataController.class.getName())
    .build();
  UTIL.getAdmin().createTable(importTableDesc);

  // Run the import tool with the same codec so the tags survive the RPC round trip.
  args = new String[] {
    "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags",
    importTable.getNameAsString(),
    FQ_OUTPUT_DIR };
  assertTrue(runImport(args));
  // Make sure that the tags exist in the imported table.
  checkWhetherTagExists(importTable, true);
}
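The helper checkWhetherTagExists is not shown in this snippet. A minimal sketch of what it might look like, assuming it lives in the same test class (so UTIL and ROW1 are available) and performs a raw scan through a connection configured with KeyValueCodecWithTags; the real helper in TestImportExport may differ:

private void checkWhetherTagExists(TableName table, boolean tagExists) throws IOException {
  // Tags only cross the wire when the client codec supports them.
  Configuration conf = new Configuration(UTIL.getConfiguration());
  conf.set("hbase.client.rpc.codec",
    "org.apache.hadoop.hbase.codec.KeyValueCodecWithTags");
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table t = connection.getTable(table)) {
    // A raw scan returns delete markers as well as puts.
    Scan scan = new Scan().withStartRow(ROW1).setRaw(true);
    try (ResultScanner scanner = t.getScanner(scan)) {
      boolean deleteFound = false;
      for (Result result : scanner) {
        for (Cell cell : result.listCells()) {
          if (CellUtil.isDelete(cell)) {
            deleteFound = true;
            List<Tag> tags = PrivateCellUtil.getTags(cell);
            // Expect exactly one tag on the delete marker when tags were preserved.
            assertEquals(tagExists ? 1 : 0, tags.size());
          }
        }
      }
      assertTrue(deleteFound);
    }
  }
}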
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
From class TestImportExport, method testTagsWithEmptyCodec.
/**
 * Set both hbase.client.rpc.codec and hbase.client.default.rpc.codec to the empty
 * string so that no codec is used, and verify that no tags are returned in the response.
 * @throws Exception on any failure
 */
@Test
public void testTagsWithEmptyCodec() throws Exception {
  TableName tableName = TableName.valueOf(name.getMethodName());
  TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA)
      .setMaxVersions(5)
      .setKeepDeletedCells(KeepDeletedCells.TRUE)
      .build())
    .setCoprocessor(MetadataController.class.getName())
    .build();
  UTIL.getAdmin().createTable(tableDesc);
  Configuration conf = new Configuration(UTIL.getConfiguration());
  conf.set(RPC_CODEC_CONF_KEY, "");
  conf.set(DEFAULT_CODEC_CLASS, "");
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table table = connection.getTable(tableName)) {
    // Add first version of QUAL
    Put p = new Put(ROW1);
    p.addColumn(FAMILYA, QUAL, now, QUAL);
    table.put(p);
    // Add a delete family marker
    Delete d = new Delete(ROW1, now + 3);
    // Add the test attribute to the delete mutation.
    d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG));
    table.delete(d);
    // Since RPC_CODEC_CONF_KEY and DEFAULT_CODEC_CLASS are both set to the empty
    // string, no codec is used and tags are not encoded/decoded over RPC.
    Scan scan = new Scan().withStartRow(ROW1).setRaw(true);
    ResultScanner scanner = table.getScanner(scan);
    int count = 0;
    Result result;
    while ((result = scanner.next()) != null) {
      List<Cell> cells = result.listCells();
      assertEquals(2, cells.size());
      Cell cell = cells.get(0);
      assertTrue(CellUtil.isDelete(cell));
      List<Tag> tags = PrivateCellUtil.getTags(cell);
      assertEquals(0, tags.size());
      count++;
    }
    assertEquals(1, count);
  } finally {
    UTIL.deleteTable(tableName);
  }
}
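For reference, the two constants cleared above are defined in org.apache.hadoop.hbase.ipc.RpcClient; the sketch below assumes their usual values and shows the inverse configuration that makes tags flow to the client:

// RPC_CODEC_CONF_KEY  = "hbase.client.rpc.codec"         (assumed value)
// DEFAULT_CODEC_CLASS = "hbase.client.default.rpc.codec" (assumed value)
// To receive tags on the client, point the codec at KeyValueCodecWithTags instead:
Configuration conf = new Configuration(UTIL.getConfiguration());
conf.set("hbase.client.rpc.codec",
  "org.apache.hadoop.hbase.codec.KeyValueCodecWithTags");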
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
From class TestImportExport, method testWithDeletes.
@Test
public void testWithDeletes() throws Throwable {
  TableDescriptor desc =
    TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA)
        .setMaxVersions(5)
        .setKeepDeletedCells(KeepDeletedCells.TRUE)
        .build())
      .build();
  UTIL.getAdmin().createTable(desc);
  try (Table t = UTIL.getConnection().getTable(desc.getTableName())) {
    Put p = new Put(ROW1);
    p.addColumn(FAMILYA, QUAL, now, QUAL);
    p.addColumn(FAMILYA, QUAL, now + 1, QUAL);
    p.addColumn(FAMILYA, QUAL, now + 2, QUAL);
    p.addColumn(FAMILYA, QUAL, now + 3, QUAL);
    p.addColumn(FAMILYA, QUAL, now + 4, QUAL);
    t.put(p);
    Delete d = new Delete(ROW1, now + 3);
    t.delete(d);
    d = new Delete(ROW1);
    d.addColumns(FAMILYA, QUAL, now + 2);
    t.delete(d);
  }
  String[] args = new String[] {
    "-D" + ExportUtils.RAW_SCAN + "=true",
    name.getMethodName(),
    FQ_OUTPUT_DIR,
    // max number of key versions per key to export
    "1000" };
  assertTrue(runExport(args));
  final String IMPORT_TABLE = name.getMethodName() + "import";
  desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(IMPORT_TABLE))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA)
      .setMaxVersions(5)
      .setKeepDeletedCells(KeepDeletedCells.TRUE)
      .build())
    .build();
  UTIL.getAdmin().createTable(desc);
  try (Table t = UTIL.getConnection().getTable(desc.getTableName())) {
    args = new String[] { IMPORT_TABLE, FQ_OUTPUT_DIR };
    assertTrue(runImport(args));
    Scan s = new Scan();
    s.readAllVersions();
    s.setRaw(true);
    ResultScanner scanner = t.getScanner(s);
    Result r = scanner.next();
    Cell[] res = r.rawCells();
    // A raw scan keeps delete markers. The delete family marker (empty qualifier)
    // sorts first; within QUAL, cells sort newest-first, with the delete column
    // marker at now + 2 ahead of the put at the same timestamp.
    assertTrue(PrivateCellUtil.isDeleteFamily(res[0]));
    assertEquals(now + 4, res[1].getTimestamp());
    assertEquals(now + 3, res[2].getTimestamp());
    assertTrue(CellUtil.isDelete(res[3]));
    assertEquals(now + 2, res[4].getTimestamp());
    assertEquals(now + 1, res[5].getTimestamp());
    assertEquals(now, res[6].getTimestamp());
  }
}
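The runExport and runImport helpers are referenced throughout these tests but not shown. A plausible sketch, assuming they drive the standard org.apache.hadoop.hbase.mapreduce Export and Import tools through Hadoop's ToolRunner (the real helpers in TestImportExport may differ):

private boolean runExport(String[] args) throws Throwable {
  // Export implements org.apache.hadoop.util.Tool; exit code 0 means success.
  int status = ToolRunner.run(new Configuration(UTIL.getConfiguration()),
    new org.apache.hadoop.hbase.mapreduce.Export(), args);
  return status == 0;
}

private boolean runImport(String[] args) throws Throwable {
  int status = ToolRunner.run(new Configuration(UTIL.getConfiguration()),
    new org.apache.hadoop.hbase.mapreduce.Import(), args);
  return status == 0;
}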
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
From class SnapshotManager, method cloneSnapshot.
/**
 * Clone the specified snapshot.
 * The clone will fail if the destination table has a snapshot or restore in progress.
 *
 * @param reqSnapshot snapshot descriptor from the request
 * @param tableName table to clone into
 * @param snapshot snapshot descriptor read from the file system
 * @param snapshotTableDesc table descriptor stored with the snapshot
 * @param nonceKey unique identifier to prevent duplicated RPCs
 * @param restoreAcl whether to restore the snapshot's ACLs on the new table
 * @param customSFT custom store file tracker to use for the new table, if any
 * @return procId the ID of the clone snapshot procedure
 * @throws IOException if the clone fails
 */
private long cloneSnapshot(final SnapshotDescription reqSnapshot, final TableName tableName,
    final SnapshotDescription snapshot, final TableDescriptor snapshotTableDesc,
    final NonceKey nonceKey, final boolean restoreAcl, final String customSFT)
    throws IOException {
  MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
  TableDescriptor htd = TableDescriptorBuilder.copy(tableName, snapshotTableDesc);
  org.apache.hadoop.hbase.client.SnapshotDescription snapshotPOJO = null;
  if (cpHost != null) {
    snapshotPOJO = ProtobufUtil.createSnapshotDesc(snapshot);
    cpHost.preCloneSnapshot(snapshotPOJO, htd);
  }
  long procId;
  try {
    procId = cloneSnapshot(snapshot, htd, nonceKey, restoreAcl, customSFT);
  } catch (IOException e) {
    LOG.error("Exception occurred while cloning the snapshot " + snapshot.getName()
      + " as table " + tableName.getNameAsString(), e);
    throw e;
  }
  LOG.info("Clone snapshot=" + snapshot.getName() + " as table=" + tableName);
  if (cpHost != null) {
    cpHost.postCloneSnapshot(snapshotPOJO, htd);
  }
  return procId;
}
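From the client's perspective, this master-side path is typically reached through the public Admin API. A minimal sketch; the snapshot and table names are made up for illustration:

try (Connection conn = ConnectionFactory.createConnection(conf);
     Admin admin = conn.getAdmin()) {
  // Clones "my-snapshot" into a brand-new table; this fails if the table already exists.
  admin.cloneSnapshot("my-snapshot", TableName.valueOf("clonedTable"));
}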
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
From class SnapshotManager, method restoreOrCloneSnapshot.
/**
 * Restore or clone the specified snapshot: if the target table already exists the
 * snapshot is restored into it, otherwise a new table is cloned from the snapshot.
 * @param reqSnapshot snapshot descriptor from the request
 * @param nonceKey unique identifier to prevent duplicated RPCs
 * @param restoreAcl whether to restore the snapshot's ACLs
 * @param customSFT custom store file tracker to use when cloning, if any
 * @return the ID of the restore or clone snapshot procedure
 * @throws IOException if the snapshot does not exist or the operation fails
 */
public long restoreOrCloneSnapshot(final SnapshotDescription reqSnapshot,
    final NonceKey nonceKey, final boolean restoreAcl, String customSFT) throws IOException {
  FileSystem fs = master.getMasterFileSystem().getFileSystem();
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(reqSnapshot, rootDir);
  // Check that the snapshot exists
  if (!fs.exists(snapshotDir)) {
    LOG.error("A snapshot named '" + reqSnapshot.getName() + "' does not exist.");
    throw new SnapshotDoesNotExistException(ProtobufUtil.createSnapshotDesc(reqSnapshot));
  }
  // Get the snapshot info from the file system. reqSnapshot is a "fake" snapshot descriptor
  // carrying just the snapshot name and the table name to restore; it does not contain the
  // "real" snapshot information.
  SnapshotDescription snapshot = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
  SnapshotManifest manifest =
    SnapshotManifest.open(master.getConfiguration(), fs, snapshotDir, snapshot);
  TableDescriptor snapshotTableDesc = manifest.getTableDescriptor();
  TableName tableName = TableName.valueOf(reqSnapshot.getTable());
  // Sanity check the new table descriptor
  TableDescriptorChecker.sanityCheck(master.getConfiguration(), snapshotTableDesc);
  // Stop tracking "abandoned" handlers
  cleanupSentinels();
  // Verify snapshot validity
  SnapshotReferenceUtil.verifySnapshot(master.getConfiguration(), fs, manifest);
  // Execute the restore or clone: restore if the target table already exists,
  // otherwise clone the snapshot into a new table.
  long procId;
  if (master.getTableDescriptors().exists(tableName)) {
    procId =
      restoreSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceKey, restoreAcl);
  } else {
    procId = cloneSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceKey,
      restoreAcl, customSFT);
  }
  return procId;
}
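The client-side counterpart of this exists/doesn't-exist branch, using the public Admin API. A minimal sketch; the snapshot and table names are made up, and the snapshot is assumed to have been taken of that same table:

try (Connection conn = ConnectionFactory.createConnection(conf);
     Admin admin = conn.getAdmin()) {
  TableName table = TableName.valueOf("myTable");
  if (admin.tableExists(table)) {
    // Restoring into an existing table requires disabling it first.
    admin.disableTable(table);
    admin.restoreSnapshot("my-snapshot");
    admin.enableTable(table);
  } else {
    // No table yet: clone the snapshot into a new one.
    admin.cloneSnapshot("my-snapshot", table);
  }
}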