Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
From the class TestDeleteColumnFamilyProcedureFromClient, the method deleteColumnFamilyWithMultipleRegions:
@Test
public void deleteColumnFamilyWithMultipleRegions() throws Exception {
  Admin admin = TEST_UTIL.getAdmin();
  TableDescriptor beforehtd = admin.getDescriptor(TABLENAME);
  FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();

  // 1 - Check if table exists in descriptor
  assertTrue(admin.isTableAvailable(TABLENAME));

  // 2 - Check if all three families exist in descriptor
  assertEquals(3, beforehtd.getColumnFamilyCount());
  ColumnFamilyDescriptor[] families = beforehtd.getColumnFamilies();
  for (int i = 0; i < families.length; i++) {
    assertEquals("cf" + (i + 1), families[i].getNameAsString());
  }

  // 3 - Check if table exists in FS
  Path tableDir = CommonFSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), TABLENAME);
  assertTrue(fs.exists(tableDir));

  // 4 - Check if all three column families exist in FS
  FileStatus[] fileStatus = fs.listStatus(tableDir);
  for (int i = 0; i < fileStatus.length; i++) {
    if (fileStatus[i].isDirectory()) {
      FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), new PathFilter() {
        @Override
        public boolean accept(Path p) {
          return !p.getName().contains(HConstants.RECOVERED_EDITS_DIR);
        }
      });
      int k = 1;
      for (int j = 0; j < cf.length; j++) {
        if (cf[j].isDirectory() && !cf[j].getPath().getName().startsWith(".")) {
          assertEquals("cf" + k, cf[j].getPath().getName());
          k++;
        }
      }
    }
  }

  // TEST - Disable and delete the column family
  admin.disableTable(TABLENAME);
  admin.deleteColumnFamily(TABLENAME, Bytes.toBytes("cf2"));

  // 5 - Check if only 2 column families exist in the descriptor
  TableDescriptor afterhtd = admin.getDescriptor(TABLENAME);
  assertEquals(2, afterhtd.getColumnFamilyCount());
  ColumnFamilyDescriptor[] newFamilies = afterhtd.getColumnFamilies();
  assertEquals("cf1", newFamilies[0].getNameAsString());
  assertEquals("cf3", newFamilies[1].getNameAsString());

  // 6 - Check if the second column family is gone from the FS
  fileStatus = fs.listStatus(tableDir);
  for (int i = 0; i < fileStatus.length; i++) {
    if (fileStatus[i].isDirectory()) {
      FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), new PathFilter() {
        @Override
        public boolean accept(Path p) {
          return !WALSplitUtil.isSequenceIdFile(p);
        }
      });
      for (int j = 0; j < cf.length; j++) {
        if (cf[j].isDirectory()) {
          assertFalse("cf2".equals(cf[j].getPath().getName()));
        }
      }
    }
  }
}
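For context, the three-family table this test expects can be created up front with ColumnFamilyDescriptorBuilder. A minimal setup sketch, assuming the same TABLENAME constant and a connected Admin (this is not the test's actual setup code):

  TableDescriptor htd = TableDescriptorBuilder.newBuilder(TABLENAME)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf2"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf3"))
    .build();
  admin.createTable(htd);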
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
From the class TestSecureBulkLoadManager, the method prepareHFile:
private void prepareHFile(Path dir, byte[] key, byte[] value) throws Exception {
  TableDescriptor desc = testUtil.getAdmin().getDescriptor(TABLE);
  ColumnFamilyDescriptor family = desc.getColumnFamily(FAMILY);
  Compression.Algorithm compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;

  CacheConfig writerCacheConf = new CacheConfig(conf, family, null, ByteBuffAllocator.HEAP);
  writerCacheConf.setCacheDataOnWrite(false);

  HFileContext hFileContext = new HFileContextBuilder()
    .withIncludesMvcc(false)
    .withIncludesTags(true)
    .withCompression(compression)
    .withCompressTags(family.isCompressTags())
    .withChecksumType(StoreUtils.getChecksumType(conf))
    .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf))
    .withBlockSize(family.getBlocksize())
    .withHBaseCheckSum(true)
    .withDataBlockEncoding(family.getDataBlockEncoding())
    .withEncryptionContext(Encryption.Context.NONE)
    .withCreateTime(EnvironmentEdgeManager.currentTime())
    .build();

  StoreFileWriter.Builder builder =
    new StoreFileWriter.Builder(conf, writerCacheConf, dir.getFileSystem(conf))
      .withOutputDir(new Path(dir, family.getNameAsString()))
      .withBloomType(family.getBloomFilterType())
      .withMaxKeyCount(Integer.MAX_VALUE)
      .withFileContext(hFileContext);
  StoreFileWriter writer = builder.build();

  Put put = new Put(key);
  put.addColumn(FAMILY, COLUMN, value);
  for (Cell c : put.get(FAMILY, COLUMN)) {
    writer.append(c);
  }
  writer.close();
}
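Once prepareHFile has written a store file under dir/<family>, the test pushes it through the secure bulk load path. For reference, the plain client-side equivalent uses the public BulkLoadHFiles API; a minimal sketch, assuming dir follows the <family>/<hfile> layout produced above:

  // Load every HFile under `dir` into TABLE; the family subdirectory
  // names must match the table's column family names.
  BulkLoadHFiles.create(conf).bulkLoad(TABLE, dir);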
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
From the class TestScanWithBloomError, the method testThreeStoreFiles:
@Test
public void testThreeStoreFiles() throws IOException {
  ColumnFamilyDescriptor columnFamilyDescriptor =
    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(FAMILY))
      .setCompressionType(Compression.Algorithm.GZ)
      .setBloomFilterType(bloomType)
      .setMaxVersions(TestMultiColumnScanner.MAX_VERSIONS)
      .build();
  region = TEST_UTIL.createTestRegion(TABLE_NAME, columnFamilyDescriptor);
  createStoreFile(new int[] { 1, 2, 6 });
  createStoreFile(new int[] { 1, 2, 3, 7 });
  createStoreFile(new int[] { 1, 9 });
  scanColSet(new int[] { 1, 4, 6, 7 }, new int[] { 1, 6, 7 });
  HBaseTestingUtil.closeRegionAndWAL(region);
}
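ColumnFamilyDescriptor is immutable once built, and each builder setting can be read back through a plain getter. A small illustrative check mirroring the settings above:

  assertEquals(Compression.Algorithm.GZ, columnFamilyDescriptor.getCompressionType());
  assertEquals(bloomType, columnFamilyDescriptor.getBloomFilterType());
  assertEquals(TestMultiColumnScanner.MAX_VERSIONS, columnFamilyDescriptor.getMaxVersions());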
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
From the class BulkLoadHFilesTool, the method splitStoreFile:
private List<LoadQueueItem> splitStoreFile(LoadQueueItem item, TableDescriptor tableDesc,
    byte[] splitKey) throws IOException {
  Path hfilePath = item.getFilePath();
  byte[] family = item.getFamily();
  Path tmpDir = hfilePath.getParent();
  if (!tmpDir.getName().equals(TMP_DIR)) {
    tmpDir = new Path(tmpDir, TMP_DIR);
  }
  LOG.info("HFile at " + hfilePath + " no longer fits inside a single region. Splitting...");
  String uniqueName = getUniqueName();
  ColumnFamilyDescriptor familyDesc = tableDesc.getColumnFamily(family);
  Path botOut = new Path(tmpDir, uniqueName + ".bottom");
  Path topOut = new Path(tmpDir, uniqueName + ".top");
  splitStoreFile(getConf(), hfilePath, familyDesc, splitKey, botOut, topOut);
  FileSystem fs = tmpDir.getFileSystem(getConf());
  fs.setPermission(tmpDir, FsPermission.valueOf("-rwxrwxrwx"));
  fs.setPermission(botOut, FsPermission.valueOf("-rwxrwxrwx"));
  fs.setPermission(topOut, FsPermission.valueOf("-rwxrwxrwx"));
  // Add these back at the *front* of the queue, so there's a lower
  // chance that the region will just split again before we get there.
  List<LoadQueueItem> lqis = new ArrayList<>(2);
  lqis.add(new LoadQueueItem(family, botOut));
  lqis.add(new LoadQueueItem(family, topOut));
  // If the current item is itself the product of a previous split, it is
  // not part of the original input files, so clean it up to save space.
  try {
    if (tmpDir.getName().equals(TMP_DIR)) {
      fs.delete(hfilePath, false);
    }
  } catch (IOException e) {
    LOG.warn("Unable to delete temporary split file " + hfilePath);
  }
  LOG.info("Successfully split into new HFiles " + botOut + " and " + topOut);
  return lqis;
}
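A side note on the permission calls: FsPermission.valueOf parses an ls-style mode string, so "-rwxrwxrwx" is simply mode 0777. An equivalent form using the octal constructor, if the string literal reads as cryptic:

  // Same permissions as FsPermission.valueOf("-rwxrwxrwx")
  FsPermission worldWritable = new FsPermission((short) 0777);
  fs.setPermission(tmpDir, worldWritable);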
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
From the class SnapshotManifest, the method addRegion:
protected void addRegion(Path tableDir, RegionInfo regionInfo, RegionVisitor visitor)
    throws IOException {
  boolean isMobRegion = MobUtils.isMobRegionInfo(regionInfo);
  try {
    Path baseDir = tableDir;
    // Open the RegionFS
    if (isMobRegion) {
      baseDir = CommonFSUtils.getTableDir(MobUtils.getMobHome(conf), regionInfo.getTable());
    }
    HRegionFileSystem regionFs =
      HRegionFileSystem.openRegionFromFileSystem(conf, rootFs, baseDir, regionInfo, true);
    monitor.rethrowException();

    // 1. dump region meta info into the snapshot directory
    LOG.debug("Storing region-info for snapshot.");
    Object regionData = visitor.regionOpen(regionInfo);
    monitor.rethrowException();

    // 2. iterate through all the stores in the region
    LOG.debug("Creating references for hfiles");
    // 1000 files/batch, far more than the number of store files under a single column family.
    for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) {
      Object familyData = visitor.familyOpen(regionData, cfd.getName());
      monitor.rethrowException();
      StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, htd, cfd, regionFs);
      List<StoreFileInfo> storeFiles = tracker.load();
      if (storeFiles.isEmpty()) {
        LOG.debug("No files under family: {}", cfd.getNameAsString());
        continue;
      }
      // 2.1. build the snapshot reference for the store:
      // iterate through all the store's files and create "references".
      addReferenceFiles(visitor, regionData, familyData, storeFiles, false);
      visitor.familyClose(regionData, familyData);
    }
    visitor.regionClose(regionData);
  } catch (IOException e) {
    // the mob directory might not be created yet, so do nothing when it is a mob region
    if (!isMobRegion) {
      throw e;
    }
  }
}
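addRegion runs on the master as part of building a snapshot manifest; from the client's side, the whole walk (region open, per-family store file references, region close) is triggered through the Admin API. A minimal sketch, assuming a connected Admin and a TableName tableName:

  // Taking a snapshot makes the master visit each region and, for every
  // ColumnFamilyDescriptor, record references to its store files.
  admin.snapshot("mySnapshot", tableName);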