
Example 66 with ColumnFamilyDescriptor

Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.

In class TestDeleteColumnFamilyProcedureFromClient, the method deleteColumnFamilyWithMultipleRegions:

@Test
public void deleteColumnFamilyWithMultipleRegions() throws Exception {
    Admin admin = TEST_UTIL.getAdmin();
    TableDescriptor beforehtd = admin.getDescriptor(TABLENAME);
    FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
    // 1 - Check if table exists in descriptor
    assertTrue(admin.isTableAvailable(TABLENAME));
    // 2 - Check if all three families exist in descriptor
    assertEquals(3, beforehtd.getColumnFamilyCount());
    ColumnFamilyDescriptor[] families = beforehtd.getColumnFamilies();
    for (int i = 0; i < families.length; i++) {
        assertEquals("cf" + (i + 1), families[i].getNameAsString());
    }
    // 3 - Check if table exists in FS
    Path tableDir = CommonFSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), TABLENAME);
    assertTrue(fs.exists(tableDir));
    // 4 - Check if all the 3 column families exist in FS
    FileStatus[] fileStatus = fs.listStatus(tableDir);
    for (int i = 0; i < fileStatus.length; i++) {
        if (fileStatus[i].isDirectory()) {
            FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), new PathFilter() {

                @Override
                public boolean accept(Path p) {
                    // Skip the recovered-edits directory; only column family
                    // directories are of interest here.
                    return !p.getName().contains(HConstants.RECOVERED_EDITS_DIR);
                }
            });
            int k = 1;
            for (int j = 0; j < cf.length; j++) {
                if (cf[j].isDirectory() && !cf[j].getPath().getName().startsWith(".")) {
                    // JUnit's assertEquals takes the expected value first.
                    assertEquals("cf" + k, cf[j].getPath().getName());
                    k++;
                }
            }
        }
    }
    // TEST - Disable and delete the column family
    admin.disableTable(TABLENAME);
    admin.deleteColumnFamily(TABLENAME, Bytes.toBytes("cf2"));
    // 5 - Check if only 2 column families exist in the descriptor
    TableDescriptor afterhtd = admin.getDescriptor(TABLENAME);
    assertEquals(2, afterhtd.getColumnFamilyCount());
    ColumnFamilyDescriptor[] newFamilies = afterhtd.getColumnFamilies();
    assertEquals("cf1", newFamilies[0].getNameAsString());
    assertEquals("cf3", newFamilies[1].getNameAsString());
    // 6 - Check if the second column family is gone from the FS
    fileStatus = fs.listStatus(tableDir);
    for (int i = 0; i < fileStatus.length; i++) {
        if (fileStatus[i].isDirectory()) {
            FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), new PathFilter() {

                @Override
                public boolean accept(Path p) {
                    // Ignore sequence-id marker files left behind by WAL splitting.
                    return !WALSplitUtil.isSequenceIdFile(p);
                }
            });
            for (int j = 0; j < cf.length; j++) {
                if (cf[j].isDirectory()) {
                    assertFalse(cf[j].getPath().getName().equals("cf2"));
                }
            }
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) PathFilter(org.apache.hadoop.fs.PathFilter) FileStatus(org.apache.hadoop.fs.FileStatus) FileSystem(org.apache.hadoop.fs.FileSystem) Admin(org.apache.hadoop.hbase.client.Admin) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Test(org.junit.Test)
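
For context, this test assumes TABLENAME was created beforehand with exactly the families cf1, cf2 and cf3. A minimal setup sketch, assuming it runs in a @Before/@BeforeClass hook (TEST_UTIL and TABLENAME are the fixtures used above):

TableDescriptor htd = TableDescriptorBuilder.newBuilder(TABLENAME)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf2"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf3"))
    .build();
// Create the three-family table that the assertions above depend on.
TEST_UTIL.getAdmin().createTable(htd);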

Example 67 with ColumnFamilyDescriptor

Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.

In class TestSecureBulkLoadManager, the method prepareHFile:

private void prepareHFile(Path dir, byte[] key, byte[] value) throws Exception {
    TableDescriptor desc = testUtil.getAdmin().getDescriptor(TABLE);
    ColumnFamilyDescriptor family = desc.getColumnFamily(FAMILY);
    Compression.Algorithm compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
    CacheConfig writerCacheConf = new CacheConfig(conf, family, null, ByteBuffAllocator.HEAP);
    writerCacheConf.setCacheDataOnWrite(false);
    HFileContext hFileContext = new HFileContextBuilder()
        .withIncludesMvcc(false)
        .withIncludesTags(true)
        .withCompression(compression)
        .withCompressTags(family.isCompressTags())
        .withChecksumType(StoreUtils.getChecksumType(conf))
        .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf))
        .withBlockSize(family.getBlocksize())
        .withHBaseCheckSum(true)
        .withDataBlockEncoding(family.getDataBlockEncoding())
        .withEncryptionContext(Encryption.Context.NONE)
        .withCreateTime(EnvironmentEdgeManager.currentTime())
        .build();
    StoreFileWriter.Builder builder =
        new StoreFileWriter.Builder(conf, writerCacheConf, dir.getFileSystem(conf))
            .withOutputDir(new Path(dir, family.getNameAsString()))
            .withBloomType(family.getBloomFilterType())
            .withMaxKeyCount(Integer.MAX_VALUE)
            .withFileContext(hFileContext);
    StoreFileWriter writer = builder.build();
    Put put = new Put(key);
    put.addColumn(FAMILY, COLUMN, value);
    for (Cell c : put.get(FAMILY, COLUMN)) {
        writer.append(c);
    }
    writer.close();
}
Also used : Path(org.apache.hadoop.fs.Path) Compression(org.apache.hadoop.hbase.io.compress.Compression) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Put(org.apache.hadoop.hbase.client.Put) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Cell(org.apache.hadoop.hbase.Cell)
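
prepareHFile only writes a single-cell HFile under dir/<family>; loading it into the table is a separate step. A hedged sketch of how such a file is typically bulk-loaded (using the public BulkLoadHFiles entry point here, rather than the SecureBulkLoadManager internals this test exercises, is an assumption):

// Load every family directory under "dir" into TABLE in one call.
BulkLoadHFiles.create(conf).bulkLoad(TABLE, dir);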

Example 68 with ColumnFamilyDescriptor

Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.

In class TestScanWithBloomError, the method testThreeStoreFiles:

@Test
public void testThreeStoreFiles() throws IOException {
    ColumnFamilyDescriptor columnFamilyDescriptor =
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(FAMILY))
            .setCompressionType(Compression.Algorithm.GZ)
            .setBloomFilterType(bloomType)
            .setMaxVersions(TestMultiColumnScanner.MAX_VERSIONS)
            .build();
    region = TEST_UTIL.createTestRegion(TABLE_NAME, columnFamilyDescriptor);
    createStoreFile(new int[] { 1, 2, 6 });
    createStoreFile(new int[] { 1, 2, 3, 7 });
    createStoreFile(new int[] { 1, 9 });
    scanColSet(new int[] { 1, 4, 6, 7 }, new int[] { 1, 6, 7 });
    HBaseTestingUtil.closeRegionAndWAL(region);
}
Also used : ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) Test(org.junit.Test)
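
createStoreFile and scanColSet are private helpers of this test. A rough sketch of what one createStoreFile call amounts to, under assumed names (ROW, VALUE and the "col" + n qualifier scheme are illustrative, not the test's actual constants):

for (int col : new int[] { 1, 2, 6 }) {
    Put put = new Put(ROW);
    put.addColumn(Bytes.toBytes(FAMILY), Bytes.toBytes("col" + col), VALUE);
    region.put(put);
}
// Each flush seals the memstore into one new store file, which is how the
// test ends up with exactly three store files before scanning.
region.flush(true);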

Example 69 with ColumnFamilyDescriptor

Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.

In class BulkLoadHFilesTool, the method splitStoreFile:

private List<LoadQueueItem> splitStoreFile(LoadQueueItem item, TableDescriptor tableDesc, byte[] splitKey) throws IOException {
    Path hfilePath = item.getFilePath();
    byte[] family = item.getFamily();
    Path tmpDir = hfilePath.getParent();
    if (!tmpDir.getName().equals(TMP_DIR)) {
        tmpDir = new Path(tmpDir, TMP_DIR);
    }
    LOG.info("HFile at " + hfilePath + " no longer fits inside a single " + "region. Splitting...");
    String uniqueName = getUniqueName();
    ColumnFamilyDescriptor familyDesc = tableDesc.getColumnFamily(family);
    Path botOut = new Path(tmpDir, uniqueName + ".bottom");
    Path topOut = new Path(tmpDir, uniqueName + ".top");
    splitStoreFile(getConf(), hfilePath, familyDesc, splitKey, botOut, topOut);
    FileSystem fs = tmpDir.getFileSystem(getConf());
    fs.setPermission(tmpDir, FsPermission.valueOf("-rwxrwxrwx"));
    fs.setPermission(botOut, FsPermission.valueOf("-rwxrwxrwx"));
    fs.setPermission(topOut, FsPermission.valueOf("-rwxrwxrwx"));
    // Add these back at the *front* of the queue, so there's a lower
    // chance that the region will just split again before we get there.
    List<LoadQueueItem> lqis = new ArrayList<>(2);
    lqis.add(new LoadQueueItem(family, botOut));
    lqis.add(new LoadQueueItem(family, topOut));
    // If the source file is itself an intermediate split product living in the
    // tmp dir, it is not part of the original input files and can be deleted.
    try {
        if (tmpDir.getName().equals(TMP_DIR)) {
            fs.delete(hfilePath, false);
        }
    } catch (IOException e) {
        LOG.warn("Unable to delete temporary split file {}", hfilePath, e);
    }
    LOG.info("Successfully split into new HFiles {} and {}", botOut, topOut);
    return lqis;
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) ArrayList(java.util.ArrayList) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor)
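
A note on how this method is reached: during a bulk load, each queued HFile is matched against the current region boundaries, and a file whose key range crosses a boundary is split at that boundary and retried. A hedged sketch of the calling side (variable names are illustrative; the real grouping logic in this class is more involved):

// If the file's last key reaches past the region's end key, it cannot be
// loaded into a single region and must be split at the boundary.
if (Bytes.compareTo(lastKeyOfFile, regionEndKey) >= 0) {
    // Re-queue both halves at the front, per the comment in the method above.
    queue.addAll(0, splitStoreFile(item, tableDesc, regionEndKey));
}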

Example 70 with ColumnFamilyDescriptor

Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.

In class SnapshotManifest, the method addRegion:

protected void addRegion(Path tableDir, RegionInfo regionInfo, RegionVisitor visitor) throws IOException {
    boolean isMobRegion = MobUtils.isMobRegionInfo(regionInfo);
    try {
        Path baseDir = tableDir;
        // Open the RegionFS
        if (isMobRegion) {
            baseDir = CommonFSUtils.getTableDir(MobUtils.getMobHome(conf), regionInfo.getTable());
        }
        HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, rootFs, baseDir, regionInfo, true);
        monitor.rethrowException();
        // 1. dump region meta info into the snapshot directory
        LOG.debug("Storing region-info for snapshot.");
        Object regionData = visitor.regionOpen(regionInfo);
        monitor.rethrowException();
        // 2. iterate through all the stores in the region
        LOG.debug("Creating references for hfiles");
        // 1000 files/batch, far more than the number of store files under a single column family.
        for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) {
            Object familyData = visitor.familyOpen(regionData, cfd.getName());
            monitor.rethrowException();
            StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, htd, cfd, regionFs);
            List<StoreFileInfo> storeFiles = tracker.load();
            if (storeFiles.isEmpty()) {
                LOG.debug("No files under family: {}", cfd.getNameAsString());
                continue;
            }
            // 2.1. build the snapshot reference for the store
            // iterate through all the store's files and create "references".
            addReferenceFiles(visitor, regionData, familyData, storeFiles, false);
            visitor.familyClose(regionData, familyData);
        }
        visitor.regionClose(regionData);
    } catch (IOException e) {
        // the mob directory might not be created yet, so do nothing when it is a mob region
        if (!isMobRegion) {
            throw e;
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) StoreFileTracker(org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker) StoreFileInfo(org.apache.hadoop.hbase.regionserver.StoreFileInfo)
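
addRegion is invoked once per region while the snapshot manifest is being built. A hedged sketch of the calling pattern (the loop is illustrative; SnapshotManifest also exposes an addRegion(Path, RegionInfo) overload that supplies the visitor itself):

// Build manifest entries for every region of the snapshotted table.
for (RegionInfo region : regions) {
    manifest.addRegion(tableDir, region);
}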

Aggregations

ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor): 199
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 95
Test (org.junit.Test): 92
TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder): 78
IOException (java.io.IOException): 44
TableName (org.apache.hadoop.hbase.TableName): 44
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 42
Path (org.apache.hadoop.fs.Path): 41
Admin (org.apache.hadoop.hbase.client.Admin): 36
Configuration (org.apache.hadoop.conf.Configuration): 34
ArrayList (java.util.ArrayList): 32
Put (org.apache.hadoop.hbase.client.Put): 32
FileSystem (org.apache.hadoop.fs.FileSystem): 28
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 24
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 22
Get (org.apache.hadoop.hbase.client.Get): 20
Result (org.apache.hadoop.hbase.client.Result): 19
ColumnFamilyDescriptorBuilder (org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder): 17
Scan (org.apache.hadoop.hbase.client.Scan): 17
Table (org.apache.hadoop.hbase.client.Table): 17