Example 36 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class CatalogJanitor method checkDaughterInFs.

/**
 * Checks if a daughter region -- either splitA or splitB -- still holds references to the parent.
 * @param parent Parent region
 * @param daughter Daughter region
 * @return A pair where the first boolean indicates whether the daughter region directory
 *         exists in the filesystem, and the second whether the daughter still holds
 *         references to the parent.
 */
private static Pair<Boolean, Boolean> checkDaughterInFs(MasterServices services, final RegionInfo parent, final RegionInfo daughter) throws IOException {
    if (daughter == null) {
        return new Pair<>(Boolean.FALSE, Boolean.FALSE);
    }
    FileSystem fs = services.getMasterFileSystem().getFileSystem();
    Path rootdir = services.getMasterFileSystem().getRootDir();
    Path tabledir = CommonFSUtils.getTableDir(rootdir, daughter.getTable());
    Path daughterRegionDir = new Path(tabledir, daughter.getEncodedName());
    HRegionFileSystem regionFs;
    try {
        if (!CommonFSUtils.isExists(fs, daughterRegionDir)) {
            return new Pair<>(Boolean.FALSE, Boolean.FALSE);
        }
    } catch (IOException ioe) {
        LOG.error("Error trying to determine if daughter region exists, " + "assuming exists and has references", ioe);
        return new Pair<>(Boolean.TRUE, Boolean.TRUE);
    }
    boolean references = false;
    TableDescriptor parentDescriptor = services.getTableDescriptors().get(parent.getTable());
    try {
        regionFs = HRegionFileSystem.openRegionFromFileSystem(services.getConfiguration(), fs, tabledir, daughter, true);
        for (ColumnFamilyDescriptor family : parentDescriptor.getColumnFamilies()) {
            references = regionFs.hasReferences(family.getNameAsString());
            if (references) {
                break;
            }
        }
    } catch (IOException e) {
        LOG.error("Error trying to determine referenced files from : " + daughter.getEncodedName() + ", to: " + parent.getEncodedName() + " assuming has references", e);
        return new Pair<>(Boolean.TRUE, Boolean.TRUE);
    }
    return new Pair<>(Boolean.TRUE, references);
}
Also used : Path(org.apache.hadoop.fs.Path) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) FileSystem(org.apache.hadoop.fs.FileSystem) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) IOException(java.io.IOException) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Pair(org.apache.hadoop.hbase.util.Pair)
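
For context, CatalogJanitor uses this pair when deciding whether a split parent can be cleaned. A minimal sketch of that check, assuming the Pair accessors shown above (the helper mirrors CatalogJanitor's hasNoReferences, reproduced here as a sketch rather than verbatim source):

private static boolean hasNoReferences(final Pair<Boolean, Boolean> p) {
    // A daughter is "reference-free" if its directory does not exist at all,
    // or if it exists but no store file still references the parent region.
    return !p.getFirst() || !p.getSecond();
}

The parent's files become eligible for archiving only once both daughters pass this check.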

Example 37 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class AssignRegionHandler method process.

@Override
public void process() throws IOException {
    HRegionServer rs = getServer();
    String encodedName = regionInfo.getEncodedName();
    byte[] encodedNameBytes = regionInfo.getEncodedNameAsBytes();
    String regionName = regionInfo.getRegionNameAsString();
    Region onlineRegion = rs.getRegion(encodedName);
    if (onlineRegion != null) {
        LOG.warn("Received OPEN for {} which is already online", regionName);
        // The RPC that scheduled this handler may have succeeded while its response was lost;
        // the master will retry, and since the region is already open there is no need to call
        // reportRegionStateTransition again.
        return;
    }
    Boolean previous = rs.getRegionsInTransitionInRS().putIfAbsent(encodedNameBytes, Boolean.TRUE);
    if (previous != null) {
        if (previous) {
            // The region is opening and this maybe a retry on the rpc call, it is safe to ignore it.
            LOG.info("Receiving OPEN for {} which we are already trying to OPEN" + " - ignoring this new request for this region.", regionName);
        } else {
            // The region is closing. This is possible as we will update the region state to CLOSED when
            // calling reportRegionStateTransition, so the HMaster will think the region is offline,
            // before we actually close the region, as reportRegionStateTransition is part of the
            // closing process.
            long backoff = retryCounter.getBackoffTimeAndIncrementAttempts();
            LOG.info("Receiving OPEN for {} which we are trying to close, try again after {}ms", regionName, backoff);
            rs.getExecutorService().delayedSubmit(this, backoff, TimeUnit.MILLISECONDS);
        }
        return;
    }
    LOG.info("Open {}", regionName);
    HRegion region;
    try {
        TableDescriptor htd = tableDesc != null ? tableDesc : rs.getTableDescriptors().get(regionInfo.getTable());
        if (htd == null) {
            throw new IOException("Missing table descriptor for " + regionName);
        }
        // Pass null for the last parameter, which used to be a CancelableProgressable, as the
        // opening can no longer be interrupted by a close request.
        Configuration conf = rs.getConfiguration();
        region = HRegion.openHRegion(regionInfo, htd, rs.getWAL(regionInfo), conf, rs, null);
    } catch (IOException e) {
        cleanUpAndReportFailure(e);
        return;
    }
    // From here on out, this is the point of no return (PONR); we cannot revert. The only way
    // to handle an exception from here on is to abort the region server.
    rs.postOpenDeployTasks(new PostOpenDeployContext(region, openProcId, masterSystemTime));
    rs.addRegion(region);
    LOG.info("Opened {}", regionName);
    // Cache the open region procedure id after the region transition report succeeds.
    rs.finishRegionProcedure(openProcId);
    Boolean current = rs.getRegionsInTransitionInRS().remove(regionInfo.getEncodedNameAsBytes());
    if (current == null) {
        // Should NEVER happen, but let's be paranoid.
        LOG.error("Bad state: we've just opened {} which was NOT in transition", regionName);
    } else if (!current) {
        // Should NEVER happen, but let's be paranoid.
        LOG.error("Bad state: we've just opened {} which was closing", regionName);
    }
}
Also used : HRegion(org.apache.hadoop.hbase.regionserver.HRegion) PostOpenDeployContext(org.apache.hadoop.hbase.regionserver.RegionServerServices.PostOpenDeployContext) Configuration(org.apache.hadoop.conf.Configuration) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Region(org.apache.hadoop.hbase.regionserver.Region) IOException(java.io.IOException) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer)
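
The putIfAbsent guard above is what serializes concurrent OPEN and CLOSE handlers for the same region. A standalone sketch of the pattern (the map construction is illustrative, not the exact field initialization in HRegionServer; it assumes java.util.concurrent.ConcurrentSkipListMap and org.apache.hadoop.hbase.util.Bytes):

// Boolean.TRUE marks a region that is opening, Boolean.FALSE one that is closing.
ConcurrentMap<byte[], Boolean> regionsInTransitionInRS =
    new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
// putIfAbsent is atomic: only the first handler claims the region. A later OPEN
// or CLOSE for the same region sees a non-null previous value and must either
// ignore the request or retry with backoff, exactly as process() does above.
Boolean previous = regionsInTransitionInRS.putIfAbsent(encodedNameBytes, Boolean.TRUE);
if (previous == null) {
    // We own the transition and may proceed to open the region.
}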

Example 38 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class TestHFileOutputFormat2 method testSerializeDeserializeFamilyDataBlockEncodingMap.

/**
 * Test for {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}.
 * Tests that the family data block encoding map is correctly serialized into
 * and deserialized from the configuration.
 *
 * @throws IOException
 */
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
@Test
public void testSerializeDeserializeFamilyDataBlockEncodingMap() throws IOException {
    for (int numCfs = 0; numCfs <= 3; numCfs++) {
        Configuration conf = new Configuration(this.util.getConfiguration());
        Map<String, DataBlockEncoding> familyToDataBlockEncoding = getMockColumnFamiliesForDataBlockEncoding(numCfs);
        Table table = Mockito.mock(Table.class);
        setupMockColumnFamiliesForDataBlockEncoding(table, familyToDataBlockEncoding);
        TableDescriptor tableDescriptor = table.getDescriptor();
        conf.set(HFileOutputFormat2.DATABLOCK_ENCODING_FAMILIES_CONF_KEY,
            HFileOutputFormat2.serializeColumnFamilyAttribute(
                HFileOutputFormat2.dataBlockEncodingDetails, Arrays.asList(tableDescriptor)));
        // read back family specific data block encoding settings from the
        // configuration
        Map<byte[], DataBlockEncoding> retrievedFamilyToDataBlockEncodingMap = HFileOutputFormat2.createFamilyDataBlockEncodingMap(conf);
        // verify that each family's retrieved encoding matches the mock values configured above
        for (Entry<String, DataBlockEncoding> entry : familyToDataBlockEncoding.entrySet()) {
            assertEquals("DataBlockEncoding configuration incorrect for column family:" + entry.getKey(), entry.getValue(), retrievedFamilyToDataBlockEncodingMap.get(Bytes.toBytes(entry.getKey())));
        }
    }
}
Also used : DataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding) Table(org.apache.hadoop.hbase.client.Table) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Ignore(org.junit.Ignore) Test(org.junit.Test)
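
The test mocks Table to supply the descriptor; for comparison, a real descriptor with a per-family data block encoding can be built through the public builder API (a sketch; the table and family names are made up):

TableDescriptor tableDescriptor = TableDescriptorBuilder
    .newBuilder(TableName.valueOf("encoding_test"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1"))
        // FAST_DIFF is one of the available DataBlockEncoding values.
        .setDataBlockEncoding(DataBlockEncoding.FAST_DIFF)
        .build())
    .build();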

Example 39 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class TestCopyTable method createTable.

private Table createTable(TableName tableName, byte[] family, boolean isMob) throws IOException {
    if (isMob) {
        ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(family).setMobEnabled(true).setMobThreshold(1).build();
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(cfd).build();
        return TEST_UTIL.createTable(desc, null);
    } else {
        return TEST_UTIL.createTable(tableName, family);
    }
}
Also used : ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)
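
A hypothetical use of this helper, writing a value longer than the 1-byte MOB threshold so it lands in MOB storage (the table, family, and value names are made up):

Table t = createTable(TableName.valueOf("testCopyTableMob"), Bytes.toBytes("f"), true);
Put p = new Put(Bytes.toBytes("row1"));
// Anything above the configured threshold (1 byte here) is written as a MOB cell.
p.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("mob-value"));
t.put(p);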

Example 40 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class TestImportExport method testWithMultipleDeleteFamilyMarkersOfSameRowSameFamily.

@Test
public void testWithMultipleDeleteFamilyMarkersOfSameRowSameFamily() throws Throwable {
    final TableName exportTable = TableName.valueOf(name.getMethodName());
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA)
            .setMaxVersions(5)
            .setKeepDeletedCells(KeepDeletedCells.TRUE)
            .build())
        .build();
    UTIL.getAdmin().createTable(desc);
    Table exportT = UTIL.getConnection().getTable(exportTable);
    // Add first version of QUAL
    Put p = new Put(ROW1);
    p.addColumn(FAMILYA, QUAL, now, QUAL);
    exportT.put(p);
    // Add Delete family marker
    Delete d = new Delete(ROW1, now + 3);
    exportT.delete(d);
    // Add second version of QUAL
    p = new Put(ROW1);
    p.addColumn(FAMILYA, QUAL, now + 5, Bytes.toBytes("s"));
    exportT.put(p);
    // Add second Delete family marker
    d = new Delete(ROW1, now + 7);
    exportT.delete(d);
    String[] args = new String[] { "-D" + ExportUtils.RAW_SCAN + "=true", exportTable.getNameAsString(), FQ_OUTPUT_DIR, // max number of key versions per key to export
    "1000" };
    assertTrue(runExport(args));
    final String importTable = name.getMethodName() + "import";
    desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(importTable))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA)
            .setMaxVersions(5)
            .setKeepDeletedCells(KeepDeletedCells.TRUE)
            .build())
        .build();
    UTIL.getAdmin().createTable(desc);
    Table importT = UTIL.getConnection().getTable(TableName.valueOf(importTable));
    args = new String[] { importTable, FQ_OUTPUT_DIR };
    assertTrue(runImport(args));
    Scan s = new Scan();
    s.readAllVersions();
    s.setRaw(true);
    ResultScanner importedTScanner = importT.getScanner(s);
    Result importedTResult = importedTScanner.next();
    ResultScanner exportedTScanner = exportT.getScanner(s);
    Result exportedTResult = exportedTScanner.next();
    try {
        Result.compareResults(exportedTResult, importedTResult);
    } catch (Throwable e) {
        fail("Original and imported tables data comparision failed with error:" + e.getMessage());
    } finally {
        exportT.close();
        importT.close();
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Scan(org.apache.hadoop.hbase.client.Scan) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
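
The raw scan is what makes the comparison meaningful: with setRaw(true) and readAllVersions(), the scanner returns delete family markers and shadowed cell versions, which is what lets Export carry them over. A minimal sketch of inspecting such a scan (the printout is illustrative):

Scan raw = new Scan();
raw.readAllVersions();
// Raw mode surfaces delete markers and the cells they would normally suppress.
raw.setRaw(true);
try (ResultScanner scanner = exportT.getScanner(raw)) {
    for (Result r : scanner) {
        // In raw mode a Result can contain both Put cells and Delete markers.
        System.out.println(r);
    }
}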

Aggregations

TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)639 Test (org.junit.Test)356 TableName (org.apache.hadoop.hbase.TableName)237 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)180 IOException (java.io.IOException)151 Put (org.apache.hadoop.hbase.client.Put)142 Admin (org.apache.hadoop.hbase.client.Admin)136 Path (org.apache.hadoop.fs.Path)124 Table (org.apache.hadoop.hbase.client.Table)121 ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor)96 Configuration (org.apache.hadoop.conf.Configuration)91 TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder)77 ArrayList (java.util.ArrayList)75 FileSystem (org.apache.hadoop.fs.FileSystem)66 Result (org.apache.hadoop.hbase.client.Result)66 HRegion (org.apache.hadoop.hbase.regionserver.HRegion)64 Connection (org.apache.hadoop.hbase.client.Connection)59 Scan (org.apache.hadoop.hbase.client.Scan)50 Get (org.apache.hadoop.hbase.client.Get)49 List (java.util.List)39