Example 66 with HRegionInfo

use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.

the class TestStoreFile method testReferenceToHFileLink.

/**
   * This test creates an hfile and then the directory structures and files needed
   * to verify that references to hfilelinks (created by snapshot clones) can be
   * properly interpreted.
   */
@Test
public void testReferenceToHFileLink() throws IOException {
    // force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
    Configuration testConf = new Configuration(this.conf);
    FSUtils.setRootDir(testConf, testDir);
    // adding legal table name chars to verify regex handles it.
    HRegionInfo hri = new HRegionInfo(TableName.valueOf("_original-evil-name"));
    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs, FSUtils.getTableDir(testDir, hri.getTable()), hri);
    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
    // Make a store file and write data to it. <root>/<tablename>/<rgn>/<cf>/<file>
    StoreFileWriter writer = new StoreFileWriter.Builder(testConf, cacheConf, this.fs).withFilePath(regionFs.createTempName()).withFileContext(meta).build();
    writeStoreFile(writer);
    Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
    // create link to store file. <root>/clone/region/<cf>/<hfile>-<region>-<table>
    HRegionInfo hriClone = new HRegionInfo(TableName.valueOf("clone"));
    HRegionFileSystem cloneRegionFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs, FSUtils.getTableDir(testDir, hri.getTable()), hriClone);
    Path dstPath = cloneRegionFs.getStoreDir(TEST_FAMILY);
    HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
    Path linkFilePath = new Path(dstPath, HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
    // create splits of the link.
    // <root>/clone/splitA/<cf>/<reftohfilelink>,
    // <root>/clone/splitB/<cf>/<reftohfilelink>
    HRegionInfo splitHriA = new HRegionInfo(hri.getTable(), null, SPLITKEY);
    HRegionInfo splitHriB = new HRegionInfo(hri.getTable(), SPLITKEY, null);
    StoreFile f = new StoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE);
    f.createReader();
    // top
    Path pathA = splitStoreFile(cloneRegionFs, splitHriA, TEST_FAMILY, f, SPLITKEY, true);
    // bottom
    Path pathB = splitStoreFile(cloneRegionFs, splitHriB, TEST_FAMILY, f, SPLITKEY, false);
    f.closeReader(true);
    // OK test the thing
    FSUtils.logFileSystemState(fs, testDir, LOG);
    // There is a case where a file with the hfilelink pattern is actually a daughter
    // reference to an hfile link. There is code in StoreFile that handles this case.
    // Try to open store file from link
    StoreFile hsfA = new StoreFile(this.fs, pathA, testConf, cacheConf, BloomType.NONE);
    // Now confirm that I can read from the ref to link
    int count = 1;
    HFileScanner s = hsfA.createReader().getScanner(false, false);
    s.seekTo();
    while (s.next()) {
        count++;
    }
    // read some rows here
    assertTrue(count > 0);
    // Try to open store file from link
    StoreFile hsfB = new StoreFile(this.fs, pathB, testConf, cacheConf, BloomType.NONE);
    // Now confirm that I can read from the ref to link
    HFileScanner sB = hsfB.createReader().getScanner(false, false);
    sB.seekTo();
    // count++ because seekTo() already positioned the scanner on the first row
    count++;
    while (sB.next()) {
        count++;
    }
    // read the rest of the rows
    assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
Also used: org.apache.hadoop.hbase.HRegionInfo, org.apache.hadoop.fs.Path, org.apache.hadoop.conf.Configuration, org.apache.hadoop.hbase.io.hfile.HFileScanner, org.apache.hadoop.hbase.io.hfile.HFileContextBuilder, org.apache.hadoop.hbase.io.hfile.HFileContext, org.junit.Test
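
A follow-up sketch for context: the link file created above is recognized purely by its file name, which embeds the referenced table, region, and hfile. The snippet below (a minimal standalone sketch assuming the same HBase classpath as the test; the hfile name "abc123" is a hypothetical placeholder) shows how such a name is produced and detected:

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.HFileLink;

public class HFileLinkNameSketch {
    public static void main(String[] args) {
        // Region of the table that owns the physical hfile.
        HRegionInfo hri = new HRegionInfo(TableName.valueOf("_original-evil-name"));
        // "abc123" stands in for a real hfile name; purely illustrative.
        String linkName = HFileLink.createHFileLinkName(hri, "abc123");
        // The name alone is enough for HBase to detect a link, which is why the
        // test above cares that daughter references to links still parse.
        System.out.println(linkName);
        System.out.println("is link? " + HFileLink.isHFileLink(linkName));
    }
}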

Example 67 with HRegionInfo

use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.

the class TestStoreFile method checkHalfHFile.

private void checkHalfHFile(final HRegionFileSystem regionFs, final StoreFile f) throws IOException {
    Cell midkey = f.createReader().midkey();
    KeyValue midKV = (KeyValue) midkey;
    byte[] midRow = CellUtil.cloneRow(midKV);
    // Create top split.
    HRegionInfo topHri = new HRegionInfo(regionFs.getRegionInfo().getTable(), null, midRow);
    Path topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, midRow, true);
    // Create bottom split.
    HRegionInfo bottomHri = new HRegionInfo(regionFs.getRegionInfo().getTable(), midRow, null);
    Path bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, midRow, false);
    // Make readers on top and bottom.
    StoreFileReader top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE).createReader();
    StoreFileReader bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE).createReader();
    ByteBuffer previous = null;
    LOG.info("Midkey: " + midKV.toString());
    ByteBuffer bbMidkeyBytes = ByteBuffer.wrap(midKV.getKey());
    try {
        // Now make two half-file readers and assert they can read the full backing
        // file, one from the top and the other from the bottom.
        // Read from the top half first, then from the bottom.
        boolean first = true;
        ByteBuffer key = null;
        HFileScanner topScanner = top.getScanner(false, false);
        while ((!topScanner.isSeeked() && topScanner.seekTo()) || (topScanner.isSeeked() && topScanner.next())) {
            key = ByteBuffer.wrap(((KeyValue) topScanner.getKey()).getKey());
            if ((topScanner.getReader().getComparator().compare(midKV, key.array(), key.arrayOffset(), key.limit())) > 0) {
                fail("key=" + Bytes.toStringBinary(key) + " < midkey=" + midkey);
            }
            if (first) {
                first = false;
                LOG.info("First in top: " + Bytes.toString(Bytes.toBytes(key)));
            }
        }
        LOG.info("Last in top: " + Bytes.toString(Bytes.toBytes(key)));
        first = true;
        HFileScanner bottomScanner = bottom.getScanner(false, false);
        while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) {
            previous = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
            key = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
            if (first) {
                first = false;
                LOG.info("First in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
            }
            assertTrue(key.compareTo(bbMidkeyBytes) < 0);
        }
        if (previous != null) {
            LOG.info("Last in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
        }
        // Remove references.
        regionFs.cleanupDaughterRegion(topHri);
        regionFs.cleanupDaughterRegion(bottomHri);
        // Next test using a midkey that does not exist in the file.
        // First, use a key that is less than the first key. Ensure splits behave
        // properly.
        byte[] badmidkey = Bytes.toBytes("  .");
        assertTrue(fs.exists(f.getPath()));
        topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true);
        bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);
        assertNull(bottomPath);
        top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE).createReader();
        // Now read from the top.
        first = true;
        topScanner = top.getScanner(false, false);
        KeyValue.KeyOnlyKeyValue keyOnlyKV = new KeyValue.KeyOnlyKeyValue();
        while ((!topScanner.isSeeked() && topScanner.seekTo()) || topScanner.next()) {
            key = ByteBuffer.wrap(((KeyValue) topScanner.getKey()).getKey());
            keyOnlyKV.setKey(key.array(), 0 + key.arrayOffset(), key.limit());
            assertTrue(topScanner.getReader().getComparator().compare(keyOnlyKV, badmidkey, 0, badmidkey.length) >= 0);
            if (first) {
                first = false;
                KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key);
                LOG.info("First top when key < bottom: " + keyKV);
                String tmp = Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
                for (int i = 0; i < tmp.length(); i++) {
                    assertTrue(tmp.charAt(i) == 'a');
                }
            }
        }
        KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key);
        LOG.info("Last top when key < bottom: " + keyKV);
        String tmp = Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
        for (int i = 0; i < tmp.length(); i++) {
            assertTrue(tmp.charAt(i) == 'z');
        }
        // Remove references.
        regionFs.cleanupDaughterRegion(topHri);
        regionFs.cleanupDaughterRegion(bottomHri);
        // Test when badmidkey is greater than the last key in the file ('|||' > 'zz').
        badmidkey = Bytes.toBytes("|||");
        topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true);
        bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);
        assertNull(topPath);
        bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE).createReader();
        first = true;
        bottomScanner = bottom.getScanner(false, false);
        while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) {
            key = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
            if (first) {
                first = false;
                keyKV = KeyValueUtil.createKeyValueFromKey(key);
                LOG.info("First bottom when key > top: " + keyKV);
                tmp = Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
                for (int i = 0; i < tmp.length(); i++) {
                    assertTrue(tmp.charAt(i) == 'a');
                }
            }
        }
        keyKV = KeyValueUtil.createKeyValueFromKey(key);
        LOG.info("Last bottom when key > top: " + keyKV);
        for (int i = 0; i < tmp.length(); i++) {
            assertTrue(Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength()).charAt(i) == 'z');
        }
    } finally {
        if (top != null) {
            // evict since we are about to delete the file
            top.close(true);
        }
        if (bottom != null) {
            // evict since we are about to delete the file
            bottom.close(true);
        }
        fs.delete(f.getPath(), true);
    }
}
Also used: org.apache.hadoop.fs.Path, org.apache.hadoop.hbase.KeyValue, org.apache.hadoop.hbase.io.hfile.HFileScanner, java.nio.ByteBuffer, org.apache.hadoop.hbase.HRegionInfo, org.apache.hadoop.hbase.Cell
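
The top and bottom readers above work because each daughter region directory holds a small Reference marker rather than a copy of the data. A minimal sketch of how such markers are created and persisted (hedged: it assumes the same HBase version as the test; fs, daughterStoreDir, and parentHFileName are hypothetical placeholders):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.Reference;

public class HalfFileReferenceSketch {
    // Writes top/bottom reference markers for a parent hfile split at splitRow.
    static void writeHalfReferences(FileSystem fs, Path daughterStoreDir,
            String parentHFileName, byte[] splitRow) throws IOException {
        Reference topRef = Reference.createTopReference(splitRow);
        Reference bottomRef = Reference.createBottomReference(splitRow);
        // The ".top"/".bottom" suffixes are illustrative only; HBase derives the
        // real reference file names from the parent region's encoded name.
        topRef.write(fs, new Path(daughterStoreDir, parentHFileName + ".top"));
        bottomRef.write(fs, new Path(daughterStoreDir, parentHFileName + ".bottom"));
    }
}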

Example 68 with HRegionInfo

use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.

the class SnapshotTestingUtils method confirmSnapshotValid.

/**
   * Confirm that the snapshot contains references to all the files that should
   * be in the snapshot. This method also performs some redundant checks, such as
   * verifying the existence of the snapshot info and the region info, which are
   * always done by the MasterSnapshotVerifier at the end of the snapshot operation.
   */
public static void confirmSnapshotValid(HBaseProtos.SnapshotDescription snapshotDescriptor, TableName tableName, List<byte[]> nonEmptyTestFamilies, List<byte[]> emptyTestFamilies, Path rootDir, Admin admin, FileSystem fs) throws IOException {
    final Configuration conf = admin.getConfiguration();
    // check snapshot dir
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotDescriptor, rootDir);
    assertTrue(fs.exists(snapshotDir));
    HBaseProtos.SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
    // Extract regions and families with store files
    final Set<byte[]> snapshotFamilies = new TreeSet<>(Bytes.BYTES_COMPARATOR);
    SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc);
    Map<String, SnapshotRegionManifest> regionManifests = manifest.getRegionManifestsMap();
    for (SnapshotRegionManifest regionManifest : regionManifests.values()) {
        SnapshotReferenceUtil.visitRegionStoreFiles(regionManifest, new SnapshotReferenceUtil.StoreFileVisitor() {

            @Override
            public void storeFile(final HRegionInfo regionInfo, final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
                snapshotFamilies.add(Bytes.toBytes(family));
            }
        });
    }
    // Verify that there are store files in the specified families
    if (nonEmptyTestFamilies != null) {
        for (final byte[] familyName : nonEmptyTestFamilies) {
            assertTrue(snapshotFamilies.contains(familyName));
        }
    }
    // Verify that there are no store files in the specified families
    if (emptyTestFamilies != null) {
        for (final byte[] familyName : emptyTestFamilies) {
            assertFalse(snapshotFamilies.contains(familyName));
        }
    }
    // check the region snapshot for all the regions
    List<HRegionInfo> regions = admin.getTableRegions(tableName);
    // remove the non-default regions
    RegionReplicaUtil.removeNonDefaultRegions(regions);
    boolean hasMob = regionManifests.containsKey(MobUtils.getMobRegionInfo(tableName).getEncodedName());
    if (hasMob) {
        assertEquals(regions.size(), regionManifests.size() - 1);
    } else {
        assertEquals(regions.size(), regionManifests.size());
    }
    // Verify Regions (redundant check, see MasterSnapshotVerifier)
    for (HRegionInfo info : regions) {
        String regionName = info.getEncodedName();
        assertTrue(regionManifests.containsKey(regionName));
    }
}
Also used: org.apache.hadoop.fs.Path, org.apache.hadoop.conf.Configuration, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest, java.io.IOException, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos, org.apache.hadoop.hbase.HRegionInfo, java.util.TreeSet
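
For reference, a snapshot that confirmSnapshotValid can check would normally be created through the Admin API first. A minimal sketch (the table and snapshot names are hypothetical; it assumes a running cluster reachable through the default configuration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("demo");  // hypothetical table
            // Flush first so the snapshot references on-disk store files.
            admin.flush(table);
            admin.snapshot("demo-snap", table);           // hypothetical snapshot name
        }
    }
}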

Example 69 with HRegionInfo

use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.

the class BaseTestHBaseFsck method deleteRegion.

/**
   * Delete a region from assignments, from meta, or completely from HDFS.
   * @param unassign if true, unassign the region if it is assigned
   * @param metaRow  if true, remove the region's row from META
   * @param hdfs if true, remove the region's directory in HDFS
   * @param regionInfoOnly if true, remove only the region dir's .regioninfo file
   * @param replicaId replica id
   */
protected void deleteRegion(Configuration conf, final HTableDescriptor htd, byte[] startKey, byte[] endKey, boolean unassign, boolean metaRow, boolean hdfs, boolean regionInfoOnly, int replicaId) throws IOException, InterruptedException {
    LOG.info("** Before delete:");
    dumpMeta(htd.getTableName());
    List<HRegionLocation> locations;
    try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
        locations = rl.getAllRegionLocations();
    }
    for (HRegionLocation location : locations) {
        HRegionInfo hri = location.getRegionInfo();
        ServerName hsa = location.getServerName();
        if (Bytes.compareTo(hri.getStartKey(), startKey) == 0 && Bytes.compareTo(hri.getEndKey(), endKey) == 0 && hri.getReplicaId() == replicaId) {
            LOG.info("RegionName: " + hri.getRegionNameAsString());
            byte[] deleteRow = hri.getRegionName();
            if (unassign) {
                LOG.info("Undeploying region " + hri + " from server " + hsa);
                undeployRegion(connection, hsa, hri);
            }
            if (regionInfoOnly) {
                LOG.info("deleting hdfs .regioninfo data: " + hri.toString() + hsa.toString());
                Path rootDir = FSUtils.getRootDir(conf);
                FileSystem fs = rootDir.getFileSystem(conf);
                Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()), hri.getEncodedName());
                Path hriPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
                fs.delete(hriPath, true);
            }
            if (hdfs) {
                LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
                Path rootDir = FSUtils.getRootDir(conf);
                FileSystem fs = rootDir.getFileSystem(conf);
                Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()), hri.getEncodedName());
                HBaseFsck.debugLsr(conf, p);
                boolean success = fs.delete(p, true);
                LOG.info("Deleted " + p + " sucessfully? " + success);
                HBaseFsck.debugLsr(conf, p);
            }
            if (metaRow) {
                try (Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService)) {
                    Delete delete = new Delete(deleteRow);
                    meta.delete(delete);
                }
            }
        }
        LOG.info(hri.toString() + hsa.toString());
    }
    TEST_UTIL.getMetaTableRows(htd.getTableName());
    LOG.info("*** After delete:");
    dumpMeta(htd.getTableName());
}
Also used: org.apache.hadoop.hbase.HRegionInfo, org.apache.hadoop.fs.Path, org.apache.hadoop.hbase.client.Delete, org.apache.hadoop.hbase.client.RegionLocator, org.apache.hadoop.hbase.HRegionLocation, org.apache.hadoop.hbase.client.Table, org.apache.hadoop.hbase.ServerName, org.apache.hadoop.fs.FileSystem, org.apache.hadoop.hbase.regionserver.HRegionFileSystem
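
The same region-directory path is assembled twice in the method above; a small hypothetical helper (same FSUtils calls, just factored out) makes the on-disk layout <root>/data/<namespace>/<table>/<encoded-region> explicit:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSUtils;

final class RegionPathSketch {
    private RegionPathSketch() {
    }

    // Directory holding the region's column families and its .regioninfo file.
    static Path regionDir(Configuration conf, TableName table, HRegionInfo hri)
            throws IOException {
        Path rootDir = FSUtils.getRootDir(conf);
        return new Path(FSUtils.getTableDir(rootDir, table), hri.getEncodedName());
    }
}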

Example 70 with HRegionInfo

use of org.apache.hadoop.hbase.HRegionInfo in project hbase by apache.

the class TestFavoredNodeAssignmentHelper method secondaryAndTertiaryRSPlacementHelper.

private Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>> secondaryAndTertiaryRSPlacementHelper(int regionCount, Map<String, Integer> rackToServerCount) {
    Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<HRegionInfo, ServerName>();
    List<ServerName> servers = getServersFromRack(rackToServerCount);
    FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
    Map<ServerName, List<HRegionInfo>> assignmentMap = new HashMap<ServerName, List<HRegionInfo>>();
    helper.initialize();
    // create regions
    List<HRegionInfo> regions = new ArrayList<>(regionCount);
    for (int i = 0; i < regionCount; i++) {
        HRegionInfo region = new HRegionInfo(TableName.valueOf(name.getMethodName()), Bytes.toBytes(i), Bytes.toBytes(i + 1));
        regions.add(region);
    }
    // place the regions
    helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
    return new Triple<>(primaryRSMap, helper, regions);
}
Also used: org.apache.hadoop.hbase.HRegionInfo, org.apache.hadoop.hbase.util.Triple, java.util.HashMap, org.apache.hadoop.hbase.ServerName, java.util.ArrayList, java.util.List
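
The Triple returned above is what the placement tests feed into the next step. A sketch of how it is typically consumed (hedged: it assumes the same test-class context, so secondaryAndTertiaryRSPlacementHelper and the JUnit asserts are in scope, and that the helper's placeSecondaryAndTertiaryRS method behaves as in this codebase):

private void checkSecondaryAndTertiaryPlacement(Map<String, Integer> rackToServerCount)
        throws IOException {
    Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>> t =
            secondaryAndTertiaryRSPlacementHelper(100, rackToServerCount);
    Map<HRegionInfo, ServerName> primaryRSMap = t.getFirst();
    FavoredNodeAssignmentHelper helper = t.getSecond();
    // For each region, pick two more servers that avoid the primary
    // (and, when several racks exist, the primary's rack).
    Map<HRegionInfo, ServerName[]> secondaryAndTertiary =
            helper.placeSecondaryAndTertiaryRS(primaryRSMap);
    for (HRegionInfo region : t.getThird()) {
        ServerName[] favored = secondaryAndTertiary.get(region);
        assertNotNull(favored);
        // One secondary plus one tertiary per region.
        assertEquals(2, favored.length);
    }
}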
