
Example 1 with HFileSystem

Use of org.apache.hadoop.hbase.fs.HFileSystem in project hbase by apache.

From the class HRegionServer, method initializeFileSystem:

private void initializeFileSystem() throws IOException {
    // Get fs instance used by this RS. Is checksum verification enabled in HBase? If HBase
    // checksum verification is enabled, automatically switch off HDFS checksum verification.
    boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
    FSUtils.setFsDefault(this.conf, FSUtils.getWALRootDir(this.conf));
    this.walFs = new HFileSystem(this.conf, useHBaseChecksum);
    this.walRootDir = FSUtils.getWALRootDir(this.conf);
    // Set 'fs.defaultFS' to match the filesystem on hbase.rootdir else
    // underlying hadoop hdfs accessors will be going against wrong filesystem
    // (unless all is set to defaults).
    FSUtils.setFsDefault(this.conf, FSUtils.getRootDir(this.conf));
    this.fs = new HFileSystem(this.conf, useHBaseChecksum);
    this.rootDir = FSUtils.getRootDir(this.conf);
    this.tableDescriptors = getFsTableDescriptors();
}
Also used : HFileSystem(org.apache.hadoop.hbase.fs.HFileSystem)
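
For context, a minimal sketch of the checksum toggle this method (and the newer HBaseServerBase variant below) relies on. It only uses calls that appear in these examples plus HBaseConfiguration.create(); the printed values are illustrative and assume an hbase-site.xml is on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.fs.HFileSystem;

public class ChecksumToggleSketch {
    public static void main(String[] args) throws Exception {
        // Load hbase-*.xml from the classpath (assumption: one is available).
        Configuration conf = HBaseConfiguration.create();
        // Same lookup as initializeFileSystem(): HBase-level checksums default to on.
        boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
        // When the flag is true, the HFileSystem wrapper verifies HBase checksums
        // itself and switches off HDFS checksum verification on the backing fs.
        HFileSystem fs = new HFileSystem(conf, useHBaseChecksum);
        System.out.println("hbase checksums enabled: " + fs.useHBaseChecksum());
        System.out.println("backing filesystem: " + fs.getBackingFs().getUri());
    }
}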

Example 2 with HFileSystem

Use of org.apache.hadoop.hbase.fs.HFileSystem in project hbase by apache.

From the class HBaseServerBase, method initializeFileSystem:

protected final void initializeFileSystem() throws IOException {
    // Get fs instance used by this RS. Is checksum verification enabled in HBase? If HBase
    // checksum verification is enabled, automatically switch off HDFS checksum verification.
    boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
    String walDirUri = CommonFSUtils.getDirUri(this.conf, new Path(conf.get(CommonFSUtils.HBASE_WAL_DIR, conf.get(HConstants.HBASE_DIR))));
    // set WAL's uri
    if (walDirUri != null) {
        CommonFSUtils.setFsDefault(this.conf, walDirUri);
    }
    // init the WALFs
    this.walFs = new HFileSystem(this.conf, useHBaseChecksum);
    this.walRootDir = CommonFSUtils.getWALRootDir(this.conf);
    // Set 'fs.defaultFS' to match the filesystem on hbase.rootdir else
    // underlying hadoop hdfs accessors will be going against wrong filesystem
    // (unless all is set to defaults).
    String rootDirUri = CommonFSUtils.getDirUri(this.conf, new Path(conf.get(HConstants.HBASE_DIR)));
    if (rootDirUri != null) {
        CommonFSUtils.setFsDefault(this.conf, rootDirUri);
    }
    // init the filesystem
    this.dataFs = new HFileSystem(this.conf, useHBaseChecksum);
    this.dataRootDir = CommonFSUtils.getRootDir(this.conf);
    this.tableDescriptors = new FSTableDescriptors(this.dataFs, this.dataRootDir, !canUpdateTableDescriptor(), cacheTableDescriptor());
}
Also used : Path(org.apache.hadoop.fs.Path) FSTableDescriptors(org.apache.hadoop.hbase.util.FSTableDescriptors) HFileSystem(org.apache.hadoop.hbase.fs.HFileSystem)
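
A short usage sketch of the two-root layout this method supports. The paths are made-up examples; only configuration keys and helpers referenced in the snippet (plus HBaseConfiguration.create()) are used, and the setFsDefault() step from the real code is noted rather than reproduced.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.CommonFSUtils;

public class WalAndDataRootSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical layout: store files and WALs under different roots.
        conf.set(HConstants.HBASE_DIR, "hdfs://namenode:8020/hbase");
        conf.set(CommonFSUtils.HBASE_WAL_DIR, "hdfs://namenode:8020/hbase-wal");
        // Resolve both roots the same way initializeFileSystem() does. The real
        // code also calls CommonFSUtils.setFsDefault(...) before constructing each
        // HFileSystem so that every wrapper is created against the matching cluster.
        Path walRoot = CommonFSUtils.getWALRootDir(conf);
        Path dataRoot = CommonFSUtils.getRootDir(conf);
        System.out.println("wal root: " + walRoot + ", data root: " + dataRoot);
    }
}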

Example 3 with HFileSystem

Use of org.apache.hadoop.hbase.fs.HFileSystem in project hbase by apache.

From the class HFileOutputFormat2, method createRecordWriter:

static <V extends Cell> RecordWriter<ImmutableBytesWritable, V> createRecordWriter(final TaskAttemptContext context, final OutputCommitter committer) throws IOException {
    // Get the path of the temporary output directory
    final Path outputDir = ((FileOutputCommitter) committer).getWorkPath();
    final Configuration conf = context.getConfiguration();
    final boolean writeMultipleTables = conf.getBoolean(MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false);
    final String writeTableNames = conf.get(OUTPUT_TABLE_NAME_CONF_KEY);
    if (writeTableNames == null || writeTableNames.isEmpty()) {
        throw new IllegalArgumentException("" + OUTPUT_TABLE_NAME_CONF_KEY + " cannot be empty");
    }
    final FileSystem fs = outputDir.getFileSystem(conf);
    // These configs are from hbase-*.xml
    final long maxsize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE);
    // Invented config.  Add to hbase-*.xml if other than default compression.
    final String defaultCompressionStr = conf.get("hfile.compression", Compression.Algorithm.NONE.getName());
    final Algorithm defaultCompression = HFileWriterImpl.compressionByName(defaultCompressionStr);
    String compressionStr = conf.get(COMPRESSION_OVERRIDE_CONF_KEY);
    final Algorithm overriddenCompression = compressionStr != null ? Compression.getCompressionAlgorithmByName(compressionStr) : null;
    final boolean compactionExclude = conf.getBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", false);
    final Set<String> allTableNames = Arrays.stream(writeTableNames.split(Bytes.toString(tableSeparator))).collect(Collectors.toSet());
    // create a map from column family to the compression algorithm
    final Map<byte[], Algorithm> compressionMap = createFamilyCompressionMap(conf);
    final Map<byte[], BloomType> bloomTypeMap = createFamilyBloomTypeMap(conf);
    final Map<byte[], String> bloomParamMap = createFamilyBloomParamMap(conf);
    final Map<byte[], Integer> blockSizeMap = createFamilyBlockSizeMap(conf);
    String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY);
    final Map<byte[], DataBlockEncoding> datablockEncodingMap = createFamilyDataBlockEncodingMap(conf);
    final DataBlockEncoding overriddenEncoding = dataBlockEncodingStr != null ? DataBlockEncoding.valueOf(dataBlockEncodingStr) : null;
    return new RecordWriter<ImmutableBytesWritable, V>() {

        // Map of families to writers and how much has been output on the writer.
        private final Map<byte[], WriterLength> writers = new TreeMap<>(Bytes.BYTES_COMPARATOR);

        private final Map<byte[], byte[]> previousRows = new TreeMap<>(Bytes.BYTES_COMPARATOR);

        private final long now = EnvironmentEdgeManager.currentTime();

        private byte[] tableNameBytes = writeMultipleTables ? null : Bytes.toBytes(writeTableNames);

        @Override
        public void write(ImmutableBytesWritable row, V cell) throws IOException {
            Cell kv = cell;
            // null input == user explicitly wants to flush
            if (row == null && kv == null) {
                rollWriters(null);
                return;
            }
            byte[] rowKey = CellUtil.cloneRow(kv);
            int length = (PrivateCellUtil.estimatedSerializedSizeOf(kv)) - Bytes.SIZEOF_INT;
            byte[] family = CellUtil.cloneFamily(kv);
            if (writeMultipleTables) {
                tableNameBytes = MultiTableHFileOutputFormat.getTableName(row.get());
                tableNameBytes = TableName.valueOf(tableNameBytes).getNameWithNamespaceInclAsString().getBytes(Charset.defaultCharset());
                if (!allTableNames.contains(Bytes.toString(tableNameBytes))) {
                    throw new IllegalArgumentException("TableName " + Bytes.toString(tableNameBytes) + " not expected");
                }
            }
            byte[] tableAndFamily = getTableNameSuffixedWithFamily(tableNameBytes, family);
            WriterLength wl = this.writers.get(tableAndFamily);
            // If this is a new column family, ensure its output directory exists
            if (wl == null) {
                Path writerPath = null;
                if (writeMultipleTables) {
                    Path tableRelPath = getTableRelativePath(tableNameBytes);
                    writerPath = new Path(outputDir, new Path(tableRelPath, Bytes.toString(family)));
                } else {
                    writerPath = new Path(outputDir, Bytes.toString(family));
                }
                fs.mkdirs(writerPath);
                configureStoragePolicy(conf, fs, tableAndFamily, writerPath);
            }
            // Roll the writer when the size limit is reached, but only once the current row is finished
            if (wl != null && wl.written + length >= maxsize && Bytes.compareTo(this.previousRows.get(family), rowKey) != 0) {
                rollWriters(wl);
            }
            // create a new HFile writer, if necessary
            if (wl == null || wl.writer == null) {
                InetSocketAddress[] favoredNodes = null;
                if (conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) {
                    HRegionLocation loc = null;
                    String tableName = Bytes.toString(tableNameBytes);
                    if (tableName != null) {
                        try (Connection connection = ConnectionFactory.createConnection(createRemoteClusterConf(conf));
                            RegionLocator locator = connection.getRegionLocator(TableName.valueOf(tableName))) {
                            loc = locator.getRegionLocation(rowKey);
                        } catch (Throwable e) {
                            LOG.warn("Something wrong locating rowkey {} in {}", Bytes.toString(rowKey), tableName, e);
                            loc = null;
                        }
                    }
                    if (null == loc) {
                        LOG.trace("Failed get of location, use default writer {}", Bytes.toString(rowKey));
                    } else {
                        LOG.debug("First rowkey: [{}]", Bytes.toString(rowKey));
                        InetSocketAddress initialIsa = new InetSocketAddress(loc.getHostname(), loc.getPort());
                        if (initialIsa.isUnresolved()) {
                            LOG.trace("Failed resolve address {}, use default writer", loc.getHostnamePort());
                        } else {
                            LOG.debug("Use favored nodes writer: {}", initialIsa.getHostString());
                            favoredNodes = new InetSocketAddress[] { initialIsa };
                        }
                    }
                }
                wl = getNewWriter(tableNameBytes, family, conf, favoredNodes);
            }
            // we now have the proper HFile writer. full steam ahead
            PrivateCellUtil.updateLatestStamp(cell, this.now);
            wl.writer.append(kv);
            wl.written += length;
            // Copy the row so we know when a row transitions.
            this.previousRows.put(family, rowKey);
        }

        private Path getTableRelativePath(byte[] tableNameBytes) {
            String tableName = Bytes.toString(tableNameBytes);
            String[] tableNameParts = tableName.split(":");
            Path tableRelPath = new Path(tableNameParts[0]);
            if (tableNameParts.length > 1) {
                tableRelPath = new Path(tableRelPath, tableNameParts[1]);
            }
            return tableRelPath;
        }

        private void rollWriters(WriterLength writerLength) throws IOException {
            if (writerLength != null) {
                closeWriter(writerLength);
            } else {
                for (WriterLength wl : this.writers.values()) {
                    closeWriter(wl);
                }
            }
        }

        private void closeWriter(WriterLength wl) throws IOException {
            if (wl.writer != null) {
                LOG.info("Writer=" + wl.writer.getPath() + ((wl.written == 0) ? "" : ", wrote=" + wl.written));
                close(wl.writer);
                wl.writer = null;
            }
            wl.written = 0;
        }

        private Configuration createRemoteClusterConf(Configuration conf) {
            final Configuration newConf = new Configuration(conf);
            final String quorum = conf.get(REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY);
            final String clientPort = conf.get(REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY);
            final String parent = conf.get(REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY);
            if (quorum != null && clientPort != null && parent != null) {
                newConf.set(HConstants.ZOOKEEPER_QUORUM, quorum);
                newConf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.parseInt(clientPort));
                newConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, parent);
            }
            for (Entry<String, String> entry : conf) {
                String key = entry.getKey();
                if (REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY.equals(key) || REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY.equals(key) || REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY.equals(key)) {
                    // Handled them above
                    continue;
                }
                if (entry.getKey().startsWith(REMOTE_CLUSTER_CONF_PREFIX)) {
                    String originalKey = entry.getKey().substring(REMOTE_CLUSTER_CONF_PREFIX.length());
                    if (!originalKey.isEmpty()) {
                        newConf.set(originalKey, entry.getValue());
                    }
                }
            }
            return newConf;
        }

        /*
         * Create a new StoreFile.Writer.
         * @return A WriterLength, containing a new StoreFile.Writer.
         */
        @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "BX_UNBOXING_IMMEDIATELY_REBOXED", justification = "Not important")
        private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration conf, InetSocketAddress[] favoredNodes) throws IOException {
            byte[] tableAndFamily = getTableNameSuffixedWithFamily(tableName, family);
            Path familydir = new Path(outputDir, Bytes.toString(family));
            if (writeMultipleTables) {
                familydir = new Path(outputDir, new Path(getTableRelativePath(tableName), Bytes.toString(family)));
            }
            WriterLength wl = new WriterLength();
            Algorithm compression = overriddenCompression;
            compression = compression == null ? compressionMap.get(tableAndFamily) : compression;
            compression = compression == null ? defaultCompression : compression;
            BloomType bloomType = bloomTypeMap.get(tableAndFamily);
            bloomType = bloomType == null ? BloomType.NONE : bloomType;
            String bloomParam = bloomParamMap.get(tableAndFamily);
            if (bloomType == BloomType.ROWPREFIX_FIXED_LENGTH) {
                conf.set(BloomFilterUtil.PREFIX_LENGTH_KEY, bloomParam);
            }
            Integer blockSize = blockSizeMap.get(tableAndFamily);
            blockSize = blockSize == null ? HConstants.DEFAULT_BLOCKSIZE : blockSize;
            DataBlockEncoding encoding = overriddenEncoding;
            encoding = encoding == null ? datablockEncodingMap.get(tableAndFamily) : encoding;
            encoding = encoding == null ? DataBlockEncoding.NONE : encoding;
            HFileContextBuilder contextBuilder = new HFileContextBuilder().withCompression(compression).withDataBlockEncoding(encoding).withChecksumType(StoreUtils.getChecksumType(conf)).withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blockSize).withColumnFamily(family).withTableName(tableName);
            if (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
                contextBuilder.withIncludesTags(true);
            }
            HFileContext hFileContext = contextBuilder.build();
            if (null == favoredNodes) {
                wl.writer = new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, fs).withOutputDir(familydir).withBloomType(bloomType).withFileContext(hFileContext).build();
            } else {
                wl.writer = new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, new HFileSystem(fs)).withOutputDir(familydir).withBloomType(bloomType).withFileContext(hFileContext).withFavoredNodes(favoredNodes).build();
            }
            this.writers.put(tableAndFamily, wl);
            return wl;
        }

        private void close(final StoreFileWriter w) throws IOException {
            if (w != null) {
                w.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(EnvironmentEdgeManager.currentTime()));
                w.appendFileInfo(BULKLOAD_TASK_KEY, Bytes.toBytes(context.getTaskAttemptID().toString()));
                w.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(true));
                w.appendFileInfo(EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude));
                w.appendTrackedTimestampsToMetadata();
                w.close();
            }
        }

        @Override
        public void close(TaskAttemptContext c) throws IOException, InterruptedException {
            for (WriterLength wl : this.writers.values()) {
                close(wl.writer);
            }
        }
    };
}
Also used : DataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding) StoreFileWriter(org.apache.hadoop.hbase.regionserver.StoreFileWriter) Configuration(org.apache.hadoop.conf.Configuration) InetSocketAddress(java.net.InetSocketAddress) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) RecordWriter(org.apache.hadoop.mapreduce.RecordWriter) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) FileSystem(org.apache.hadoop.fs.FileSystem) HFileSystem(org.apache.hadoop.hbase.fs.HFileSystem) Cell(org.apache.hadoop.hbase.Cell) MapReduceExtendedCell(org.apache.hadoop.hbase.util.MapReduceExtendedCell) Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) FileOutputCommitter(org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter) Connection(org.apache.hadoop.hbase.client.Connection) TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext) Algorithm(org.apache.hadoop.hbase.io.compress.Compression.Algorithm) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext) BloomType(org.apache.hadoop.hbase.regionserver.BloomType) Map(java.util.Map) TreeMap(java.util.TreeMap)
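
Inside getNewWriter, each per-family setting is resolved with the same precedence: an explicit override from the job configuration wins, then the family-specific value, then a default. A hypothetical helper (not part of HFileOutputFormat2) that captures just that selection order:

import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

final class FamilySettingPrecedence {
    // Mirrors the override -> per-family -> default chain used for compression.
    static Algorithm chooseCompression(Algorithm override, Algorithm perFamily, Algorithm fallback) {
        if (override != null) {
            return override;
        }
        return perFamily != null ? perFamily : fallback;
    }

    // The data block encoding is resolved the same way, bottoming out at NONE.
    static DataBlockEncoding chooseEncoding(DataBlockEncoding override, DataBlockEncoding perFamily) {
        if (override != null) {
            return override;
        }
        return perFamily != null ? perFamily : DataBlockEncoding.NONE;
    }
}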

Example 4 with HFileSystem

Use of org.apache.hadoop.hbase.fs.HFileSystem in project hbase by apache.

From the class TestFSErrorsExposed, method testStoreFileScannerThrowsErrors:

/**
 * Injects errors into the pread calls of an on-disk file, and makes
 * sure those bubble up to the StoreFileScanner
 */
@Test
public void testStoreFileScannerThrowsErrors() throws IOException {
    Path hfilePath = new Path(new Path(util.getDataTestDir("internalScannerExposesErrors"), "regionname"), "familyname");
    HFileSystem hfs = (HFileSystem) util.getTestFileSystem();
    FaultyFileSystem faultyfs = new FaultyFileSystem(hfs.getBackingFs());
    HFileSystem fs = new HFileSystem(faultyfs);
    CacheConfig cacheConf = new CacheConfig(util.getConfiguration());
    HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build();
    StoreFileWriter writer = new StoreFileWriter.Builder(util.getConfiguration(), cacheConf, hfs).withOutputDir(hfilePath).withFileContext(meta).build();
    TestHStoreFile.writeStoreFile(writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
    HStoreFile sf = new HStoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf, BloomType.NONE, true);
    // 0 is passed as readpoint because this test operates on HStoreFile directly
    List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(Collections.singletonList(sf), false, true, false, false, 0);
    KeyValueScanner scanner = scanners.get(0);
    FaultyInputStream inStream = faultyfs.inStreams.get(0).get();
    assertNotNull(inStream);
    scanner.seek(KeyValue.LOWESTKEY);
    // Do at least one successful read
    assertNotNull(scanner.next());
    faultyfs.startFaults();
    try {
        int scanned = 0;
        while (scanner.next() != null) {
            scanned++;
        }
        fail("Scanner didn't throw after faults injected");
    } catch (IOException ioe) {
        LOG.info("Got expected exception", ioe);
        assertTrue(ioe.getMessage().contains("Could not iterate"));
    }
    scanner.close();
}
Also used : Path(org.apache.hadoop.fs.Path) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) IOException(java.io.IOException) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext) HFileSystem(org.apache.hadoop.hbase.fs.HFileSystem) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Test(org.junit.Test)
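
The layering the test depends on is simply that HFileSystem wraps whatever FileSystem it is given, so a FilterFileSystem subclass can be slotted between HBase and the real storage. A minimal pass-through wrapper in that spirit; the fault-injection details of FaultyFileSystem (tracking streams, startFaults()) are test-specific and omitted here, and this class is purely illustrative:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;

// Hypothetical no-op wrapper: it delegates everything to the backing filesystem.
// A real fault injector would override open() to return streams that fail on demand.
class PassThroughFileSystem extends FilterFileSystem {
    PassThroughFileSystem(FileSystem backing) {
        super(backing);
    }
}

// Usage, assuming 'backing' is an existing FileSystem instance:
//   HFileSystem hfs = new HFileSystem(new PassThroughFileSystem(backing));
//   hfs.getBackingFs(); // returns the pass-through wrapper, not the raw filesystem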

Example 5 with HFileSystem

Use of org.apache.hadoop.hbase.fs.HFileSystem in project hbase by apache.

From the class TestChecksum, method testChecksumCorruptionInternals:

protected void testChecksumCorruptionInternals(boolean useTags) throws IOException {
    for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
        for (boolean pread : new boolean[] { false, true }) {
            LOG.info("testChecksumCorruption: Compression algorithm: " + algo + ", pread=" + pread);
            Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_" + algo);
            FSDataOutputStream os = fs.create(path);
            HFileContext meta = new HFileContextBuilder().withCompression(algo).withIncludesMvcc(true).withIncludesTags(useTags).withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM).build();
            HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta);
            long totalSize = 0;
            for (int blockId = 0; blockId < 2; ++blockId) {
                DataOutputStream dos = hbw.startWriting(BlockType.DATA);
                for (int i = 0; i < 1234; ++i) dos.writeInt(i);
                hbw.writeHeaderAndData(os);
                totalSize += hbw.getOnDiskSizeWithHeader();
            }
            os.close();
            // Use hbase checksums.
            assertEquals(true, hfs.useHBaseChecksum());
            // Do a read that purposely introduces checksum verification failures.
            FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
            meta = new HFileContextBuilder().withCompression(algo).withIncludesMvcc(true).withIncludesTags(useTags).withHBaseCheckSum(true).build();
            ReaderContext context = new ReaderContextBuilder().withInputStreamWrapper(is).withFileSize(totalSize).withFileSystem(fs).withFilePath(path).build();
            HFileBlock.FSReader hbr = new CorruptedFSReaderImpl(context, meta, TEST_UTIL.getConfiguration());
            HFileBlock b = hbr.readBlockData(0, -1, pread, false, true);
            b.sanityCheck();
            assertEquals(4936, b.getUncompressedSizeWithoutHeader());
            assertEquals(algo == GZ ? 2173 : 4936, b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
            // Read the data back from the hfile, excluding header and checksum.
            ByteBuff bb = b.unpack(meta, hbr).getBufferWithoutHeader();
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(bb.array(), bb.arrayOffset(), bb.limit()));
            // assert that we encountered hbase checksum verification failures
            // but still used hdfs checksums and read data successfully.
            assertEquals(1, HFile.getAndResetChecksumFailuresCount());
            validateData(in);
            // After a checksum failure, the reader temporarily skips hbase checksum
            // verification (relying on hdfs checksums) for a number of subsequent read
            // requests. Verify that this is correct.
            for (int i = 0; i < HFileBlock.CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD + 1; i++) {
                b = hbr.readBlockData(0, -1, pread, false, true);
                assertTrue(b.getBufferReadOnly() instanceof SingleByteBuff);
                assertEquals(0, HFile.getAndResetChecksumFailuresCount());
            }
            // The next read should have hbase checksum verification re-enabled;
            // we verify this by asserting that there was an hbase-checksum failure.
            b = hbr.readBlockData(0, -1, pread, false, true);
            assertTrue(b.getBufferReadOnly() instanceof SingleByteBuff);
            assertEquals(1, HFile.getAndResetChecksumFailuresCount());
            // Since the above encountered a checksum failure, we switch
            // back to not checking hbase checksums.
            b = hbr.readBlockData(0, -1, pread, false, true);
            assertTrue(b.getBufferReadOnly() instanceof SingleByteBuff);
            assertEquals(0, HFile.getAndResetChecksumFailuresCount());
            is.close();
            // Now, use a completely new reader. Switch off hbase checksums in
            // the configuration. In this case, we should not detect
            // any retries within hbase.
            Configuration conf = TEST_UTIL.getConfiguration();
            HFileSystem newfs = new HFileSystem(conf, false);
            assertEquals(false, newfs.useHBaseChecksum());
            is = new FSDataInputStreamWrapper(newfs, path);
            context = new ReaderContextBuilder().withInputStreamWrapper(is).withFileSize(totalSize).withFileSystem(newfs).withFilePath(path).build();
            hbr = new CorruptedFSReaderImpl(context, meta, conf);
            b = hbr.readBlockData(0, -1, pread, false, true);
            is.close();
            b.sanityCheck();
            b = b.unpack(meta, hbr);
            assertEquals(4936, b.getUncompressedSizeWithoutHeader());
            assertEquals(algo == GZ ? 2173 : 4936, b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
            // Read the data back from the hfile, excluding header and checksum.
            bb = b.getBufferWithoutHeader();
            in = new DataInputStream(new ByteArrayInputStream(bb.array(), bb.arrayOffset(), bb.limit()));
            // assert that we did not encounter hbase checksum verification failures
            // but still used hdfs checksums and read data successfully.
            assertEquals(0, HFile.getAndResetChecksumFailuresCount());
            validateData(in);
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Compression(org.apache.hadoop.hbase.io.compress.Compression) Configuration(org.apache.hadoop.conf.Configuration) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) DataOutputStream(java.io.DataOutputStream) DataInputStream(java.io.DataInputStream) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) ByteArrayInputStream(java.io.ByteArrayInputStream) SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) FSDataInputStreamWrapper(org.apache.hadoop.hbase.io.FSDataInputStreamWrapper) MultiByteBuff(org.apache.hadoop.hbase.nio.MultiByteBuff) ByteBuff(org.apache.hadoop.hbase.nio.ByteBuff) HFileSystem(org.apache.hadoop.hbase.fs.HFileSystem)
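
The comments in this test describe a fallback policy: after an HBase checksum failure the reader skips HBase-level verification (relying on HDFS checksums) for a fixed number of subsequent reads, then switches back. A hypothetical model of that behavior, with illustrative names rather than the real HFileBlock internals:

// Sketch only: field and method names are invented to model the behavior the
// test asserts; the actual logic lives inside the HFile block reader.
final class ChecksumFallbackPolicy {
    private final int ioThreshold; // reads to skip HBase checksums after a failure
    private int remainingSkips = 0;

    ChecksumFallbackPolicy(int ioThreshold) {
        this.ioThreshold = ioThreshold;
    }

    // Called before each block read; true means verify HBase checksums.
    boolean verifyHBaseChecksum() {
        if (remainingSkips > 0) {
            remainingSkips--; // rely on HDFS checksums for this read
            return false;
        }
        return true;
    }

    // Called when HBase checksum verification fails for a block.
    void onVerificationFailure() {
        remainingSkips = ioThreshold; // temporarily fall back to HDFS checksums
    }
}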

Aggregations

HFileSystem (org.apache.hadoop.hbase.fs.HFileSystem): 11
Path (org.apache.hadoop.fs.Path): 10
FileSystem (org.apache.hadoop.fs.FileSystem): 7
Configuration (org.apache.hadoop.conf.Configuration): 3
HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext): 3
HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder): 3
Test (org.junit.Test): 3
IOException (java.io.IOException): 2
ColumnFamilyDescriptorBuilder (org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder): 2
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 2
ByteArrayInputStream (java.io.ByteArrayInputStream): 1
DataInputStream (java.io.DataInputStream): 1
DataOutputStream (java.io.DataOutputStream): 1
InetSocketAddress (java.net.InetSocketAddress): 1
Map (java.util.Map): 1
TreeMap (java.util.TreeMap): 1
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 1
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 1
FileStatus (org.apache.hadoop.fs.FileStatus): 1
FilterFileSystem (org.apache.hadoop.fs.FilterFileSystem): 1