Example 1 with OzoneChecksumException

Use of org.apache.hadoop.ozone.common.OzoneChecksumException in project ozone by apache.

The class KeyValueContainerCheck, method verifyChecksum.

private static void verifyChecksum(BlockData block,
        ContainerProtos.ChunkInfo chunk, File chunkFile,
        ContainerLayoutVersion layout, DataTransferThrottler throttler,
        Canceler canceler) throws IOException {
    ChecksumData checksumData = ChecksumData.getFromProtoBuf(chunk.getChecksumData());
    int checksumCount = checksumData.getChecksums().size();
    int bytesPerChecksum = checksumData.getBytesPerChecksum();
    Checksum cal = new Checksum(checksumData.getChecksumType(), bytesPerChecksum);
    ByteBuffer buffer = ByteBuffer.allocate(bytesPerChecksum);
    long bytesRead = 0;
    try (FileChannel channel = FileChannel.open(chunkFile.toPath(), ChunkUtils.READ_OPTIONS, ChunkUtils.NO_ATTRIBUTES)) {
        if (layout == ContainerLayoutVersion.FILE_PER_BLOCK) {
            channel.position(chunk.getOffset());
        }
        for (int i = 0; i < checksumCount; i++) {
            // limit last read for FILE_PER_BLOCK, to avoid reading next chunk
            if (layout == ContainerLayoutVersion.FILE_PER_BLOCK && i == checksumCount - 1 && chunk.getLen() % bytesPerChecksum != 0) {
                buffer.limit((int) (chunk.getLen() % bytesPerChecksum));
            }
            int v = channel.read(buffer);
            if (v == -1) {
                break;
            }
            bytesRead += v;
            buffer.flip();
            throttler.throttle(v, canceler);
            ByteString expected = checksumData.getChecksums().get(i);
            ByteString actual = cal.computeChecksum(buffer).getChecksums().get(0);
            if (!expected.equals(actual)) {
                throw new OzoneChecksumException(String.format("Inconsistent read for chunk=%s" + " checksum item %d" + " expected checksum %s" + " actual checksum %s" + " for block %s", ChunkInfo.getFromProtoBuf(chunk), i, Arrays.toString(expected.toByteArray()), Arrays.toString(actual.toByteArray()), block.getBlockID()));
            }
        }
        if (bytesRead != chunk.getLen()) {
            throw new OzoneChecksumException(String.format("Inconsistent read for chunk=%s expected length=%d" + " actual length=%d for block %s", chunk.getChunkName(), chunk.getLen(), bytesRead, block.getBlockID()));
        }
    }
}
Also used : ChecksumData(org.apache.hadoop.ozone.common.ChecksumData) OzoneChecksumException(org.apache.hadoop.ozone.common.OzoneChecksumException) Checksum(org.apache.hadoop.ozone.common.Checksum) FileChannel(java.nio.channels.FileChannel) ByteString(org.apache.ratis.thirdparty.com.google.protobuf.ByteString) ByteBuffer(java.nio.ByteBuffer)
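The loop in verifyChecksum is the heart of the container scanner: it reads the chunk file back in bytesPerChecksum-sized windows, recomputes each window's checksum, and reports any mismatch or short read as an OzoneChecksumException. Below is a minimal standalone sketch of the same windowed-verification pattern, using java.util.zip.CRC32 as a stand-in for Ozone's pluggable Checksum class; the class and method names are illustrative, not Ozone APIs.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.List;
import java.util.zip.CRC32;

final class WindowedChecksumVerifier {

    // Recompute a CRC32 over each fixed-size window of the file and
    // compare it with the stored value for that window.
    static void verify(Path file, List<Long> storedChecksums,
            int bytesPerChecksum) throws IOException {
        ByteBuffer buffer = ByteBuffer.allocate(bytesPerChecksum);
        CRC32 crc = new CRC32();
        try (FileChannel channel =
                FileChannel.open(file, StandardOpenOption.READ)) {
            for (long expected : storedChecksums) {
                buffer.clear();
                if (channel.read(buffer) == -1) {
                    throw new IOException("Premature end of file: " + file);
                }
                buffer.flip();
                crc.reset();
                crc.update(buffer);
                if (crc.getValue() != expected) {
                    throw new IOException("Checksum mismatch in " + file);
                }
            }
        }
    }
}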

Example 2 with OzoneChecksumException

Use of org.apache.hadoop.ozone.common.OzoneChecksumException in project ozone by apache.

The class TestOzoneRpcClientAbstract, method testReadKeyWithCorruptedDataWithMutiNodes.

/**
 * Tests that reading a corrupted chunk file throws a checksum exception.
 * @throws IOException
 */
@Test
public void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String value = "sample value";
    byte[] data = value.getBytes(UTF_8);
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName = UUID.randomUUID().toString();
    // Write data into a key
    OzoneOutputStream out = bucket.createKey(keyName,
            value.getBytes(UTF_8).length, ReplicationType.RATIS, THREE,
            new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    // We need to find the location of the chunk file corresponding to the
    // data we just wrote.
    OzoneKey key = bucket.getKey(keyName);
    List<OzoneKeyLocation> keyLocation = ((OzoneKeyDetails) key).getOzoneKeyLocations();
    Assert.assertTrue("Key location not found in OM", !keyLocation.isEmpty());
    long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0).getContainerID();
    // Get the container by traversing the datanodes.
    List<Container> containerList = new ArrayList<>();
    Container container;
    for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) {
        container = hddsDatanode.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID);
        if (container != null) {
            containerList.add(container);
            if (containerList.size() == 3) {
                break;
            }
        }
    }
    Assert.assertTrue("Container not found", !containerList.isEmpty());
    corruptData(containerList.get(0), key);
    // failover to next replica
    try {
        OzoneInputStream is = bucket.readKey(keyName);
        byte[] b = new byte[data.length];
        is.read(b);
        Assert.assertTrue(Arrays.equals(b, data));
    } catch (OzoneChecksumException e) {
        fail("Reading corrupted data should not fail.");
    }
    corruptData(containerList.get(1), key);
    // failover to next replica
    try {
        OzoneInputStream is = bucket.readKey(keyName);
        byte[] b = new byte[data.length];
        is.read(b);
        Assert.assertTrue(Arrays.equals(b, data));
    } catch (OzoneChecksumException e) {
        fail("Reading corrupted data should not fail.");
    }
    corruptData(containerList.get(2), key);
    // Try reading the key. Read will fail here as all the replicas are corrupt
    try {
        OzoneInputStream is = bucket.readKey(keyName);
        byte[] b = new byte[data.length];
        is.read(b);
        fail("Reading corrupted data should fail.");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("Checksum mismatch", e);
    }
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) OzoneKeyLocation(org.apache.hadoop.ozone.client.OzoneKeyLocation) ArrayList(java.util.ArrayList) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) HddsDatanodeService(org.apache.hadoop.ozone.HddsDatanodeService) IOException(java.io.IOException) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) OzoneChecksumException(org.apache.hadoop.ozone.common.OzoneChecksumException) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) Test(org.junit.Test)
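The test hinges on the corruptData helper, whose body is not shown here: it damages the chunk file backing one replica so that checksum verification fails on read and the client fails over to the next datanode. A hypothetical sketch of such a helper follows; the real helper locates the chunk file through the container's metadata, while here the path is taken as a parameter.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

final class ChunkCorruptor {

    // Overwrite the leading bytes of a chunk file in place so the stored
    // checksums no longer match the data. WRITE without TRUNCATE_EXISTING
    // keeps the file length unchanged.
    static void corruptChunkFile(Path chunkFile) throws IOException {
        byte[] garbage = "corrupted bytes".getBytes(StandardCharsets.UTF_8);
        Files.write(chunkFile, garbage, StandardOpenOption.WRITE);
    }
}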

Example 3 with OzoneChecksumException

Use of org.apache.hadoop.ozone.common.OzoneChecksumException in project ozone by apache.

The class ReadReplicas, method downloadReplicasAndCreateManifest.

private void downloadReplicasAndCreateManifest(String keyName,
        Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicas,
        Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicasWithoutChecksum,
        String directoryName, JsonArray blocks) throws IOException {
    int blockIndex = 0;
    for (Map.Entry<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> block : replicas.entrySet()) {
        JsonObject blockJson = new JsonObject();
        JsonArray replicasJson = new JsonArray();
        blockIndex += 1;
        blockJson.addProperty(JSON_PROPERTY_BLOCK_INDEX, blockIndex);
        blockJson.addProperty(JSON_PROPERTY_BLOCK_CONTAINERID, block.getKey().getContainerID());
        blockJson.addProperty(JSON_PROPERTY_BLOCK_LOCALID, block.getKey().getLocalID());
        blockJson.addProperty(JSON_PROPERTY_BLOCK_LENGTH, block.getKey().getLength());
        blockJson.addProperty(JSON_PROPERTY_BLOCK_OFFSET, block.getKey().getOffset());
        for (Map.Entry<DatanodeDetails, OzoneInputStream> replica : block.getValue().entrySet()) {
            JsonObject replicaJson = new JsonObject();
            replicaJson.addProperty(JSON_PROPERTY_REPLICA_HOSTNAME, replica.getKey().getHostName());
            replicaJson.addProperty(JSON_PROPERTY_REPLICA_UUID, replica.getKey().getUuidString());
            OzoneInputStream is = replica.getValue();
            String fileName = keyName + "_block" + blockIndex + "_" + replica.getKey().getHostName();
            System.out.println("Writing : " + fileName);
            File replicaFile = new File(outputDir + "/" + directoryName + "/" + fileName);
            try {
                Files.copy(is, replicaFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
            } catch (IOException e) {
                Throwable cause = e.getCause();
                replicaJson.addProperty(JSON_PROPERTY_REPLICA_EXCEPTION, e.getMessage());
                if (cause instanceof OzoneChecksumException) {
                    BlockID blockID = block.getKey().getBlockID();
                    String datanodeUUID = replica.getKey().getUuidString();
                    is = getInputStreamWithoutChecksum(replicasWithoutChecksum, datanodeUUID, blockID);
                    Files.copy(is, replicaFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
                } else if (cause instanceof StatusRuntimeException) {
                    break;
                }
            } finally {
                is.close();
            }
            replicasJson.add(replicaJson);
        }
        blockJson.add(JSON_PROPERTY_BLOCK_REPLICAS, replicasJson);
        blocks.add(blockJson);
    }
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) JsonObject(com.google.gson.JsonObject) IOException(java.io.IOException) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) JsonArray(com.google.gson.JsonArray) OzoneChecksumException(org.apache.hadoop.ozone.common.OzoneChecksumException) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) StatusRuntimeException(org.apache.ratis.thirdparty.io.grpc.StatusRuntimeException) BlockID(org.apache.hadoop.hdds.client.BlockID) Map(java.util.Map) File(java.io.File)
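The notable move in this example is the fallback: when Files.copy fails and the cause is an OzoneChecksumException, the tool re-reads the same replica through a stream created with checksum verification disabled, so even corrupt bytes can be dumped to disk for offline inspection. A generic sketch of that retry-without-verification pattern follows; the helper names are illustrative, and unlike the original it closes both streams via try-with-resources.

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.function.Supplier;

final class VerifiedCopy {

    // Copy from a checksum-verified stream; if that fails, retry once
    // with an unverified stream so the bytes are still captured.
    static void copyWithFallback(Supplier<InputStream> verified,
            Supplier<InputStream> unverified, Path target) throws IOException {
        try (InputStream in = verified.get()) {
            Files.copy(in, target, StandardCopyOption.REPLACE_EXISTING);
        } catch (IOException e) {
            // ReadReplicas retries only when the cause is an
            // OzoneChecksumException; this sketch retries on any IOException.
            try (InputStream in = unverified.get()) {
                Files.copy(in, target, StandardCopyOption.REPLACE_EXISTING);
            }
        }
    }
}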

Example 4 with OzoneChecksumException

Use of org.apache.hadoop.ozone.common.OzoneChecksumException in project ozone by apache.

The class TestOzoneRpcClientWithRatis, method testGetKeyAndFileWithNetworkTopology.

/**
 * Tests getting key information with network topology awareness enabled.
 * @throws IOException
 */
@Test
public void testGetKeyAndFileWithNetworkTopology() throws IOException {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String value = "sample value";
    getStore().createVolume(volumeName);
    OzoneVolume volume = getStore().getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName = UUID.randomUUID().toString();
    // Write data into a key
    try (OzoneOutputStream out = bucket.createKey(keyName,
            value.getBytes(UTF_8).length, ReplicationType.RATIS, THREE,
            new HashMap<>())) {
        out.write(value.getBytes(UTF_8));
    }
    // Since the RPC client is outside the cluster, getFirstNode should be
    // equal to getClosestNode.
    OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
    builder.setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName).setRefreshPipeline(true);
    // read key with topology aware read enabled
    try (OzoneInputStream is = bucket.readKey(keyName)) {
        byte[] b = new byte[value.getBytes(UTF_8).length];
        is.read(b);
        Assert.assertTrue(Arrays.equals(b, value.getBytes(UTF_8)));
    } catch (OzoneChecksumException e) {
        fail("Read key should succeed");
    }
    // read file with topology aware read enabled
    try (OzoneInputStream is = bucket.readKey(keyName)) {
        byte[] b = new byte[value.getBytes(UTF_8).length];
        is.read(b);
        Assert.assertTrue(Arrays.equals(b, value.getBytes(UTF_8)));
    } catch (OzoneChecksumException e) {
        fail("Read file should succeed");
    }
    // read key with topology aware read disabled
    conf.setBoolean(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, false);
    try (OzoneClient newClient = OzoneClientFactory.getRpcClient(conf)) {
        ObjectStore newStore = newClient.getObjectStore();
        OzoneBucket newBucket = newStore.getVolume(volumeName).getBucket(bucketName);
        try (OzoneInputStream is = newBucket.readKey(keyName)) {
            byte[] b = new byte[value.getBytes(UTF_8).length];
            is.read(b);
            Assert.assertTrue(Arrays.equals(b, value.getBytes(UTF_8)));
        } catch (OzoneChecksumException e) {
            fail("Read key should succeed");
        }
        // read file with topology aware read disabled
        try (OzoneInputStream is = newBucket.readFile(keyName)) {
            byte[] b = new byte[value.getBytes(UTF_8).length];
            is.read(b);
            Assert.assertTrue(Arrays.equals(b, value.getBytes(UTF_8)));
        } catch (OzoneChecksumException e) {
            fail("Read file should succeed");
        }
    }
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) ObjectStore(org.apache.hadoop.ozone.client.ObjectStore) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OzoneClient(org.apache.hadoop.ozone.client.OzoneClient) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OzoneChecksumException(org.apache.hadoop.ozone.common.OzoneChecksumException) Test(org.junit.Test)
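A latent fragility in this test (and in Example 2) is that InputStream.read(byte[]) is not guaranteed to fill the array in a single call, so the byte-for-byte comparison could fail spuriously on a short read. A small helper that loops until the buffer is full makes the assertion robust; this is a sketch, and on Java 11+ InputStream.readNBytes does the same job.

import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;

final class ReadUtil {

    // Read exactly expected.length bytes, looping over short reads,
    // then compare with the expected content.
    static boolean readsBack(InputStream is, byte[] expected) throws IOException {
        byte[] actual = new byte[expected.length];
        int off = 0;
        while (off < actual.length) {
            int n = is.read(actual, off, actual.length - off);
            if (n == -1) {
                return false; // stream ended early
            }
            off += n;
        }
        return Arrays.equals(actual, expected);
    }
}

With such a helper, each read block above collapses to a single Assert.assertTrue(readsBack(is, value.getBytes(UTF_8))).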

Example 5 with OzoneChecksumException

Use of org.apache.hadoop.ozone.common.OzoneChecksumException in project ozone by apache.

The class BlockOutputStream, method writeChunkToContainer.

/**
 * Writes buffered data as a new chunk to the container and saves chunk
 * information to be used later in putKey call.
 *
 * @throws IOException if there is an I/O error while performing the call
 * @throws OzoneChecksumException if there is an error while computing
 * checksum
 */
private void writeChunkToContainer(ChunkBuffer chunk) throws IOException {
    int effectiveChunkSize = chunk.remaining();
    final long offset = chunkOffset.getAndAdd(effectiveChunkSize);
    final ByteString data = chunk.toByteString(bufferPool.byteStringConversion());
    ChecksumData checksumData = checksum.computeChecksum(chunk);
    ChunkInfo chunkInfo = ChunkInfo.newBuilder()
            .setChunkName(blockID.get().getLocalID() + "_chunk_" + ++chunkIndex)
            .setOffset(offset)
            .setLen(effectiveChunkSize)
            .setChecksumData(checksumData.getProtoBufMessage())
            .build();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Writing chunk {} length {} at offset {}", chunkInfo.getChunkName(), effectiveChunkSize, offset);
    }
    try {
        XceiverClientReply asyncReply = writeChunkAsync(xceiverClient, chunkInfo, blockID.get(), data, token);
        CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future = asyncReply.getResponse();
        future.thenApplyAsync(e -> {
            try {
                validateResponse(e);
            } catch (IOException sce) {
                future.completeExceptionally(sce);
            }
            return e;
        }, responseExecutor).exceptionally(e -> {
            String msg = "Failed to write chunk " + chunkInfo.getChunkName() + " " + "into block " + blockID;
            LOG.debug("{}, exception: {}", msg, e.getLocalizedMessage());
            CompletionException ce = new CompletionException(msg, e);
            setIoException(ce);
            throw ce;
        });
    } catch (IOException | ExecutionException e) {
        throw new IOException(EXCEPTION_MSG + e.toString(), e);
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
        handleInterruptedException(ex, false);
    }
    containerBlockData.addChunks(chunkInfo);
}
Also used : BlockID(org.apache.hadoop.hdds.client.BlockID) OzoneChecksumException(org.apache.hadoop.ozone.common.OzoneChecksumException) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) ContainerProtocolCalls.writeChunkAsync(org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.writeChunkAsync) LoggerFactory(org.slf4j.LoggerFactory) CompletableFuture(java.util.concurrent.CompletableFuture) KeyValue(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue) ChecksumData(org.apache.hadoop.ozone.common.ChecksumData) ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) AtomicReference(java.util.concurrent.atomic.AtomicReference) OzoneClientConfig(org.apache.hadoop.hdds.scm.OzoneClientConfig) ArrayList(java.util.ArrayList) TokenIdentifier(org.apache.hadoop.security.token.TokenIdentifier) ByteString(org.apache.ratis.thirdparty.com.google.protobuf.ByteString) Checksum(org.apache.hadoop.ozone.common.Checksum) ExecutorService(java.util.concurrent.ExecutorService) OutputStream(java.io.OutputStream) XceiverClientFactory(org.apache.hadoop.hdds.scm.XceiverClientFactory) BlockData(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData) Logger(org.slf4j.Logger) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) IOException(java.io.IOException) CompletionException(java.util.concurrent.CompletionException) ChunkInfo(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) Token(org.apache.hadoop.security.token.Token) ContainerProtocolCalls.putBlockAsync(org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putBlockAsync) Executors(java.util.concurrent.Executors) XceiverClientReply(org.apache.hadoop.hdds.scm.XceiverClientReply) ExecutionException(java.util.concurrent.ExecutionException) AtomicLong(java.util.concurrent.atomic.AtomicLong) List(java.util.List) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
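The pattern worth noting in writeChunkToContainer is how asynchronous failures are surfaced: the reply future is validated on a dedicated response executor, and exceptionally records the failure (via setIoException) so a later write, flush, or close can rethrow it, since no caller is on the stack when the reply arrives. A stripped-down sketch of that validate-then-record pipeline, using only java.util.concurrent and illustrative names:

import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicReference;

final class AsyncWriteSketch {

    private final AtomicReference<Throwable> ioException = new AtomicReference<>();

    // Validate the reply off the I/O thread; on failure, remember the
    // error so a later flush/close can rethrow it.
    CompletableFuture<String> validateAsync(CompletableFuture<String> reply,
            Executor responseExecutor) {
        return reply.thenApplyAsync(r -> {
            if (r.isEmpty()) { // stand-in for validateResponse(e)
                throw new CompletionException(new IOException("empty reply"));
            }
            return r;
        }, responseExecutor).exceptionally(e -> {
            ioException.compareAndSet(null, e); // stand-in for setIoException
            throw new CompletionException("chunk write failed", e);
        });
    }
}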

Aggregations

OzoneChecksumException (org.apache.hadoop.ozone.common.OzoneChecksumException): 6
IOException (java.io.IOException): 3
BlockID (org.apache.hadoop.hdds.client.BlockID): 3
OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream): 3
Test (org.junit.Test): 3
ArrayList (java.util.ArrayList): 2
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 2
ChunkInfo (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo): 2
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 2
OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket): 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1
Preconditions (com.google.common.base.Preconditions): 1
JsonArray (com.google.gson.JsonArray): 1
JsonObject (com.google.gson.JsonObject): 1
File (java.io.File): 1
OutputStream (java.io.OutputStream): 1
ByteBuffer (java.nio.ByteBuffer): 1
FileChannel (java.nio.channels.FileChannel): 1
List (java.util.List): 1
Map (java.util.Map): 1