Use of org.apache.hadoop.ozone.common.OzoneChecksumException in project ozone by apache.
The class KeyValueContainerCheck, method verifyChecksum.
private static void verifyChecksum(BlockData block,
    ContainerProtos.ChunkInfo chunk, File chunkFile,
    ContainerLayoutVersion layout, DataTransferThrottler throttler,
    Canceler canceler) throws IOException {
  ChecksumData checksumData =
      ChecksumData.getFromProtoBuf(chunk.getChecksumData());
  int checksumCount = checksumData.getChecksums().size();
  int bytesPerChecksum = checksumData.getBytesPerChecksum();
  Checksum cal = new Checksum(checksumData.getChecksumType(), bytesPerChecksum);
  ByteBuffer buffer = ByteBuffer.allocate(bytesPerChecksum);
  long bytesRead = 0;
  try (FileChannel channel = FileChannel.open(chunkFile.toPath(),
      ChunkUtils.READ_OPTIONS, ChunkUtils.NO_ATTRIBUTES)) {
    if (layout == ContainerLayoutVersion.FILE_PER_BLOCK) {
      channel.position(chunk.getOffset());
    }
    for (int i = 0; i < checksumCount; i++) {
      // limit last read for FILE_PER_BLOCK, to avoid reading next chunk
      if (layout == ContainerLayoutVersion.FILE_PER_BLOCK
          && i == checksumCount - 1
          && chunk.getLen() % bytesPerChecksum != 0) {
        buffer.limit((int) (chunk.getLen() % bytesPerChecksum));
      }
      int v = channel.read(buffer);
      if (v == -1) {
        break;
      }
      bytesRead += v;
      buffer.flip();
      throttler.throttle(v, canceler);
      ByteString expected = checksumData.getChecksums().get(i);
      ByteString actual = cal.computeChecksum(buffer).getChecksums().get(0);
      if (!expected.equals(actual)) {
        throw new OzoneChecksumException(String.format(
            "Inconsistent read for chunk=%s checksum item %d"
                + " expected checksum %s actual checksum %s for block %s",
            ChunkInfo.getFromProtoBuf(chunk), i,
            Arrays.toString(expected.toByteArray()),
            Arrays.toString(actual.toByteArray()),
            block.getBlockID()));
      }
    }
    if (bytesRead != chunk.getLen()) {
      throw new OzoneChecksumException(String.format(
          "Inconsistent read for chunk=%s expected length=%d actual length=%d"
              + " for block %s",
          chunk.getChunkName(), chunk.getLen(), bytesRead, block.getBlockID()));
    }
  }
}
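The method above walks the chunk file in bytesPerChecksum-sized slices and compares each slice's recomputed checksum with the stored one. The following is a minimal, self-contained sketch of that same per-slice pattern, using java.util.zip.CRC32 in place of Ozone's Checksum class; the ChunkVerifier class, its verify method, and the expectedChecksums list are illustrative assumptions, not Ozone APIs.

// Hypothetical, simplified illustration of per-chunk checksum verification:
// read fixed-size slices and compare one stored checksum per slice.
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.List;
import java.util.zip.CRC32;

final class ChunkVerifier {
  static void verify(Path file, int bytesPerChecksum, List<Long> expectedChecksums)
      throws IOException {
    ByteBuffer buffer = ByteBuffer.allocate(bytesPerChecksum);
    try (FileChannel channel = FileChannel.open(file, StandardOpenOption.READ)) {
      for (int i = 0; i < expectedChecksums.size(); i++) {
        buffer.clear();
        int read = channel.read(buffer);
        if (read == -1) {
          break;                      // reached end of file early
        }
        buffer.flip();
        CRC32 crc = new CRC32();
        crc.update(buffer);           // checksum only the bytes actually read
        if (crc.getValue() != expectedChecksums.get(i)) {
          throw new IOException("Checksum mismatch at slice " + i);
        }
      }
    }
  }
}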
Use of org.apache.hadoop.ozone.common.OzoneChecksumException in project ozone by apache.
The class TestOzoneRpcClientAbstract, method testReadKeyWithCorruptedDataWithMutiNodes.
/**
 * Tests that reading a corrupted chunk file throws a checksum exception.
 * @throws IOException
 */
@Test
public void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String value = "sample value";
  byte[] data = value.getBytes(UTF_8);
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  String keyName = UUID.randomUUID().toString();
  // Write data into a key
  OzoneOutputStream out = bucket.createKey(keyName,
      value.getBytes(UTF_8).length, ReplicationType.RATIS, THREE,
      new HashMap<>());
  out.write(value.getBytes(UTF_8));
  out.close();
  // We need to find the location of the chunk file corresponding to the
  // data we just wrote.
  OzoneKey key = bucket.getKey(keyName);
  List<OzoneKeyLocation> keyLocation =
      ((OzoneKeyDetails) key).getOzoneKeyLocations();
  Assert.assertTrue("Key location not found in OM", !keyLocation.isEmpty());
  long containerID =
      ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0).getContainerID();
  // Get the container by traversing the datanodes.
  List<Container> containerList = new ArrayList<>();
  Container container;
  for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) {
    container = hddsDatanode.getDatanodeStateMachine().getContainer()
        .getContainerSet().getContainer(containerID);
    if (container != null) {
      containerList.add(container);
      if (containerList.size() == 3) {
        break;
      }
    }
  }
  Assert.assertTrue("Container not found", !containerList.isEmpty());
  corruptData(containerList.get(0), key);
  // Failover to the next replica
  try {
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] b = new byte[data.length];
    is.read(b);
    Assert.assertTrue(Arrays.equals(b, data));
  } catch (OzoneChecksumException e) {
    fail("Reading corrupted data should not fail.");
  }
  corruptData(containerList.get(1), key);
  // Failover to the next replica
  try {
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] b = new byte[data.length];
    is.read(b);
    Assert.assertTrue(Arrays.equals(b, data));
  } catch (OzoneChecksumException e) {
    fail("Reading corrupted data should not fail.");
  }
  corruptData(containerList.get(2), key);
  // Try reading the key. The read will fail here as all the replicas are corrupt.
  try {
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] b = new byte[data.length];
    is.read(b);
    fail("Reading corrupted data should fail.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Checksum mismatch", e);
  }
}
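The test depends on the Ozone client transparently failing over to another replica when one replica's data fails checksum verification, and only surfacing the error once every replica is corrupt. A rough, hypothetical sketch of that retry-over-replicas idea follows; ReplicaReader and readFrom are made-up names for illustration, not part of the Ozone client API.

import java.io.IOException;
import java.util.List;

// Hypothetical interface standing in for "read this block from one datanode";
// it is assumed to throw IOException on a checksum mismatch.
interface ReplicaReader {
  byte[] readFrom(String datanode) throws IOException;
}

final class FailoverRead {
  // Try each replica in turn; return the first successful read, or rethrow
  // the last failure when every replica is corrupt.
  static byte[] read(ReplicaReader reader, List<String> datanodes) throws IOException {
    IOException last = null;
    for (String dn : datanodes) {
      try {
        return reader.readFrom(dn);
      } catch (IOException e) {
        last = e;  // e.g. checksum mismatch: fall through to the next replica
      }
    }
    throw last != null ? last : new IOException("No replicas available");
  }
}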
Use of org.apache.hadoop.ozone.common.OzoneChecksumException in project ozone by apache.
The class ReadReplicas, method downloadReplicasAndCreateManifest.
private void downloadReplicasAndCreateManifest(String keyName,
    Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicas,
    Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicasWithoutChecksum,
    String directoryName, JsonArray blocks) throws IOException {
  int blockIndex = 0;
  for (Map.Entry<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> block
      : replicas.entrySet()) {
    JsonObject blockJson = new JsonObject();
    JsonArray replicasJson = new JsonArray();
    blockIndex += 1;
    blockJson.addProperty(JSON_PROPERTY_BLOCK_INDEX, blockIndex);
    blockJson.addProperty(JSON_PROPERTY_BLOCK_CONTAINERID, block.getKey().getContainerID());
    blockJson.addProperty(JSON_PROPERTY_BLOCK_LOCALID, block.getKey().getLocalID());
    blockJson.addProperty(JSON_PROPERTY_BLOCK_LENGTH, block.getKey().getLength());
    blockJson.addProperty(JSON_PROPERTY_BLOCK_OFFSET, block.getKey().getOffset());
    for (Map.Entry<DatanodeDetails, OzoneInputStream> replica
        : block.getValue().entrySet()) {
      JsonObject replicaJson = new JsonObject();
      replicaJson.addProperty(JSON_PROPERTY_REPLICA_HOSTNAME, replica.getKey().getHostName());
      replicaJson.addProperty(JSON_PROPERTY_REPLICA_UUID, replica.getKey().getUuidString());
      OzoneInputStream is = replica.getValue();
      String fileName = keyName + "_block" + blockIndex + "_"
          + replica.getKey().getHostName();
      System.out.println("Writing : " + fileName);
      File replicaFile = new File(outputDir + "/" + directoryName + "/" + fileName);
      try {
        Files.copy(is, replicaFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
      } catch (IOException e) {
        Throwable cause = e.getCause();
        replicaJson.addProperty(JSON_PROPERTY_REPLICA_EXCEPTION, e.getMessage());
        if (cause instanceof OzoneChecksumException) {
          BlockID blockID = block.getKey().getBlockID();
          String datanodeUUID = replica.getKey().getUuidString();
          is = getInputStreamWithoutChecksum(replicasWithoutChecksum,
              datanodeUUID, blockID);
          Files.copy(is, replicaFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
        } else if (cause instanceof StatusRuntimeException) {
          break;
        }
      } finally {
        is.close();
      }
      replicasJson.add(replicaJson);
    }
    blockJson.add(JSON_PROPERTY_BLOCK_REPLICAS, replicasJson);
    blocks.add(blockJson);
  }
}
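For reference, here is a tiny standalone example of the manifest shape this method builds, using the same Gson JsonObject/JsonArray types; the literal property names ("blockIndex", "hostname", and so on) and values are assumptions standing in for the JSON_PROPERTY_* constants, whose actual values are not shown above.

import com.google.gson.JsonArray;
import com.google.gson.JsonObject;

public final class ManifestShapeDemo {
  public static void main(String[] args) {
    // One replica entry per datanode the block was downloaded from.
    JsonObject replica = new JsonObject();
    replica.addProperty("hostname", "dn-1.example.com");   // illustrative value
    replica.addProperty("uuid", "11111111-2222-3333-4444-555555555555");

    JsonArray replicas = new JsonArray();
    replicas.add(replica);

    // One block entry per key location, mirroring the properties set above.
    JsonObject block = new JsonObject();
    block.addProperty("blockIndex", 1);
    block.addProperty("containerId", 42L);
    block.addProperty("localId", 1001L);
    block.addProperty("length", 4096L);
    block.addProperty("offset", 0L);
    block.add("replicas", replicas);

    System.out.println(block);  // prints the per-block manifest entry as JSON
  }
}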
Use of org.apache.hadoop.ozone.common.OzoneChecksumException in project ozone by apache.
The class TestOzoneRpcClientWithRatis, method testGetKeyAndFileWithNetworkTopology.
/**
 * Tests getting the information of a key with network topology awareness enabled.
 * @throws IOException
 */
@Test
public void testGetKeyAndFileWithNetworkTopology() throws IOException {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String value = "sample value";
  getStore().createVolume(volumeName);
  OzoneVolume volume = getStore().getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  String keyName = UUID.randomUUID().toString();
  // Write data into a key
  try (OzoneOutputStream out = bucket.createKey(keyName,
      value.getBytes(UTF_8).length, ReplicationType.RATIS, THREE,
      new HashMap<>())) {
    out.write(value.getBytes(UTF_8));
  }
  // Since the RPC client is outside the cluster, getFirstNode should be
  // equal to getClosestNode.
  OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
  builder.setVolumeName(volumeName).setBucketName(bucketName)
      .setKeyName(keyName).setRefreshPipeline(true);
  // Read the key with topology-aware read enabled
  try (OzoneInputStream is = bucket.readKey(keyName)) {
    byte[] b = new byte[value.getBytes(UTF_8).length];
    is.read(b);
    Assert.assertTrue(Arrays.equals(b, value.getBytes(UTF_8)));
  } catch (OzoneChecksumException e) {
    fail("Read key should succeed");
  }
  // Read the file with topology-aware read enabled
  try (OzoneInputStream is = bucket.readFile(keyName)) {
    byte[] b = new byte[value.getBytes(UTF_8).length];
    is.read(b);
    Assert.assertTrue(Arrays.equals(b, value.getBytes(UTF_8)));
  } catch (OzoneChecksumException e) {
    fail("Read file should succeed");
  }
  // Read the key with topology-aware read disabled
  conf.setBoolean(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, false);
  try (OzoneClient newClient = OzoneClientFactory.getRpcClient(conf)) {
    ObjectStore newStore = newClient.getObjectStore();
    OzoneBucket newBucket = newStore.getVolume(volumeName).getBucket(bucketName);
    try (OzoneInputStream is = newBucket.readKey(keyName)) {
      byte[] b = new byte[value.getBytes(UTF_8).length];
      is.read(b);
      Assert.assertTrue(Arrays.equals(b, value.getBytes(UTF_8)));
    } catch (OzoneChecksumException e) {
      fail("Read key should succeed");
    }
    // Read the file with topology-aware read disabled
    try (OzoneInputStream is = newBucket.readFile(keyName)) {
      byte[] b = new byte[value.getBytes(UTF_8).length];
      is.read(b);
      Assert.assertTrue(Arrays.equals(b, value.getBytes(UTF_8)));
    } catch (OzoneChecksumException e) {
      fail("Read file should succeed");
    }
  }
}
Use of org.apache.hadoop.ozone.common.OzoneChecksumException in project ozone by apache.
The class BlockOutputStream, method writeChunkToContainer.
/**
* Writes buffered data as a new chunk to the container and saves chunk
* information to be used later in putKey call.
*
* @throws IOException if there is an I/O error while performing the call
* @throws OzoneChecksumException if there is an error while computing
* checksum
*/
private void writeChunkToContainer(ChunkBuffer chunk) throws IOException {
  int effectiveChunkSize = chunk.remaining();
  final long offset = chunkOffset.getAndAdd(effectiveChunkSize);
  final ByteString data = chunk.toByteString(bufferPool.byteStringConversion());
  ChecksumData checksumData = checksum.computeChecksum(chunk);
  ChunkInfo chunkInfo = ChunkInfo.newBuilder()
      .setChunkName(blockID.get().getLocalID() + "_chunk_" + ++chunkIndex)
      .setOffset(offset)
      .setLen(effectiveChunkSize)
      .setChecksumData(checksumData.getProtoBufMessage())
      .build();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Writing chunk {} length {} at offset {}",
        chunkInfo.getChunkName(), effectiveChunkSize, offset);
  }
  try {
    XceiverClientReply asyncReply =
        writeChunkAsync(xceiverClient, chunkInfo, blockID.get(), data, token);
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future =
        asyncReply.getResponse();
    future.thenApplyAsync(e -> {
      try {
        validateResponse(e);
      } catch (IOException sce) {
        future.completeExceptionally(sce);
      }
      return e;
    }, responseExecutor).exceptionally(e -> {
      String msg = "Failed to write chunk " + chunkInfo.getChunkName()
          + " into block " + blockID;
      LOG.debug("{}, exception: {}", msg, e.getLocalizedMessage());
      CompletionException ce = new CompletionException(msg, e);
      setIoException(ce);
      throw ce;
    });
  } catch (IOException | ExecutionException e) {
    throw new IOException(EXCEPTION_MSG + e.toString(), e);
  } catch (InterruptedException ex) {
    Thread.currentThread().interrupt();
    handleInterruptedException(ex, false);
  }
  containerBlockData.addChunks(chunkInfo);
}
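The write path above issues the chunk write asynchronously and validates the response on a separate executor, converting a validation failure into an exceptionally completed future. A stripped-down, hypothetical sketch of that pattern with a plain CompletableFuture follows; writeAndValidate and the string-based "response" are stand-ins for illustration, not the BlockOutputStream API.

import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

final class AsyncValidateDemo {
  // Validate the async reply off the calling thread; a failed validation
  // surfaces as an exceptionally completed future for callers to handle.
  static CompletableFuture<String> writeAndValidate(
      CompletableFuture<String> response, ExecutorService executor) {
    return response.thenApplyAsync(r -> {
      if (r.contains("ERROR")) {                 // stand-in for validateResponse()
        throw new CompletionException(new IOException("chunk write rejected: " + r));
      }
      return r;
    }, executor);
  }

  public static void main(String[] args) {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    CompletableFuture<String> reply = CompletableFuture.completedFuture("OK");
    System.out.println(writeAndValidate(reply, pool).join());  // prints OK
    pool.shutdown();
  }
}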