Usage of org.apache.hadoop.ozone.common.Checksum in the Apache Ozone project:
the call method of the DatanodeBlockPutter class.
// Entry point of the freon "datanode-block-putter" load generator: issues
// timed putBlock commands against a single datanode pipeline.
@Override
public Void call() throws Exception {
init();
OzoneConfiguration ozoneConf = createOzoneConfiguration();
// Fail fast on secure clusters: this tool has no token/security support.
if (OzoneSecurityUtil.isSecurityEnabled(ozoneConf)) {
throw new IllegalArgumentException("datanode-block-putter is not supported in secure environment");
}
try (StorageContainerLocationProtocol scmLocationClient = createStorageContainerLocationClient(ozoneConf)) {
// Resolve the pipeline to write to from the user-supplied pipeline id.
Pipeline pipeline = findPipelineForTest(pipelineId, scmLocationClient, LOG);
try (XceiverClientManager xceiverClientManager = new XceiverClientManager(ozoneConf)) {
client = xceiverClientManager.acquireClient(pipeline);
timer = getMetrics().timer("put-block");
// One random payload is generated up front and its checksum proto is
// reused for every putBlock call issued by runTests below.
byte[] data = RandomStringUtils.randomAscii(chunkSize).getBytes(StandardCharsets.UTF_8);
Checksum checksum = new Checksum(ChecksumType.CRC32, 1024 * 1024);
checksumProtobuf = checksum.computeChecksum(data).getProtoBufMessage();
runTests(this::putBlock);
}
} finally {
// NOTE(review): client is obtained via acquireClient() but closed directly
// here rather than returned with releaseClient(); the manager itself is
// already closed at this point — confirm this matches the pattern used by
// the other freon generators.
if (client != null) {
client.close();
}
}
return null;
}
Usage of org.apache.hadoop.ozone.common.Checksum in the Apache Ozone project:
the readReference method of the DatanodeChunkValidator class.
/**
 * Fetch the reference chunk (written under the same name by
 * {@link org.apache.hadoop.ozone.freon.DatanodeChunkGenerator}) and
 * remember its checksum for later comparisons.
 */
private void readReference() throws IOException {
  // The checksum helper is independent of the RPC, so set it up first.
  checksum = new Checksum(ContainerProtos.ChecksumType.CRC32, chunkSize);
  ContainerCommandRequestProto readRequest = createReadChunkRequest(0);
  ContainerCommandResponseProto readResponse = xceiverClient.sendCommand(readRequest);
  checksumReference = computeChecksum(readResponse);
}
Usage of org.apache.hadoop.ozone.common.Checksum in the Apache Ozone project:
the call method of the GeneratorDatanode class.
// Entry point of the datanode data generator: bootstraps chunk/volume
// machinery from an existing on-disk datanode layout, then runs the
// generateData workload. The initialization order below matters — each step
// reads state produced by the previous one.
@Override
public Void call() throws Exception {
init();
config = createOzoneConfiguration();
BlockManager blockManager = new BlockManagerImpl(config);
chunkManager = ChunkManagerFactory.createChunkManager(config, blockManager, null);
// Use the first configured datanode storage directory as the hdds root.
final Collection<String> storageDirs = HddsServerUtil.getDatanodeStorageDirs(config);
String firstStorageDir = StorageLocation.parse(storageDirs.iterator().next()).getUri().getPath();
final Path hddsDir = Paths.get(firstStorageDir, "hdds");
// The generator cannot create VERSION descriptors itself; a real cluster
// must have initialized the directory layout beforehand.
if (!Files.exists(hddsDir)) {
throw new NoSuchFileException(hddsDir + " doesn't exist. Please start a real cluster to initialize the " + "VERSION descriptors, and re-start this generator after the files" + " are created (but after cluster is stopped).");
}
scmId = getScmIdFromStoragePath(hddsDir);
// Read cluster/datanode identity from the VERSION file left by the cluster.
final File versionFile = new File(firstStorageDir, "hdds/VERSION");
Properties props = DatanodeVersionFile.readFrom(versionFile);
if (props.isEmpty()) {
throw new InconsistentStorageStateException("Version file " + versionFile + " is missing");
}
String clusterId = HddsVolumeUtil.getProperty(props, OzoneConsts.CLUSTER_ID, versionFile);
datanodeId = HddsVolumeUtil.getProperty(props, OzoneConsts.DATANODE_UUID, versionFile);
volumeSet = new MutableVolumeSet(datanodeId, clusterId, config, null, StorageVolume.VolumeType.DATA_VOLUME, null);
volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
// Checksum parameters come from the client config rather than hard-coded
// values, so generated data matches what a real client would have written.
final OzoneClientConfig ozoneClientConfig = config.getObject(OzoneClientConfig.class);
checksum = new Checksum(ozoneClientConfig.getChecksumType(), ozoneClientConfig.getBytesPerChecksum());
timer = getMetrics().timer("datanode-generator");
runTests(this::generateData);
return null;
}
Usage of org.apache.hadoop.ozone.common.Checksum in the Apache Ozone project:
the setDataChecksum method of the ContainerTestHelper class.
/**
* Computes the hash and sets the value correctly.
*
* @param info - chunk info.
* @param data - data array
*/
/**
 * Computes the CRC32 checksum of {@code data} and stores it on the chunk info.
 *
 * @param info chunk info to receive the computed checksum data
 * @param data data buffer to checksum; rewound before returning so callers
 *             can still read it from the start
 * @throws OzoneChecksumException if checksum computation fails
 */
public static void setDataChecksum(ChunkInfo info, ChunkBuffer data) throws OzoneChecksumException {
  // Named constant instead of a bare 1024 * 1024 literal: 1 MiB per checksum.
  final int bytesPerChecksum = 1024 * 1024;
  Checksum checksum = new Checksum(ChecksumType.CRC32, bytesPerChecksum);
  info.setChecksumData(checksum.computeChecksum(data));
  // Rewind so the buffer is readable again (computeChecksum presumably
  // advances it — the original code's explicit rewind suggests so).
  data.rewind();
}
Usage of org.apache.hadoop.ozone.common.Checksum in the Apache Ozone project:
the verifyChecksum method of the KeyValueContainerCheck class.
// Re-reads a chunk from disk in bytesPerChecksum-sized slices and compares
// each slice's freshly computed checksum against the stored checksum data.
// Throws OzoneChecksumException on any mismatch or on a short/long read.
private static void verifyChecksum(BlockData block, ContainerProtos.ChunkInfo chunk, File chunkFile, ContainerLayoutVersion layout, DataTransferThrottler throttler, Canceler canceler) throws IOException {
ChecksumData checksumData = ChecksumData.getFromProtoBuf(chunk.getChecksumData());
int checksumCount = checksumData.getChecksums().size();
int bytesPerChecksum = checksumData.getBytesPerChecksum();
Checksum cal = new Checksum(checksumData.getChecksumType(), bytesPerChecksum);
ByteBuffer buffer = ByteBuffer.allocate(bytesPerChecksum);
long bytesRead = 0;
try (FileChannel channel = FileChannel.open(chunkFile.toPath(), ChunkUtils.READ_OPTIONS, ChunkUtils.NO_ATTRIBUTES)) {
// FILE_PER_BLOCK stores many chunks in one file, so seek to this chunk's
// offset; FILE_PER_CHUNK files start at the chunk's first byte already.
if (layout == ContainerLayoutVersion.FILE_PER_BLOCK) {
channel.position(chunk.getOffset());
}
for (int i = 0; i < checksumCount; i++) {
// limit last read for FILE_PER_BLOCK, to avoid reading next chunk
if (layout == ContainerLayoutVersion.FILE_PER_BLOCK && i == checksumCount - 1 && chunk.getLen() % bytesPerChecksum != 0) {
buffer.limit((int) (chunk.getLen() % bytesPerChecksum));
}
int v = channel.read(buffer);
// -1 means EOF; the short-read check after the loop reports it.
if (v == -1) {
break;
}
bytesRead += v;
buffer.flip();
// Throttle by bytes actually read so the scan does not starve real I/O.
throttler.throttle(v, canceler);
ByteString expected = checksumData.getChecksums().get(i);
// NOTE(review): the buffer is never clear()ed between iterations; after
// flip() its limit equals the bytes just read. This is only correct if
// computeChecksum leaves the position/limit such that the next read()
// refills the full slice — confirm Checksum.computeChecksum(ByteBuffer)
// does not consume the buffer.
ByteString actual = cal.computeChecksum(buffer).getChecksums().get(0);
if (!expected.equals(actual)) {
throw new OzoneChecksumException(String.format("Inconsistent read for chunk=%s" + " checksum item %d" + " expected checksum %s" + " actual checksum %s" + " for block %s", ChunkInfo.getFromProtoBuf(chunk), i, Arrays.toString(expected.toByteArray()), Arrays.toString(actual.toByteArray()), block.getBlockID()));
}
}
// Total bytes read must match the chunk's recorded length exactly.
if (bytesRead != chunk.getLen()) {
throw new OzoneChecksumException(String.format("Inconsistent read for chunk=%s expected length=%d" + " actual length=%d for block %s", chunk.getChunkName(), chunk.getLen(), bytesRead, block.getBlockID()));
}
}
}
Aggregations