use of org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext in project ozone by apache.
the class FilePerBlockStrategy method writeChunk.
@Override
public void writeChunk(Container container, BlockID blockID, ChunkInfo info,
    ChunkBuffer data, DispatcherContext dispatcherContext)
    throws StorageContainerException {

  checkLayoutVersion(container);

  Preconditions.checkNotNull(dispatcherContext);
  DispatcherContext.WriteChunkStage stage = dispatcherContext.getStage();

  if (info.getLen() <= 0) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Skip writing empty chunk {} in stage {}", info, stage);
    }
    return;
  }

  if (stage == COMMIT_DATA) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Ignore chunk {} in stage {}", info, stage);
    }
    return;
  }

  KeyValueContainerData containerData =
      (KeyValueContainerData) container.getContainerData();

  File chunkFile = getChunkFile(container, blockID, info);
  boolean overwrite = validateChunkForOverwrite(chunkFile, info);
  long len = info.getLen();
  long offset = info.getOffset();

  if (LOG.isDebugEnabled()) {
    LOG.debug("Writing chunk {} (overwrite: {}) in stage {} to file {}",
        info, overwrite, stage, chunkFile);
  }

  HddsVolume volume = containerData.getVolume();

  FileChannel channel = null;
  try {
    channel = files.getChannel(chunkFile, doSyncWrite);
  } catch (IOException e) {
    onFailure(volume);
    throw e;
  }

  // If this is not an overwrite, check that the offset matches the current
  // length of the block file.
  if (!overwrite) {
    ChunkUtils.validateChunkSize(chunkFile, info);
  }

  ChunkUtils.writeData(channel, chunkFile.getName(), data, offset, len, volume);
  containerData.updateWriteStats(len, overwrite);
}
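Read together with the DispatcherContext.Builder usage shown in the tests further down, the stage check above means the COMMIT_DATA stage is a no-op for this layout. Below is a minimal, hypothetical driver sketch: the strategy, container, blockID, info and data variables are assumed placeholders; only the builder and writeChunk calls mirror the Ozone code shown on this page.
// Hypothetical driver code, not taken from the Ozone sources.
// COMBINED: data is written and accounted for in a single pass.
DispatcherContext combined = new DispatcherContext.Builder()
    .setStage(DispatcherContext.WriteChunkStage.COMBINED)
    .build();
strategy.writeChunk(container, blockID, info, data, combined);

// COMMIT_DATA: FilePerBlockStrategy logs "Ignore chunk ..." and returns
// without touching the block file, since the data was already persisted.
DispatcherContext commitOnly = new DispatcherContext.Builder()
    .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA)
    .build();
strategy.writeChunk(container, blockID, info, data, commitOnly);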
use of org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext in project ozone by apache.
the class FilePerChunkStrategy method writeChunk.
/**
 * Writes a given chunk.
 *
 * @param container - Container for the chunk
 * @param blockID - ID of the block
 * @param info - ChunkInfo
 * @param data - data of the chunk
 * @param dispatcherContext - dispatcher context info
 * @throws StorageContainerException
 */
@Override
public void writeChunk(Container container, BlockID blockID, ChunkInfo info,
    ChunkBuffer data, DispatcherContext dispatcherContext)
    throws StorageContainerException {

  checkLayoutVersion(container);

  Preconditions.checkNotNull(dispatcherContext);
  DispatcherContext.WriteChunkStage stage = dispatcherContext.getStage();

  try {
    KeyValueContainer kvContainer = (KeyValueContainer) container;
    KeyValueContainerData containerData = kvContainer.getContainerData();
    HddsVolume volume = containerData.getVolume();

    File chunkFile = getChunkFile(kvContainer, blockID, info);
    boolean isOverwrite = ChunkUtils.validateChunkForOverwrite(chunkFile, info);
    File tmpChunkFile = getTmpChunkFile(chunkFile, dispatcherContext);

    if (LOG.isDebugEnabled()) {
      LOG.debug("writing chunk:{} chunk stage:{} chunk file:{} tmp chunk file:{}",
          info.getChunkName(), stage, chunkFile, tmpChunkFile);
    }

    long len = info.getLen();
    // Ignore the offset in the chunk info: each chunk is its own file.
    long offset = 0;

    switch (stage) {
    case WRITE_DATA:
      if (isOverwrite) {
        // If the actual chunk file already exists while the temp chunk file
        // is being written, the same Ozone client request has generated two
        // Raft log entries. This can happen when the retry cache expired in
        // Ratis, or because of a log index mismatch/corruption in Ratis.
        // Two approaches are possible:
        // 1. Read the complete data in the actual chunk file and verify its
        //    integrity; if it does not match, fall back to (2).
        // 2. Delete the chunk file and write the chunk again.
        // For now, simply rewrite the chunk file.
        // TODO: once checksum support for write chunks is plugged in, verify
        // the checksum of the existing chunk file against the data to be
        // written here; if they match, return without rewriting.
        LOG.warn("ChunkFile already exists {}. Deleting it.", chunkFile);
        FileUtil.fullyDelete(chunkFile);
      }
      if (tmpChunkFile.exists()) {
        // If the tmp chunk file already exists, the Raft log got appended but
        // the log entry was later truncated in Ratis, leaving behind garbage.
        // TODO: once checksum support for data chunks is plugged in, compare
        // the checksums instead of rewriting the chunk here.
        LOG.warn("tmpChunkFile already exists {}. Overwriting it.", tmpChunkFile);
      }
      // Initially write to the temporary chunk file.
      ChunkUtils.writeData(tmpChunkFile, data, offset, len, volume, doSyncWrite);
      // No need to increment container stats here, as the data is not yet
      // committed.
      break;
    case COMMIT_DATA:
      // Commit the data, i.e. move chunk data from the temporary chunk file
      // to the actual chunk file.
      if (isOverwrite) {
        // If the actual chunk file already exists, the write chunk
        // transaction in the ContainerStateMachine is being reapplied.
        // This can happen when a node restarts.
        // TODO: verify the checksums of the existing chunk file against the
        // ChunkInfo to be committed here.
        LOG.warn("ChunkFile already exists {}", chunkFile);
        return;
      }
      // While committing a chunk, just rename the tmp chunk file, which has
      // the term and log index of the current transaction appended to it.
      commitChunk(tmpChunkFile, chunkFile);
      // Increment container stats here, as the data is committed.
      containerData.updateWriteStats(len, isOverwrite);
      break;
    case COMBINED:
      // Write directly to the chunk file.
      ChunkUtils.writeData(chunkFile, data, offset, len, volume, doSyncWrite);
      containerData.updateWriteStats(len, isOverwrite);
      break;
    default:
      throw new IOException("Can not identify write operation.");
    }
  } catch (StorageContainerException ex) {
    throw ex;
  } catch (IOException ex) {
    throw new StorageContainerException("Internal error: ", ex, IO_EXCEPTION);
  }
}
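Unlike the file-per-block layout above, this strategy needs both stages. A minimal sketch of the two-phase flow, assuming a FilePerChunkStrategy instance and the same placeholder container/block/chunk variables as before; the builder calls mirror those used in the test helpers further down this page.
// Hypothetical two-phase driver, not taken from the Ozone sources.
DispatcherContext writeStage = new DispatcherContext.Builder()
    .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA)
    .build();
DispatcherContext commitStage = new DispatcherContext.Builder()
    .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA)
    .build();

// WRITE_DATA materializes only the temporary chunk file.
strategy.writeChunk(container, blockID, info, data, writeStage);
// COMMIT_DATA renames the temporary chunk file to the final chunk file and
// updates the container write stats.
strategy.writeChunk(container, blockID, info, data, commitStage);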
use of org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext in project ozone by apache.
the class ChunkManagerDummyImpl method writeChunk.
@Override
public void writeChunk(Container container, BlockID blockID, ChunkInfo info,
    ChunkBuffer data, DispatcherContext dispatcherContext)
    throws StorageContainerException {

  Preconditions.checkNotNull(dispatcherContext);
  DispatcherContext.WriteChunkStage stage = dispatcherContext.getStage();

  ContainerData containerData = container.getContainerData();

  if (stage == DispatcherContext.WriteChunkStage.WRITE_DATA
      || stage == DispatcherContext.WriteChunkStage.COMBINED) {
    ChunkUtils.validateBufferSize(info.getLen(), data.remaining());

    HddsVolume volume = containerData.getVolume();
    VolumeIOStats volumeIOStats = volume.getVolumeIOStats();
    volumeIOStats.incWriteOpCount();
    volumeIOStats.incWriteBytes(info.getLen());
  }

  if (stage == DispatcherContext.WriteChunkStage.COMMIT_DATA
      || stage == DispatcherContext.WriteChunkStage.COMBINED) {
    containerData.updateWriteStats(info.getLen(), false);
  }
}
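Since no file is ever opened, this dummy implementation only exercises the accounting paths; a single COMBINED-stage call hits both branches. A small, hypothetical usage sketch (the dummyChunkManager, container, blockID, info and data variables are assumed placeholders):
// Hypothetical usage, not taken from the Ozone sources: one COMBINED call
// bumps the volume IO stats (write op count, write bytes) and the container
// write stats without writing any data to disk.
DispatcherContext combined = new DispatcherContext.Builder()
    .setStage(DispatcherContext.WriteChunkStage.COMBINED)
    .build();
dummyChunkManager.writeChunk(container, blockID, info, data, combined);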
use of org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext in project ozone by apache.
the class TestHddsDispatcher method testContainerNotFoundWithCommitChunk.
@Test
public void testContainerNotFoundWithCommitChunk() throws IOException {
  String testDir = GenericTestUtils.getTempPath(
      TestHddsDispatcher.class.getSimpleName());
  try {
    UUID scmId = UUID.randomUUID();
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(HDDS_DATANODE_DIR_KEY, testDir);
    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
    DatanodeDetails dd = randomDatanodeDetails();
    HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf);
    ContainerCommandRequestProto writeChunkRequest =
        getWriteChunkRequest(dd.getUuidString(), 1L, 1L);

    // send read chunk request and make sure container does not exist
    ContainerCommandResponseProto response =
        hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest), null);
    Assert.assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND,
        response.getResult());

    DispatcherContext dispatcherContext = new DispatcherContext.Builder()
        .setContainer2BCSIDMap(Collections.emptyMap())
        .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA)
        .build();
    GenericTestUtils.LogCapturer logCapturer =
        GenericTestUtils.LogCapturer.captureLogs(HddsDispatcher.LOG);

    // send write chunk request without sending create container
    response = hddsDispatcher.dispatch(writeChunkRequest, dispatcherContext);
    // container should not be found
    Assert.assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND,
        response.getResult());
    assertTrue(logCapturer.getOutput().contains(
        "ContainerID " + writeChunkRequest.getContainerID() + " does not exist"));
  } finally {
    ContainerMetrics.remove();
    FileUtils.deleteDirectory(new File(testDir));
  }
}
use of org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext in project ozone by apache.
the class TestKeyValueContainerIntegrityChecks method createContainerWithBlocks.
/**
* Creates a container with normal and deleted blocks.
* First it will insert normal blocks, and then it will insert
* deleted blocks.
*/
protected KeyValueContainer createContainerWithBlocks(long containerId,
    int normalBlocks, int deletedBlocks) throws Exception {
  String strBlock = "block";
  String strChunk = "-chunkFile";
  long totalBlocks = normalBlocks + deletedBlocks;
  int bytesPerChecksum = 2 * UNIT_LEN;
  Checksum checksum = new Checksum(ContainerProtos.ChecksumType.SHA256,
      bytesPerChecksum);
  byte[] chunkData = RandomStringUtils.randomAscii(CHUNK_LEN).getBytes(UTF_8);
  ChecksumData checksumData = checksum.computeChecksum(chunkData);
  DispatcherContext writeStage = new DispatcherContext.Builder()
      .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA)
      .build();
  DispatcherContext commitStage = new DispatcherContext.Builder()
      .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA)
      .build();

  KeyValueContainerData containerData = new KeyValueContainerData(containerId,
      containerLayoutTestInfo.getLayout(),
      (long) CHUNKS_PER_BLOCK * CHUNK_LEN * totalBlocks,
      UUID.randomUUID().toString(), UUID.randomUUID().toString());
  KeyValueContainer container = new KeyValueContainer(containerData, conf);
  container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
      UUID.randomUUID().toString());

  try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData, conf)) {
    assertNotNull(containerData.getChunksPath());
    File chunksPath = new File(containerData.getChunksPath());
    containerLayoutTestInfo.validateFileCount(chunksPath, 0, 0);

    List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
    for (int i = 0; i < totalBlocks; i++) {
      BlockID blockID = new BlockID(containerId, i);
      BlockData blockData = new BlockData(blockID);

      chunkList.clear();
      for (long chunkCount = 0; chunkCount < CHUNKS_PER_BLOCK; chunkCount++) {
        String chunkName = strBlock + i + strChunk + chunkCount;
        long offset = chunkCount * CHUNK_LEN;
        ChunkInfo info = new ChunkInfo(chunkName, offset, CHUNK_LEN);
        info.setChecksumData(checksumData);
        chunkList.add(info.getProtoBufMessage());
        chunkManager.writeChunk(container, blockID, info,
            ByteBuffer.wrap(chunkData), writeStage);
        chunkManager.writeChunk(container, blockID, info,
            ByteBuffer.wrap(chunkData), commitStage);
      }
      blockData.setChunks(chunkList);

      // normal key
      String key = Long.toString(blockID.getLocalID());
      if (i >= normalBlocks) {
        // deleted key
        key = OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID();
      }
      metadataStore.getStore().getBlockDataTable().put(key, blockData);
    }

    containerLayoutTestInfo.validateFileCount(chunksPath, totalBlocks,
        totalBlocks * CHUNKS_PER_BLOCK);
  }

  return container;
}