Use of org.apache.hadoop.ozone.container.common.volume.HddsVolume in project ozone by apache.
The class ContainerCommands, method loadContainersFromVolumes.
public void loadContainersFromVolumes() throws IOException {
  OzoneConfiguration conf = parent.getOzoneConf();
  ContainerSet containerSet = new ContainerSet();
  ContainerMetrics metrics = ContainerMetrics.create(conf);
  String firstStorageDir = getFirstStorageDir(conf);
  String datanodeUuid = getDatanodeUUID(firstStorageDir, conf);
  String clusterId = getClusterId(firstStorageDir);
  volumeSet = new MutableVolumeSet(datanodeUuid, conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
  Map<ContainerProtos.ContainerType, Handler> handlers = new HashMap<>();
  for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) {
    final Handler handler = Handler.getHandlerForContainerType(containerType, conf, datanodeUuid, containerSet, volumeSet, metrics, containerReplicaProto -> {
    });
    handler.setClusterID(clusterId);
    handlers.put(containerType, handler);
  }
  controller = new ContainerController(containerSet, handlers);
  List<HddsVolume> volumes = StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList());
  Iterator<HddsVolume> volumeSetIterator = volumes.iterator();
  LOG.info("Starting the read all the container metadata");
  while (volumeSetIterator.hasNext()) {
    HddsVolume volume = volumeSetIterator.next();
    LOG.info("Loading container metadata from volume " + volume.toString());
    final ContainerReader reader = new ContainerReader(volumeSet, volume, containerSet, conf);
    reader.run();
  }
  LOG.info("All the container metadata is loaded.");
}
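For context, here is a minimal sketch of how a caller might walk the HDDS data volumes of a MutableVolumeSet once the containers have been loaded. It reuses only the calls shown in the snippets on this page (getVolumesList, StorageVolumeUtil.getHddsVolumesList, getHddsRootDir); the VolumeRootLister class is hypothetical, and the import paths for classes other than HddsVolume are assumed.

import java.util.List;

import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;

/** Hypothetical helper: prints the root directory of each HDDS data volume. */
final class VolumeRootLister {

  private VolumeRootLister() {
  }

  static void listVolumeRoots(MutableVolumeSet volumeSet) {
    // Narrow the generic StorageVolume list down to HDDS data volumes,
    // exactly as loadContainersFromVolumes() does above.
    List<HddsVolume> volumes =
        StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList());
    for (HddsVolume volume : volumes) {
      System.out.println("HDDS volume root: " + volume.getHddsRootDir());
    }
  }
}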
Use of org.apache.hadoop.ozone.container.common.volume.HddsVolume in project ozone by apache.
The class ContainerData, method commitSpace.
/**
 * Add available space in the container to the committed space in the volume.
 * Available space is the number of bytes remaining till max capacity.
 */
public void commitSpace() {
  long unused = getMaxSize() - getBytesUsed();
  ContainerDataProto.State myState = getState();
  HddsVolume cVol;
  // we don't expect duplicate calls
  Preconditions.checkState(!committedSpace);
  // Only Open Containers have Committed Space
  if (myState != ContainerDataProto.State.OPEN) {
    return;
  }
  // junit tests do not always set up volume
  cVol = getVolume();
  if (unused > 0 && (cVol != null)) {
    cVol.incCommittedBytes(unused);
    committedSpace = true;
  }
}
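To make the bookkeeping concrete, here is a small standalone sketch of the same accounting rule: committed space is the unused part of an open container, and nothing is reserved for non-open containers or containers that are already full. The CommittedSpaceMath class and committedDelta method are illustrative names only; they mirror the arithmetic of commitSpace() without touching real HddsVolume state.

final class CommittedSpaceMath {

  private CommittedSpaceMath() {
  }

  /** Illustrative only: how many bytes commitSpace() would reserve on the volume. */
  static long committedDelta(long maxSize, long bytesUsed, boolean isOpen) {
    // Unused space is the gap between max capacity and current usage.
    long unused = maxSize - bytesUsed;
    // Only open containers with room left reserve committed space.
    return (isOpen && unused > 0) ? unused : 0;
  }
}

// Example: a 5 GB container with 1 GB already written reserves 4 GB:
// committedDelta(5L << 30, 1L << 30, true) == 4L << 30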
Use of org.apache.hadoop.ozone.container.common.volume.HddsVolume in project ozone by apache.
The class KeyValueContainer, method create.
@Override
public void create(VolumeSet volumeSet, VolumeChoosingPolicy volumeChoosingPolicy, String clusterId) throws StorageContainerException {
  Preconditions.checkNotNull(volumeChoosingPolicy, "VolumeChoosingPolicy " + "cannot be null");
  Preconditions.checkNotNull(volumeSet, "VolumeSet cannot be null");
  Preconditions.checkNotNull(clusterId, "clusterId cannot be null");
  File containerMetaDataPath = null;
  // acquiring volumeset read lock
  long maxSize = containerData.getMaxSize();
  volumeSet.readLock();
  try {
    HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()), maxSize);
    String hddsVolumeDir = containerVolume.getHddsRootDir().toString();
    long containerID = containerData.getContainerID();
    String idDir = VersionedDatanodeFeatures.ScmHA.chooseContainerPathID(containerVolume, clusterId);
    containerMetaDataPath = KeyValueContainerLocationUtil.getContainerMetaDataPath(hddsVolumeDir, idDir, containerID);
    containerData.setMetadataPath(containerMetaDataPath.getPath());
    File chunksPath = KeyValueContainerLocationUtil.getChunksLocationPath(hddsVolumeDir, idDir, containerID);
    // Check if it is new Container.
    ContainerUtils.verifyIsNewContainer(containerMetaDataPath);
    // Create Metadata path chunks path and metadata db
    File dbFile = getContainerDBFile();
    containerData.setSchemaVersion(VersionedDatanodeFeatures.SchemaV2.chooseSchemaVersion());
    KeyValueContainerUtil.createContainerMetaData(containerID, containerMetaDataPath, chunksPath, dbFile, containerData.getSchemaVersion(), config);
    // Set containerData for the KeyValueContainer.
    containerData.setChunksPath(chunksPath.getPath());
    containerData.setDbFile(dbFile);
    containerData.setVolume(containerVolume);
    // Create .container file
    File containerFile = getContainerFile();
    createContainerFile(containerFile);
  } catch (StorageContainerException ex) {
    if (containerMetaDataPath != null && containerMetaDataPath.getParentFile().exists()) {
      FileUtil.fullyDelete(containerMetaDataPath.getParentFile());
    }
    throw ex;
  } catch (DiskOutOfSpaceException ex) {
    throw new StorageContainerException("Container creation failed, due to " + "disk out of space", ex, DISK_OUT_OF_SPACE);
  } catch (FileAlreadyExistsException ex) {
    throw new StorageContainerException("Container creation failed because " + "ContainerFile already exists", ex, CONTAINER_ALREADY_EXISTS);
  } catch (IOException ex) {
    if (containerMetaDataPath != null && containerMetaDataPath.getParentFile().exists()) {
      FileUtil.fullyDelete(containerMetaDataPath.getParentFile());
    }
    throw new StorageContainerException("Container creation failed. " + ex.getMessage(), ex, CONTAINER_INTERNAL_ERROR);
  } finally {
    volumeSet.readUnlock();
  }
}
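The only volume-related decision in create() is delegated to the VolumeChoosingPolicy, which receives the candidate HddsVolume list and the requested container size. Below is a hedged sketch of such a policy in isolation: it assumes only the call shape visible in the snippet above, is not wired into the real VolumeChoosingPolicy interface, and simply picks the first candidate instead of doing the capacity checks the built-in policies perform. The class name FirstVolumeChoosingPolicy is made up for this example.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;

/** Illustrative policy only: picks the first offered volume. */
public class FirstVolumeChoosingPolicy {

  public HddsVolume chooseVolume(List<HddsVolume> volumes, long maxContainerSize)
      throws IOException {
    // A real policy would compare maxContainerSize against the free space of
    // each candidate; here we only guard against having no volumes at all.
    if (volumes == null || volumes.isEmpty()) {
      throw new DiskOutOfSpaceException("No HDDS volumes available for a new container");
    }
    return volumes.get(0);
  }
}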
Use of org.apache.hadoop.ozone.container.common.volume.HddsVolume in project ozone by apache.
The class FilePerBlockStrategy, method writeChunk.
@Override
public void writeChunk(Container container, BlockID blockID, ChunkInfo info, ChunkBuffer data, DispatcherContext dispatcherContext) throws StorageContainerException {
  checkLayoutVersion(container);
  Preconditions.checkNotNull(dispatcherContext);
  DispatcherContext.WriteChunkStage stage = dispatcherContext.getStage();
  if (info.getLen() <= 0) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Skip writing empty chunk {} in stage {}", info, stage);
    }
    return;
  }
  if (stage == COMMIT_DATA) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Ignore chunk {} in stage {}", info, stage);
    }
    return;
  }
  KeyValueContainerData containerData = (KeyValueContainerData) container.getContainerData();
  File chunkFile = getChunkFile(container, blockID, info);
  boolean overwrite = validateChunkForOverwrite(chunkFile, info);
  long len = info.getLen();
  long offset = info.getOffset();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Writing chunk {} (overwrite: {}) in stage {} to file {}", info, overwrite, stage, chunkFile);
  }
  HddsVolume volume = containerData.getVolume();
  FileChannel channel = null;
  try {
    channel = files.getChannel(chunkFile, doSyncWrite);
  } catch (IOException e) {
    onFailure(volume);
    throw e;
  }
  // check whether offset matches block file length if its an overwrite
  if (!overwrite) {
    ChunkUtils.validateChunkSize(chunkFile, info);
  }
  ChunkUtils.writeData(channel, chunkFile.getName(), data, offset, len, volume);
  containerData.updateWriteStats(len, overwrite);
}
Use of org.apache.hadoop.ozone.container.common.volume.HddsVolume in project ozone by apache.
The class FilePerChunkStrategy, method writeChunk.
/**
 * Writes a given chunk.
*
* @param container - Container for the chunk
* @param blockID - ID of the block
* @param info - ChunkInfo
* @param data - data of the chunk
* @param dispatcherContext - dispatcherContextInfo
* @throws StorageContainerException
*/
@Override
public void writeChunk(Container container, BlockID blockID, ChunkInfo info, ChunkBuffer data, DispatcherContext dispatcherContext) throws StorageContainerException {
  checkLayoutVersion(container);
  Preconditions.checkNotNull(dispatcherContext);
  DispatcherContext.WriteChunkStage stage = dispatcherContext.getStage();
  try {
    KeyValueContainer kvContainer = (KeyValueContainer) container;
    KeyValueContainerData containerData = kvContainer.getContainerData();
    HddsVolume volume = containerData.getVolume();
    File chunkFile = getChunkFile(kvContainer, blockID, info);
    boolean isOverwrite = ChunkUtils.validateChunkForOverwrite(chunkFile, info);
    File tmpChunkFile = getTmpChunkFile(chunkFile, dispatcherContext);
    if (LOG.isDebugEnabled()) {
      LOG.debug("writing chunk:{} chunk stage:{} chunk file:{} tmp chunk file:{}", info.getChunkName(), stage, chunkFile, tmpChunkFile);
    }
    long len = info.getLen();
    // ignore offset in chunk info
    long offset = 0;
    switch (stage) {
    case WRITE_DATA:
      if (isOverwrite) {
        // If the actual chunk file already exists here while writing the temp
        // chunk file, it means the same ozone client request has generated
        // two raft log entries. This can happen either because the retryCache
        // expired in Ratis, or because of a log index mismatch/corruption in
        // Ratis. As of now this can be handled in two ways:
        // 1. Read the complete data in the actual chunk file and verify its
        // integrity; if it mismatches,
        // 2. Delete the chunk file and write the chunk again. For now,
        // let's simply rewrite the chunk file.
        // TODO: once checksum support for write chunks is plugged in, verify
        // the checksum of the actual chunk file against the data to be
        // written here (which should be efficient); if it matches we can
        // safely return without rewriting.
        LOG.warn("ChunkFile already exists {}. Deleting it.", chunkFile);
        FileUtil.fullyDelete(chunkFile);
      }
      if (tmpChunkFile.exists()) {
        // If the tmp chunk file already exists, it means the raft log got
        // appended, but later the log entry was truncated in Ratis, leaving
        // behind garbage.
        // TODO: once checksum support for data chunks is plugged in, compare
        // the checksums instead of rewriting the chunk here.
        LOG.warn("tmpChunkFile already exists {}. Overwriting it.", tmpChunkFile);
      }
      // Initially write to the temporary chunk file; container stats are not
      // incremented here because the data is not yet committed.
      ChunkUtils.writeData(tmpChunkFile, data, offset, len, volume, doSyncWrite);
      break;
    case COMMIT_DATA:
      // Commit the data, i.e. move the chunk data from the temporary chunk
      // file to the actual chunk file.
      if (isOverwrite) {
        // If the actual chunk file already exists, it implies the write
        // chunk transaction in the containerStateMachine is being reapplied.
        // This can happen when a node restarts.
        // TODO: verify the checksums of the existing chunkFile and the
        // chunkInfo to be committed here.
        LOG.warn("ChunkFile already exists {}", chunkFile);
        return;
      }
      // While committing a chunk, just rename the tmp chunk file, which has
      // the same term and log index appended as the current transaction.
      commitChunk(tmpChunkFile, chunkFile);
      // Increment container stats here, as we commit the data.
      containerData.updateWriteStats(len, isOverwrite);
      break;
    case COMBINED:
      // Write directly to the chunk file.
      ChunkUtils.writeData(chunkFile, data, offset, len, volume, doSyncWrite);
      containerData.updateWriteStats(len, isOverwrite);
      break;
    default:
      throw new IOException("Can not identify write operation.");
    }
  } catch (StorageContainerException ex) {
    throw ex;
  } catch (IOException ex) {
    throw new StorageContainerException("Internal error: ", ex, IO_EXCEPTION);
  }
}
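The two-phase write above hinges on committing a chunk by renaming its fully written temporary file into place. Below is a standalone sketch of that commit step using java.nio.file.Files.move with an atomic move; it is not the real commitChunk helper, and the ChunkCommit class is hypothetical.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

final class ChunkCommit {

  private ChunkCommit() {
  }

  /** Moves the fully written tmp chunk file onto its final name. */
  static void commit(Path tmpChunkFile, Path chunkFile) throws IOException {
    // An atomic move makes the chunk either fully visible or not at all, so a
    // crash between WRITE_DATA and COMMIT_DATA never exposes partial data.
    Files.move(tmpChunkFile, chunkFile, StandardCopyOption.ATOMIC_MOVE);
  }
}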