Use of org.apache.hadoop.hdds.scm.pipeline.Pipeline in project Ozone by Apache.
The class StorageContainerLocationProtocolClientSideTranslatorPB, method getPipeline.
@Override
public Pipeline getPipeline(HddsProtos.PipelineID pipelineID) throws IOException {
  GetPipelineRequestProto request = GetPipelineRequestProto.newBuilder()
      .setPipelineID(pipelineID)
      .setTraceID(TracingUtil.exportCurrentSpan())
      .build();
  GetPipelineResponseProto response = submitRequest(Type.GetPipeline,
      builder -> builder.setGetPipelineRequest(request)).getGetPipelineResponse();
  return Pipeline.getFromProtobuf(response.getPipeline());
}
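
A minimal caller-side sketch of using this lookup (not from the Ozone sources; the helper class and the way the SCM client is obtained are illustrative assumptions, and the import paths are the usual hdds ones). It converts a typed PipelineID to the protobuf form expected by the StorageContainerLocationProtocol interface that this translator implements:

import java.io.IOException;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;

// Hypothetical helper, not part of Ozone: resolve a typed PipelineID to a
// full Pipeline through an already-constructed SCM client.
public final class PipelineLookupSketch {

  private PipelineLookupSketch() {
  }

  public static Pipeline lookup(StorageContainerLocationProtocol scmClient,
      PipelineID id) throws IOException {
    // Convert the typed ID to the protobuf message the protocol expects.
    HddsProtos.PipelineID protoId = id.getProtobuf();
    return scmClient.getPipeline(protoId);
  }
}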
Use of org.apache.hadoop.hdds.scm.pipeline.Pipeline in project Ozone by Apache.
The class TestDatanodeUpgradeToScmHA, method testFormattingNewVolumes.
@Test
public void testFormattingNewVolumes() throws Exception {
  /// SETUP ///
  String originalScmID = startScmServer();
  File preFinVolume1 = addVolume();
  startPreFinalizedDatanode();
  final Pipeline pipeline = getPipeline();

  /// PRE-FINALIZED: Write and Read from formatted volume ///
  Assert.assertEquals(1, dsm.getContainer().getVolumeSet().getVolumesList().size());
  Assert.assertEquals(0, dsm.getContainer().getVolumeSet().getFailedVolumesList().size());
  // Add container with data, make sure it can be read and written.
  final long containerID = addContainer(pipeline);
  ContainerProtos.WriteChunkRequestProto writeChunk = putBlock(containerID, pipeline);
  readChunk(writeChunk, pipeline);
  checkPreFinalizedVolumePathID(preFinVolume1, originalScmID, CLUSTER_ID);
  checkContainerPathID(containerID, originalScmID, CLUSTER_ID);

  /// PRE-FINALIZED: Restart with SCM HA enabled and new SCM ID ///
  // Now SCM and enough other DNs finalize to enable SCM HA. This DN is
  // restarted with SCM HA config and gets a different SCM ID.
  conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
  changeScmID();
  // A new volume is added that must be formatted.
  File preFinVolume2 = addVolume();
  restartDatanode(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion());
  Assert.assertEquals(2, dsm.getContainer().getVolumeSet().getVolumesList().size());
  Assert.assertEquals(0, dsm.getContainer().getVolumeSet().getFailedVolumesList().size());
  // Because DN mlv would be behind SCM mlv, only reads are allowed.
  readChunk(writeChunk, pipeline);
  // On restart, there should have been no changes to the paths already used.
  checkPreFinalizedVolumePathID(preFinVolume1, originalScmID, CLUSTER_ID);
  checkContainerPathID(containerID, originalScmID, CLUSTER_ID);
  // No new containers can be created on this volume since SCM MLV is ahead
  // of DN MLV at this point.
  // Cluster ID should always be used for the new volume since SCM HA is now
  // enabled.
  checkVolumePathID(preFinVolume2, CLUSTER_ID);

  /// FINALIZE ///
  closeContainer(containerID, pipeline);
  dsm.finalizeUpgrade();
  LambdaTestUtils.await(2000, 500,
      () -> dsm.getLayoutVersionManager().isAllowed(HDDSLayoutFeature.SCM_HA));

  /// FINALIZED: Add a new volume and check its formatting ///
  // Add a new volume that should be formatted with cluster ID only, since
  // DN has finalized.
  File finVolume = addVolume();
  // Yet another SCM ID is received this time, but it should not matter.
  changeScmID();
  restartDatanode(HDDSLayoutFeature.SCM_HA.layoutVersion());
  Assert.assertEquals(3, dsm.getContainer().getVolumeSet().getVolumesList().size());
  Assert.assertEquals(0, dsm.getContainer().getVolumeSet().getFailedVolumesList().size());
  checkFinalizedVolumePathID(preFinVolume1, originalScmID, CLUSTER_ID);
  checkVolumePathID(preFinVolume2, CLUSTER_ID);
  checkContainerPathID(containerID, originalScmID, CLUSTER_ID);
  // New volume should have been formatted with cluster ID only, since the
  // datanode is finalized.
  checkVolumePathID(finVolume, CLUSTER_ID);

  /// FINALIZED: Read old data and write + read new data ///
  // Read container from before upgrade. The upgrade required it to be closed.
  readChunk(writeChunk, pipeline);
  // Write and read container after upgrade.
  long newContainerID = addContainer(pipeline);
  ContainerProtos.WriteChunkRequestProto newWriteChunk = putBlock(newContainerID, pipeline);
  readChunk(newWriteChunk, pipeline);
  // The new container should use cluster ID in its path.
  // The volume it is placed on is up to the implementation.
  checkContainerPathID(newContainerID, CLUSTER_ID);
}
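
The path assertions above boil down to one invariant: a formatted volume root carries an ID-named subdirectory, the SCM ID for a pre-finalized datanode without SCM HA, and the cluster ID once SCM HA is enabled or the datanode has finalized. A simplified, hypothetical sketch of that check (it is not the test's actual checkVolumePathID helper and ignores the real on-disk layout details):

import java.io.File;

// Hypothetical, simplified version of the volume-path check: assert that the
// volume root contains a directory named after the expected ID.
final class VolumePathCheckSketch {

  private VolumePathCheckSketch() {
  }

  static void assertFormattedWith(File volumeRoot, String expectedId) {
    File idDir = new File(volumeRoot, expectedId);
    if (!idDir.isDirectory()) {
      throw new AssertionError("Expected directory " + expectedId
          + " under volume " + volumeRoot);
    }
  }
}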
Use of org.apache.hadoop.hdds.scm.pipeline.Pipeline in project Ozone by Apache.
The class TestDatanodeUpgradeToScmHA, method testReadsDuringFinalization.
@Test
public void testReadsDuringFinalization() throws Exception {
  // Start DN and SCM.
  startScmServer();
  addVolume();
  startPreFinalizedDatanode();
  final Pipeline pipeline = getPipeline();
  // Add data to read.
  final long containerID = addContainer(pipeline);
  ContainerProtos.WriteChunkRequestProto writeChunk = putBlock(containerID, pipeline);
  closeContainer(containerID, pipeline);
  // Create thread to keep reading during finalization.
  ExecutorService executor = Executors.newFixedThreadPool(1);
  Future<Void> readFuture = executor.submit(() -> {
    // Layout version check should be thread safe.
    while (!dsm.getLayoutVersionManager().isAllowed(HDDSLayoutFeature.SCM_HA)) {
      readChunk(writeChunk, pipeline);
    }
    // Make sure we can read after finalizing too.
    readChunk(writeChunk, pipeline);
    return null;
  });
  dsm.finalizeUpgrade();
  // If there was a failure reading during the upgrade, the exception will
  // be thrown here.
  readFuture.get();
}
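
The concurrency pattern here generalizes: run a check repeatedly on a background thread until a condition flips, then surface any failure on the calling thread through Future.get(). A self-contained sketch of that pattern with generic names (none of these exist in the test):

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.function.BooleanSupplier;

// Hypothetical utility illustrating the read-during-finalization loop above.
final class BackgroundCheckSketch {

  private BackgroundCheckSketch() {
  }

  static Future<Void> runUntil(ExecutorService executor, BooleanSupplier done,
      Callable<Void> check) {
    return executor.submit(() -> {
      // Keep running the check until the condition holds...
      while (!done.getAsBoolean()) {
        check.call();
      }
      // ...and once more afterwards, mirroring the test's final read.
      check.call();
      return null;
    });
  }

  public static void main(String[] args) throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(1);
    try {
      long stopAt = System.currentTimeMillis() + 100;
      Future<Void> future = runUntil(executor,
          () -> System.currentTimeMillis() >= stopAt,
          () -> null); // Stand-in for the real check, e.g. a chunk read.
      future.get(); // Any exception from the check is rethrown here.
    } finally {
      executor.shutdown();
    }
  }
}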
Use of org.apache.hadoop.hdds.scm.pipeline.Pipeline in project Ozone by Apache.
The class RpcClient, method getKeysEveryReplicas.
@Override
public Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> getKeysEveryReplicas(
    String volumeName, String bucketName, String keyName) throws IOException {
  Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> result = new LinkedHashMap<>();
  verifyVolumeName(volumeName);
  verifyBucketName(bucketName);
  Preconditions.checkNotNull(keyName);
  OmKeyArgs keyArgs = new OmKeyArgs.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setKeyName(keyName)
      .setRefreshPipeline(true)
      .setSortDatanodesInPipeline(topologyAwareReadEnabled)
      .build();
  OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
  List<OmKeyLocationInfo> keyLocationInfos =
      keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
  for (OmKeyLocationInfo keyLocationInfo : keyLocationInfos) {
    Map<DatanodeDetails, OzoneInputStream> blocks = new HashMap<>();
    Pipeline pipelineBefore = keyLocationInfo.getPipeline();
    List<DatanodeDetails> datanodes = pipelineBefore.getNodes();
    for (DatanodeDetails dn : datanodes) {
      // Build a single-node pipeline with a new ID so the read targets only
      // this replica.
      List<DatanodeDetails> nodes = new ArrayList<>();
      nodes.add(dn);
      Pipeline pipeline = new Pipeline.Builder(pipelineBefore)
          .setNodes(nodes)
          .setId(PipelineID.randomId())
          .build();
      keyLocationInfo.setPipeline(pipeline);
      // Swap the single-replica location into the key info before opening
      // the stream.
      List<OmKeyLocationInfo> keyLocationInfoList = new ArrayList<>();
      keyLocationInfoList.add(keyLocationInfo);
      OmKeyLocationInfoGroup keyLocationInfoGroup =
          new OmKeyLocationInfoGroup(0, keyLocationInfoList);
      List<OmKeyLocationInfoGroup> keyLocationInfoGroups = new ArrayList<>();
      keyLocationInfoGroups.add(keyLocationInfoGroup);
      keyInfo.setKeyLocationVersions(keyLocationInfoGroups);
      OzoneInputStream is = createInputStream(keyInfo, Function.identity());
      blocks.put(dn, is);
    }
    result.put(keyLocationInfo, blocks);
  }
  return result;
}
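
A caller-side sketch of consuming the returned map, for example to compare replica contents (the helper below is hypothetical and not part of RpcClient; it just reads the start of each replica stream and closes it):

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;

// Hypothetical consumer of the map returned by getKeysEveryReplicas.
final class ReplicaReadSketch {

  private ReplicaReadSketch() {
  }

  static void readAllReplicas(
      Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicas)
      throws IOException {
    for (Map<DatanodeDetails, OzoneInputStream> replicasOfBlock : replicas.values()) {
      for (OzoneInputStream stream : replicasOfBlock.values()) {
        try (OzoneInputStream in = stream) {
          byte[] buffer = new byte[4096];
          int read = in.read(buffer);
          // The bytes (and read length) could be compared across replicas here.
        }
      }
    }
  }
}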
Use of org.apache.hadoop.hdds.scm.pipeline.Pipeline in project Ozone by Apache.
The class ReplicatedFileChecksumHelper, method getChunkInfos.
// copied from BlockInputStream
/**
* Send RPC call to get the block info from the container.
* @return List of chunks in this block.
*/
protected List<ContainerProtos.ChunkInfo> getChunkInfos(
    OmKeyLocationInfo keyLocationInfo) throws IOException {
  // irrespective of the container state, we will always read via Standalone
  // protocol.
  Token<OzoneBlockTokenIdentifier> token = keyLocationInfo.getToken();
  Pipeline pipeline = keyLocationInfo.getPipeline();
  BlockID blockID = keyLocationInfo.getBlockID();
  if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
    pipeline = Pipeline.newBuilder(pipeline)
        .setReplicationConfig(StandaloneReplicationConfig.getInstance(
            ReplicationConfig.getLegacyFactor(pipeline.getReplicationConfig())))
        .build();
  }
  boolean success = false;
  List<ContainerProtos.ChunkInfo> chunks;
  XceiverClientSpi xceiverClientSpi = null;
  try {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Initializing BlockInputStream for get key to access {}",
          blockID.getContainerID());
    }
    xceiverClientSpi = getXceiverClientFactory().acquireClientForReadData(pipeline);
    ContainerProtos.DatanodeBlockID datanodeBlockID = blockID.getDatanodeBlockIDProtobuf();
    ContainerProtos.GetBlockResponseProto response =
        ContainerProtocolCalls.getBlock(xceiverClientSpi, datanodeBlockID, token);
    chunks = response.getBlockData().getChunksList();
    success = true;
  } finally {
    if (!success && xceiverClientSpi != null) {
      getXceiverClientFactory().releaseClientForReadData(xceiverClientSpi, false);
    }
  }
  return chunks;
}
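
The key step above is forcing the read pipeline to STANDALONE while reusing the original nodes and replication factor, which is what lets the checksum helper fetch block info regardless of the container's state, as the comment notes. A sketch isolating just that rewrite (the helper class is hypothetical; the builder and replication-config calls are the same ones used in the method above):

import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

// Hypothetical helper isolating the pipeline rewrite from getChunkInfos:
// keep the same nodes, but read through the STANDALONE protocol.
final class StandaloneReadPipelineSketch {

  private StandaloneReadPipelineSketch() {
  }

  static Pipeline toStandaloneForRead(Pipeline pipeline) {
    if (pipeline.getType() == HddsProtos.ReplicationType.STAND_ALONE) {
      return pipeline;
    }
    return Pipeline.newBuilder(pipeline)
        .setReplicationConfig(StandaloneReplicationConfig.getInstance(
            ReplicationConfig.getLegacyFactor(pipeline.getReplicationConfig())))
        .build();
  }
}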