Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer in project ozone by apache.
The class TestHddsDispatcher, method testContainerCloseActionWhenFull.
@Test
public void testContainerCloseActionWhenFull() throws IOException {
  String testDir = GenericTestUtils.getTempPath(
      TestHddsDispatcher.class.getSimpleName());
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(HDDS_DATANODE_DIR_KEY, testDir);
  conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
  DatanodeDetails dd = randomDatanodeDetails();
  MutableVolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf,
      null, StorageVolume.VolumeType.DATA_VOLUME, null);
  try {
    UUID scmId = UUID.randomUUID();
    ContainerSet containerSet = new ContainerSet();
    DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
    StateContext context = Mockito.mock(StateContext.class);
    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
    Mockito.when(context.getParent()).thenReturn(stateMachine);
    KeyValueContainerData containerData = new KeyValueContainerData(1L, layout,
        (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(),
        dd.getUuidString());
    Container container = new KeyValueContainer(containerData, conf);
    container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
        scmId.toString());
    containerSet.addContainer(container);
    ContainerMetrics metrics = ContainerMetrics.create(conf);
    Map<ContainerType, Handler> handlers = Maps.newHashMap();
    for (ContainerType containerType : ContainerType.values()) {
      handlers.put(containerType, Handler.getHandlerForContainerType(
          containerType, conf,
          context.getParent().getDatanodeDetails().getUuidString(),
          containerSet, volumeSet, metrics, NO_OP_ICR_SENDER));
    }
    HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet,
        volumeSet, handlers, context, metrics, null);
    hddsDispatcher.setClusterId(scmId.toString());
    // First write: the container has plenty of free space, so no
    // container close action should be queued.
    ContainerCommandResponseProto responseOne = hddsDispatcher.dispatch(
        getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, responseOne.getResult());
    verify(context, times(0))
        .addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
    // Simulate a nearly full container: 950 MB used of the 1 GB max size.
    containerData.setBytesUsed(
        Double.valueOf(StorageUnit.MB.toBytes(950)).longValue());
    // Second write still succeeds, but the dispatcher should now queue
    // a close action for the almost-full container.
    ContainerCommandResponseProto responseTwo = hddsDispatcher.dispatch(
        getWriteChunkRequest(dd.getUuidString(), 1L, 2L), null);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, responseTwo.getResult());
    verify(context, times(1))
        .addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
  } finally {
    volumeSet.shutdown();
    ContainerMetrics.remove();
    FileUtils.deleteDirectory(new File(testDir));
  }
}
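The getWriteChunkRequest helper is elided from this snippet. A minimal sketch of what such a helper could look like, built on the standard ContainerCommandRequestProto builders; the chunk name, payload, and checksum settings here are illustrative assumptions, not the actual helper from TestHddsDispatcher:

// Hypothetical sketch: builds a WriteChunk request against the given
// container. The real helper may differ in chunk naming and data size.
private ContainerCommandRequestProto getWriteChunkRequest(
    String datanodeId, Long containerId, Long localId) {
  ByteString data = ByteString.copyFromUtf8("test data");
  ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo.newBuilder()
      .setChunkName(localId + "_chunk_0")
      .setOffset(0)
      .setLen(data.size())
      .setChecksumData(ContainerProtos.ChecksumData.newBuilder()
          .setType(ContainerProtos.ChecksumType.NONE)
          .setBytesPerChecksum(256).build())
      .build();
  ContainerProtos.WriteChunkRequestProto.Builder writeChunk =
      ContainerProtos.WriteChunkRequestProto.newBuilder()
          .setBlockID(new BlockID(containerId, localId)
              .getDatanodeBlockIDProtobuf())
          .setChunkData(chunk)
          .setData(data);
  return ContainerCommandRequestProto.newBuilder()
      .setContainerID(containerId)
      .setCmdType(ContainerProtos.Type.WriteChunk)
      .setDatanodeUuid(datanodeId)
      .setWriteChunk(writeChunk)
      .build();
}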
Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer in project ozone by apache.
The class TestFilePerChunkStrategy, method deletesChunkFileWithLengthIncludingOffset.
/**
 * Tests that a "new datanode" can delete chunks written to an "old
 * datanode" by a "new client" (i.e. where the chunk file was accidentally
 * created with {@code size = chunk offset + chunk length}, instead of only
 * the chunk length).
 */
@Test
public void deletesChunkFileWithLengthIncludingOffset() throws Exception {
  // GIVEN
  ChunkManager chunkManager = createTestSubject();
  KeyValueContainer container = getKeyValueContainer();
  BlockID blockID = getBlockID();
  ChunkInfo chunkInfo = getChunkInfo();
  long offset = 1024;
  ChunkInfo oldDatanodeChunkInfo = new ChunkInfo(chunkInfo.getChunkName(),
      offset, chunkInfo.getLen());
  File file = ContainerLayoutVersion.FILE_PER_CHUNK.getChunkFile(
      container.getContainerData(), blockID, chunkInfo);
  ChunkUtils.writeData(file, ChunkBuffer.wrap(getData()),
      offset, chunkInfo.getLen(), null, true);
  checkChunkFileCount(1);
  assertTrue(file.exists());
  assertEquals(offset + chunkInfo.getLen(), file.length());

  // WHEN
  chunkManager.deleteChunk(container, blockID, oldDatanodeChunkInfo);

  // THEN
  checkChunkFileCount(0);
  assertFalse(file.exists());
}
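checkChunkFileCount is a helper of the abstract test base and is not shown here. A plausible sketch, assuming it simply counts the files under the container's chunks directory (the real helper may additionally filter by file name):

// Hypothetical sketch of the checkChunkFileCount helper used above.
private void checkChunkFileCount(int expected) {
  // Count the files in the container's chunks directory.
  String chunksPath = getKeyValueContainer().getContainerData().getChunksPath();
  File[] files = new File(chunksPath).listFiles();
  assertEquals(expected, files == null ? 0 : files.length);
}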
Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer in project ozone by apache.
The class TestContainerReader, method setup.
@Before
public void setup() throws Exception {
  File volumeDir = tempDir.newFolder();
  containerSet = new ContainerSet();
  conf = new OzoneConfiguration();
  datanodeId = UUID.randomUUID();
  hddsVolume = new HddsVolume.Builder(volumeDir.getAbsolutePath())
      .conf(conf).datanodeUuid(datanodeId.toString())
      .clusterID(clusterId).build();
  volumeSet = mock(MutableVolumeSet.class);
  volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
  Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
      .thenReturn(hddsVolume);
  for (int i = 0; i < 2; i++) {
    KeyValueContainerData keyValueContainerData = new KeyValueContainerData(i,
        ContainerLayoutVersion.FILE_PER_BLOCK,
        (long) StorageUnit.GB.toBytes(5),
        UUID.randomUUID().toString(), datanodeId.toString());
    KeyValueContainer keyValueContainer =
        new KeyValueContainer(keyValueContainerData, conf);
    keyValueContainer.create(volumeSet, volumeChoosingPolicy, clusterId);
    List<Long> blkNames;
    if (i % 2 == 0) {
      blkNames = addBlocks(keyValueContainer, true);
      markBlocksForDelete(keyValueContainer, true, blkNames, i);
    } else {
      blkNames = addBlocks(keyValueContainer, false);
      markBlocksForDelete(keyValueContainer, false, blkNames, i);
    }
    // Close the RocksDB instance for this container and remove it from the
    // cache so it does not affect the ContainerReader, which avoids using
    // the cache at startup for performance reasons.
    BlockUtils.removeDB(keyValueContainerData, conf);
  }
}
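With the two containers persisted and their DB handles released, the tests in TestContainerReader typically run a ContainerReader over the volume and assert that the containers are rebuilt into a fresh ContainerSet. A rough sketch, assuming a ContainerReader constructor of this shape (the exact signature varies across Ozone versions, so check it against yours):

// Replay the on-disk container layout back into an empty ContainerSet.
ContainerSet newContainerSet = new ContainerSet();
ContainerReader containerReader =
    new ContainerReader(volumeSet, hddsVolume, newContainerSet, conf);
containerReader.run();
// Both containers created in setup() should be visible again after the scan.
assertEquals(2, newContainerSet.containerCount());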
Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer in project ozone by apache.
The class AbstractTestChunkManager, method setUp.
@Before
public final void setUp() throws Exception {
  OzoneConfiguration config = new OzoneConfiguration();
  getStrategy().updateConfig(config);
  UUID datanodeId = UUID.randomUUID();
  hddsVolume = new HddsVolume.Builder(folder.getRoot().getAbsolutePath())
      .conf(config).datanodeUuid(datanodeId.toString()).build();
  VolumeSet volumeSet = mock(MutableVolumeSet.class);
  RoundRobinVolumeChoosingPolicy volumeChoosingPolicy =
      mock(RoundRobinVolumeChoosingPolicy.class);
  Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
      .thenReturn(hddsVolume);
  keyValueContainerData = new KeyValueContainerData(1L,
      ContainerLayoutVersion.getConfiguredVersion(config),
      (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
      datanodeId.toString());
  keyValueContainer = new KeyValueContainer(keyValueContainerData, config);
  keyValueContainer.create(volumeSet, volumeChoosingPolicy,
      UUID.randomUUID().toString());
  // Buffer layout: a short header followed by the chunk payload.
  header = "my header".getBytes(UTF_8);
  byte[] bytes = "testing write chunks".getBytes(UTF_8);
  data = ByteBuffer.allocate(header.length + bytes.length)
      .put(header).put(bytes);
  rewindBufferToDataStart();
  // Create the BlockID and ChunkInfo the chunk tests operate on.
  blockID = new BlockID(1L, 1L);
  chunkInfo = new ChunkInfo(
      String.format("%d.data.%d", blockID.getLocalID(), 0), 0, bytes.length);
}
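rewindBufferToDataStart is a small helper of this abstract class that is not shown above. A reasonable sketch, assuming its job is to position the buffer at the start of the payload so the chunk tests read past the header:

// Hypothetical sketch: skip the header so reads start at the chunk payload.
private void rewindBufferToDataStart() {
  data.position(header.length);
}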
Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer in project ozone by apache.
The class TestBlockManagerImpl, method setUp.
@Before
public void setUp() throws Exception {
  config = new OzoneConfiguration();
  UUID datanodeId = UUID.randomUUID();
  HddsVolume hddsVolume = new HddsVolume.Builder(
      folder.getRoot().getAbsolutePath())
      .conf(config).datanodeUuid(datanodeId.toString()).build();
  volumeSet = mock(MutableVolumeSet.class);
  volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
  Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
      .thenReturn(hddsVolume);
  keyValueContainerData = new KeyValueContainerData(1L, layout,
      (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
      datanodeId.toString());
  keyValueContainer = new KeyValueContainer(keyValueContainerData, config);
  keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
  // Create the first BlockData with a single 1 KB chunk.
  blockID = new BlockID(1L, 1L);
  blockData = new BlockData(blockID);
  blockData.addMetadata(OzoneConsts.VOLUME, OzoneConsts.OZONE);
  blockData.addMetadata(OzoneConsts.OWNER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
  List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
  ChunkInfo info = new ChunkInfo(
      String.format("%d.data.%d", blockID.getLocalID(), 0), 0, 1024);
  chunkList.add(info.getProtoBufMessage());
  blockData.setChunks(chunkList);
  // Create a second BlockData in the same container, with a higher
  // block commit sequence id.
  blockID1 = new BlockID(1L, 2L);
  blockData1 = new BlockData(blockID1);
  blockData1.addMetadata(OzoneConsts.VOLUME, OzoneConsts.OZONE);
  blockData1.addMetadata(OzoneConsts.OWNER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
  List<ContainerProtos.ChunkInfo> chunkList1 = new ArrayList<>();
  ChunkInfo info1 = new ChunkInfo(
      String.format("%d.data.%d", blockID1.getLocalID(), 0), 0, 1024);
  chunkList1.add(info1.getProtoBufMessage());
  blockData1.setChunks(chunkList1);
  blockData1.setBlockCommitSequenceId(1);
  // Create the BlockManager under test.
  blockManager = new BlockManagerImpl(config);
}
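Once setUp() has run, the tests exercise BlockManagerImpl against the prepared container. A minimal sketch of such a round trip, assuming the putBlock(Container, BlockData) and getBlock(Container, BlockID) signatures; the test name and assertions here are illustrative, not taken from TestBlockManagerImpl:

@Test
public void putAndGetBlock() throws Exception {
  // Persist the first block and read it back through the manager.
  blockManager.putBlock(keyValueContainer, blockData);
  BlockData fromManager = blockManager.getBlock(keyValueContainer, blockID);
  assertEquals(blockData.getBlockID(), fromManager.getBlockID());
  assertEquals(blockData.getChunks().size(), fromManager.getChunks().size());
}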