Example usage of org.apache.hadoop.hdds.client.BlockID in the Apache Ozone project:
class TestContainerStateMachineIdempotency, method testContainerStateMachineIdempotency.
@Test
public void testContainerStateMachineIdempotency() throws Exception {
  ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
  long containerID = container.getContainerInfo().getContainerID();
  Pipeline pipeline = container.getPipeline();
  XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
  // Exercise idempotency of the container state machine: every mutating
  // request below is issued twice and must succeed both times.
  try {
    // create the container
    ContainerProtocolCalls.createContainer(client, containerID, null);
    // call create Container again
    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
    byte[] data = RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes(UTF_8);
    ContainerProtos.ContainerCommandRequestProto writeChunkRequest = ContainerTestHelper.getWriteChunkRequest(container.getPipeline(), blockID, data.length, null);
    client.sendCommand(writeChunkRequest);
    // Make the write chunk request again without requesting for overWrite
    client.sendCommand(writeChunkRequest);
    // Now, explicitly make a putKey request for the block.
    ContainerProtos.ContainerCommandRequestProto putKeyRequest = ContainerTestHelper.getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
    client.sendCommand(putKeyRequest).getPutBlock();
    // send the putBlock again
    client.sendCommand(putKeyRequest);
    // close container call, repeated to verify idempotency of close as well
    ContainerProtocolCalls.closeContainer(client, containerID, null);
    ContainerProtocolCalls.closeContainer(client, containerID, null);
  } catch (IOException ioe) {
    Assert.fail("Container operation failed" + ioe);
  } finally {
    // Always release the acquired client, even if a non-IOException is
    // thrown above; otherwise the client would leak from the manager pool.
    xceiverClientManager.releaseClient(client, false);
  }
}
Example usage of org.apache.hadoop.hdds.client.BlockID in the Apache Ozone project:
class TestOzoneBlockTokenSecretManager, method testGenerateToken.
@Test
public void testGenerateToken() throws Exception {
  // Generate a block token for an arbitrary block and round-trip its
  // identifier through the serialized token bytes.
  BlockID testBlock = new BlockID(101, 0);
  Token<OzoneBlockTokenIdentifier> generated = secretManager.generateToken(testBlock, EnumSet.allOf(AccessModeProto.class), 100);
  DataInputStream identifierStream = new DataInputStream(new ByteArrayInputStream(generated.getIdentifier()));
  OzoneBlockTokenIdentifier decoded = OzoneBlockTokenIdentifier.readFieldsProtobuf(identifierStream);
  // The deserialized identifier must carry the same service, access
  // modes, and certificate serial id that went into the token.
  Assert.assertEquals(OzoneBlockTokenIdentifier.getTokenService(testBlock), decoded.getService());
  Assert.assertEquals(EnumSet.allOf(AccessModeProto.class), decoded.getAccessModes());
  Assert.assertEquals(omCertSerialId, decoded.getCertSerialId());
  // The token password must match the hash derived from the identifier.
  validateHash(generated.getPassword(), generated.getIdentifier());
}
Example usage of org.apache.hadoop.hdds.client.BlockID in the Apache Ozone project:
class TestOzoneBlockTokenSecretManager, method testCreateIdentifierSuccess.
@Test
public void testCreateIdentifierSuccess() throws Exception {
  // Build a token identifier directly (without wrapping it in a Token)
  // and verify its fields and password hash.
  BlockID block = new BlockID(101, 0);
  EnumSet<AccessModeProto> allModes = EnumSet.allOf(AccessModeProto.class);
  OzoneBlockTokenIdentifier identifier = secretManager.createIdentifier("testUser", block, allModes, 100);
  // Check basic details.
  Assert.assertEquals("testUser", identifier.getOwnerId());
  Assert.assertEquals(BlockTokenVerifier.getTokenService(block), identifier.getService());
  Assert.assertEquals(allModes, identifier.getAccessModes());
  Assert.assertEquals(omCertSerialId, identifier.getCertSerialId());
  // The password derived by the secret manager must validate against
  // the identifier's serialized bytes.
  validateHash(secretManager.createPassword(identifier), identifier.getBytes());
}
Example usage of org.apache.hadoop.hdds.client.BlockID in the Apache Ozone project:
class TestOzoneBlockTokenSecretManager, method testBlockTokenWriteAccessMode.
@Test
public void testBlockTokenWriteAccessMode() throws Exception {
  // A token granted only WRITE access must authorize write-chunk
  // requests and reject read-chunk requests.
  final String testUser2 = "testUser2";
  BlockID block = new BlockID(102, 0);
  Token<OzoneBlockTokenIdentifier> writeOnlyToken = secretManager.generateToken(testUser2, block, EnumSet.of(AccessModeProto.WRITE), 100);
  String encodedToken = writeOnlyToken.encodeToUrlString();
  ContainerCommandRequestProto writeReq = getWriteChunkRequest(pipeline, block, 100, encodedToken);
  ContainerCommandRequestProto readReq = getReadChunkRequest(pipeline, writeReq.getWriteChunk());
  // Write is permitted.
  tokenVerifier.verify(testUser2, writeOnlyToken, writeReq);
  // Read must be denied with a BlockTokenException naming the missing permission.
  BlockTokenException denied = assertThrows(BlockTokenException.class, () -> tokenVerifier.verify(testUser2, writeOnlyToken, readReq));
  String message = denied.getMessage();
  assertTrue(message, message.contains("doesn't have READ permission"));
}
Example usage of org.apache.hadoop.hdds.client.BlockID in the Apache Ozone project:
class TestOzoneBlockTokenSecretManager, method testExpiredCertificate.
@Test
public void testExpiredCertificate() throws Exception {
  // Verify that token verification fails once the signing certificate
  // presented by the client has expired.
  String user = "testUser2";
  BlockID blockID = new BlockID(102, 0);
  Token<OzoneBlockTokenIdentifier> token = secretManager.generateToken(user, blockID, EnumSet.allOf(AccessModeProto.class), 100);
  ContainerCommandRequestProto writeChunkRequest = getWriteChunkRequest(pipeline, blockID, 100, token.encodeToUrlString());
  // Sanity check: verification succeeds while the certificate is valid.
  // (Fixed: previously passed the hard-coded "testUser" instead of the
  // user the token was actually generated for.)
  tokenVerifier.verify(user, token, writeChunkRequest);
  // Mock client with an expired cert
  X509Certificate expiredCert = generateExpiredCert("CN=OzoneMaster", keyPair, ALGORITHM);
  when(client.getCertificate(anyString())).thenReturn(expiredCert);
  BlockTokenException e = assertThrows(BlockTokenException.class, () -> tokenVerifier.verify(user, token, writeChunkRequest));
  String msg = e.getMessage();
  assertTrue(msg, msg.contains("Token can't be verified due to" + " expired certificate"));
}
End of aggregated usage examples.