Use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.
The class ServerHardDeleteTest, method endToEndTestHardDeletes.
/**
* Tests the hard delete functionality.
* <p>
* This test does the following:
* 1. Makes 6 puts, waits for notification.
* 2. Makes 2 deletes, waits for notification.
* 3. Waits for hard deletes to catch up to the expected token value.
* 4. Verifies that the two records that are deleted are zeroed out by hard deletes.
* 5. Makes 3 more puts, waits for notification.
* 6. Makes 3 deletes - 2 of records from the initial set of puts, and 1 from the new set - and then undeletes one
*    of the initially deleted records.
* 7. Waits for hard deletes to catch up again to the expected token value.
* 8. Verifies that the records that remain deleted are zeroed out by hard deletes (the undeleted record keeps its
*    original content).
*
* @throws Exception
*/
@Test
public void endToEndTestHardDeletes() throws Exception {
DataNodeId dataNodeId = mockClusterMap.getDataNodeIds().get(0);
encryptionKey = new ArrayList<>(9);
usermetadata = new ArrayList<>(9);
data = new ArrayList<>(9);
Random random = new Random();
for (int i = 0; i < 9; i++) {
if (i % 2 == 0) {
encryptionKey.add(new byte[100]);
random.nextBytes(encryptionKey.get(i));
} else {
encryptionKey.add(null);
}
usermetadata.add(new byte[1000 + i]);
data.add(new byte[31870 + i]);
random.nextBytes(usermetadata.get(i));
random.nextBytes(data.get(i));
}
properties = new ArrayList<>(9);
properties.add(new BlobProperties(31870, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), true));
properties.add(new BlobProperties(31871, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false));
properties.add(new BlobProperties(31872, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), true));
properties.add(new BlobProperties(31873, "serviceid1", "ownerid", "jpeg", false, 0, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false, null, null, null));
properties.add(new BlobProperties(31874, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), true));
properties.add(new BlobProperties(31875, "serviceid1", "ownerid", "jpeg", false, 0, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false, null, null, null));
properties.add(new BlobProperties(31876, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), true));
properties.add(new BlobProperties(31877, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false));
properties.add(new BlobProperties(31878, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), true));
List<PartitionId> partitionIds = mockClusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
PartitionId chosenPartition = partitionIds.get(0);
blobIdList = new ArrayList<>(9);
for (int i = 0; i < 9; i++) {
blobIdList.add(new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, mockClusterMap.getLocalDatacenterId(), properties.get(i).getAccountId(), properties.get(i).getContainerId(), chosenPartition, false, BlobId.BlobDataType.DATACHUNK));
}
ConnectedChannel channel = ServerTestUtil.getBlockingChannelBasedOnPortType(new Port(dataNodeId.getPort(), PortType.PLAINTEXT), "localhost", null, null);
channel.connect();
for (int i = 0; i < 6; i++) {
// blobs 3 and 5 among these are put with a TTL of 0, so they are already expired
putBlob(blobIdList.get(i), properties.get(i), encryptionKey.get(i), usermetadata.get(i), data.get(i), channel);
}
notificationSystem.awaitBlobCreations(blobIdList.get(0).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(1).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(2).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(4).getID());
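// note: the expired blobs 3 and 5 are not awaited here, since they were put already expired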
// delete blob 1
deleteBlob(blobIdList.get(1), channel);
zeroOutBlobContent(1);
// delete blob 4
deleteBlob(blobIdList.get(4), channel);
zeroOutBlobContent(4);
notificationSystem.awaitBlobDeletions(blobIdList.get(1).getID());
notificationSystem.awaitBlobDeletions(blobIdList.get(4).getID());
time.sleep(TimeUnit.DAYS.toMillis(7));
// For each future change to this offset, add to this variable and write an explanation of why the number changed.
// Old value: 198728. Increased by 4 to 198732 because the delete record format went from version 2 to 3, which adds
// 4 extra bytes (two shorts) per record. The last record is a delete record, so its extra 4 bytes are not (yet)
// counted.
//
// Increased by 14 when the message header version went from 2 to 3, which adds a short to every record: 7 records
// (6 puts and 1 delete) * 2 bytes = 14. The last delete is not counted.
//
// Old value: 198732 + 14. Increased by 48 when two fields (each with a 4-byte CRC) were added to the blob properties
// written by putBlob: 6 puts * (4 + 4) bytes = 48.
int expectedTokenValueT1 = 198732 + 14 + 48; // = 198794
ensureCleanupTokenCatchesUp(chosenPartition.getReplicaIds().get(0).getReplicaPath(), mockClusterMap, expectedTokenValueT1);
getAndVerify(channel, 6);
// put blob 6
putBlob(blobIdList.get(6), properties.get(6), encryptionKey.get(6), usermetadata.get(6), data.get(6), channel);
// put blob 7
putBlob(blobIdList.get(7), properties.get(7), encryptionKey.get(7), usermetadata.get(7), data.get(7), channel);
// put blob 8
putBlob(blobIdList.get(8), properties.get(8), encryptionKey.get(8), usermetadata.get(8), data.get(8), channel);
notificationSystem.awaitBlobCreations(blobIdList.get(6).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(7).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(8).getID());
// Do more deletes
// delete blob 3, which is already expired.
deleteBlob(blobIdList.get(3), channel);
zeroOutBlobContent(3);
// delete blob 0, will undelete it later, so don't zero out the content
deleteBlob(blobIdList.get(0), channel);
// delete blob 6.
deleteBlob(blobIdList.get(6), channel);
zeroOutBlobContent(6);
notificationSystem.awaitBlobDeletions(blobIdList.get(0).getID());
notificationSystem.awaitBlobDeletions(blobIdList.get(6).getID());
undeleteBlob(blobIdList.get(0), channel);
notificationSystem.awaitBlobUndeletes(blobIdList.get(0).getID());
time.sleep(TimeUnit.DAYS.toMillis(1));
// For each future change to this offset, add to this variable and write an explanation of why the number changed.
// Old value: 298400. Increased by 16 (4 * 4) to 298416 because the delete record format went from version 2 to 3,
// which adds 4 extra bytes (two shorts) per record. The last record is a delete record, so its extra 4 bytes are
// not counted.
//
// Old value: 298416. Increased by 98 because the end offset is now a journal-based offset, which is not inclusive:
// it points to the last record in the journal. Before the undelete, the last record in the journal was the delete
// record for blob 6; now it is the undelete for blob 0. Since a delete record is 98 bytes, the offset grows by 98.
//
// Old value: 298416 + 98. Increased by 28 when the message header version went from 2 to 3, which adds a short to
// every record: 14 records (9 puts and 5 deletes) * 2 bytes = 28. The undelete is not counted since it is the last
// record.
//
// Old value: 298416 + 98 + 28. Increased by 72 when two fields (each with a 4-byte CRC) were added to the blob
// properties written by putBlob: 9 puts * (4 + 4) bytes = 72.
int expectedTokenValueT2 = 298416 + 98 + 28 + 72; // = 298614
ensureCleanupTokenCatchesUp(chosenPartition.getReplicaIds().get(0).getReplicaPath(), mockClusterMap, expectedTokenValueT2);
getAndVerify(channel, 9);
}
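The helper zeroOutBlobContent is defined elsewhere in ServerHardDeleteTest; it updates the locally held expected content so that getAndVerify later compares the hard-deleted blobs against zeroed-out bytes. A minimal sketch of what such a helper could look like, assuming the usermetadata and data lists shown above (the actual implementation may differ):

/**
 * Replace the locally stored user metadata and data for the blob at {@code index} with zeroed-out
 * arrays of the same length, so a later getAndVerify expects the hard-deleted (zeroed) content.
 * Minimal sketch; the real helper in ServerHardDeleteTest may differ.
 */
private void zeroOutBlobContent(int index) {
  byte[] zeroedMetadata = new byte[usermetadata.get(index).length];
  usermetadata.set(index, zeroedMetadata);
  byte[] zeroedData = new byte[data.get(index).length];
  data.set(index, zeroedData);
}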
Use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.
The class ServerPlaintextTest, method endToEndTest.
@Test
public void endToEndTest() throws Exception {
plaintextCluster.startServers();
DataNodeId dataNodeId = plaintextCluster.getGeneralDataNode();
ServerTestUtil.endToEndTest(new Port(dataNodeId.getPort(), PortType.PLAINTEXT), "DC1", plaintextCluster, null, null, routerProps, testEncryption);
}
Use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.
The class ServerPlaintextTest, method endToEndReplicationWithMultiNodeMultiPartitionTest.
@Test
public void endToEndReplicationWithMultiNodeMultiPartitionTest() throws Exception {
plaintextCluster.startServers();
DataNodeId dataNode = plaintextCluster.getClusterMap().getDataNodeIds().get(0);
ArrayList<String> dataCenterList = Utils.splitString("DC1,DC2,DC3", ",");
List<DataNodeId> dataNodes = plaintextCluster.getOneDataNodeFromEachDatacenter(dataCenterList);
ServerTestUtil.endToEndReplicationWithMultiNodeMultiPartitionTest(dataNode.getPort(), new Port(dataNodes.get(0).getPort(), PortType.PLAINTEXT), new Port(dataNodes.get(1).getPort(), PortType.PLAINTEXT), new Port(dataNodes.get(2).getPort(), PortType.PLAINTEXT), plaintextCluster, null, null, null, null, null, null, notificationSystem, testEncryption);
}
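Both replication tests follow the same pattern: resolve one DataNodeId per datacenter name and hand its port to the shared ServerTestUtil driver. A minimal sketch of that selection logic, using only the public ClusterMap and DataNodeId APIs (the helper name pickOnePerDatacenter is hypothetical, for illustration only):

// Hypothetical helper: pick the first data node found in each of the given datacenters.
static List<DataNodeId> pickOnePerDatacenter(ClusterMap clusterMap, List<String> datacenters) {
  List<DataNodeId> picked = new ArrayList<>();
  for (String dc : datacenters) {
    for (DataNodeId node : clusterMap.getDataNodeIds()) {
      if (node.getDatacenterName().equals(dc)) {
        picked.add(node); // first match wins for this datacenter
        break;
      }
    }
  }
  return picked;
}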
Use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.
The class ServerSSLTest, method endToEndSSLReplicationWithMultiNodeMultiPartitionTest.
@Test
public void endToEndSSLReplicationWithMultiNodeMultiPartitionTest() throws Exception {
sslCluster.startServers();
DataNodeId dataNode = sslCluster.getClusterMap().getDataNodeIds().get(0);
ArrayList<String> dataCenterList = new ArrayList<>(Arrays.asList("DC1", "DC2", "DC3"));
List<DataNodeId> dataNodes = sslCluster.getOneDataNodeFromEachDatacenter(dataCenterList);
ServerTestUtil.endToEndReplicationWithMultiNodeMultiPartitionTest(dataNode.getPort(), new Port(dataNodes.get(0).getSSLPort(), PortType.SSL), new Port(dataNodes.get(1).getSSLPort(), PortType.SSL), new Port(dataNodes.get(2).getSSLPort(), PortType.SSL), sslCluster, clientSSLConfig1, clientSSLConfig2, clientSSLConfig3, clientSSLSocketFactory1, clientSSLSocketFactory2, clientSSLSocketFactory3, notificationSystem, testEncryption);
}
Use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.
The class ServerSSLTest, method endToEndCloudBackupTest.
/**
* Does an end-to-end cloud backup test.
*/
@Test
public void endToEndCloudBackupTest() throws Exception {
assumeTrue(testEncryption);
sslCluster.startServers();
DataNodeId dataNode = sslCluster.getClusterMap().getDataNodeIds().get(0);
// Start ZK Server.
int zkPort = 31998;
String zkConnectString = "localhost:" + zkPort;
String vcrClusterName = "vcrTestClusterSSL";
TestUtils.ZkInfo zkInfo = new TestUtils.ZkInfo(TestUtils.getTempDir("helixVcr"), "DC1", (byte) 1, zkPort, true);
ServerTestUtil.endToEndCloudBackupTest(sslCluster, zkConnectString, vcrClusterName, dataNode, clientSSLConfig2, clientSSLSocketFactory2, notificationSystem, serverSSLProps, false);
ServerTestUtil.endToEndCloudBackupTest(sslCluster, zkConnectString, vcrClusterName, dataNode, clientSSLConfig2, clientSSLSocketFactory2, notificationSystem, serverSSLProps, true);
zkInfo.shutdown();
}
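As written, zkInfo.shutdown() is skipped if either backup run throws. When reusing this pattern, a try/finally keeps the test ZooKeeper server from leaking across tests; a sketch using the same calls as above:

TestUtils.ZkInfo zkInfo = new TestUtils.ZkInfo(TestUtils.getTempDir("helixVcr"), "DC1", (byte) 1, zkPort, true);
try {
  ServerTestUtil.endToEndCloudBackupTest(sslCluster, zkConnectString, vcrClusterName, dataNode, clientSSLConfig2,
      clientSSLSocketFactory2, notificationSystem, serverSSLProps, false);
  ServerTestUtil.endToEndCloudBackupTest(sslCluster, zkConnectString, vcrClusterName, dataNode, clientSSLConfig2,
      clientSSLSocketFactory2, notificationSystem, serverSSLProps, true);
} finally {
  zkInfo.shutdown(); // stop the test ZooKeeper server even if a backup run fails
}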