use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
the class GetBlobOperationTest method doDirectPut.
/**
* Do a put directly to the mock servers. This allows for blobs with malformed properties to be constructed.
* @param blobType the {@link BlobType} for the blob to upload.
* @param blobContent the raw content for the blob to upload (e.g. serialized composite blob metadata or
* an encrypted blob).
*/
private void doDirectPut(BlobType blobType, ByteBuf blobContent) throws Exception {
List<PartitionId> writablePartitionIds = mockClusterMap.getWritablePartitionIds(null);
PartitionId partitionId = writablePartitionIds.get(random.nextInt(writablePartitionIds.size()));
blobId = new BlobId(routerConfig.routerBlobidCurrentVersion, BlobId.BlobIdType.NATIVE, mockClusterMap.getLocalDatacenterId(), blobProperties.getAccountId(), blobProperties.getContainerId(), partitionId, blobProperties.isEncrypted(), blobType == BlobType.MetadataBlob ? BlobId.BlobDataType.METADATA : BlobId.BlobDataType.DATACHUNK);
blobIdStr = blobId.getID();
Iterator<MockServer> servers = partitionId.getReplicaIds().stream().map(ReplicaId::getDataNodeId).map(dataNodeId -> mockServerLayout.getMockServer(dataNodeId.getHostname(), dataNodeId.getPort())).iterator();
ByteBuffer blobEncryptionKey = null;
ByteBuffer userMetadataBuf = ByteBuffer.wrap(userMetadata);
if (blobProperties.isEncrypted()) {
FutureResult<EncryptJob.EncryptJobResult> futureResult = new FutureResult<>();
cryptoJobHandler.submitJob(new EncryptJob(blobProperties.getAccountId(), blobProperties.getContainerId(), blobType == BlobType.MetadataBlob ? null : blobContent.retainedDuplicate(), userMetadataBuf.duplicate(), kms.getRandomKey(), cryptoService, kms, null, new CryptoJobMetricsTracker(routerMetrics.encryptJobMetrics), futureResult::done));
EncryptJob.EncryptJobResult result = futureResult.get(5, TimeUnit.SECONDS);
blobEncryptionKey = result.getEncryptedKey();
if (blobType != BlobType.MetadataBlob) {
blobContent.release();
blobContent = result.getEncryptedBlobContent();
}
userMetadataBuf = result.getEncryptedUserMetadata();
}
while (servers.hasNext()) {
MockServer server = servers.next();
PutRequest request = new PutRequest(random.nextInt(), "clientId", blobId, blobProperties, userMetadataBuf.duplicate(), blobContent.retainedDuplicate(), blobContent.readableBytes(), blobType, blobEncryptionKey == null ? null : blobEncryptionKey.duplicate());
// Make sure we release the BoundedNettyByteBufReceive.
server.send(request).release();
request.release();
}
blobContent.release();
}
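For context, a caller might exercise this helper as follows. This is a hedged sketch: the content size, the pooled allocator usage, and the follow-up step are assumptions about the surrounding test class, not code quoted from it.
// Hypothetical usage sketch: directly put a simple data blob, then run a get against blobId.
// doDirectPut releases blobContent itself, so the caller only allocates and fills it.
byte[] putContent = TestUtils.getRandomBytes(1024); // 1024 is an arbitrary size for illustration
ByteBuf blobContent = PooledByteBufAllocator.DEFAULT.heapBuffer(putContent.length);
blobContent.writeBytes(putContent);
doDirectPut(BlobType.DataBlob, blobContent);
// blobId and blobIdStr are now populated and can be fetched through the router under test.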
use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
the class DeleteManagerTest method init.
/**
* Initializes ClusterMap, Router, mock servers, and a {@code BlobId} to be deleted.
*/
@Before
public void init() throws Exception {
VerifiableProperties vProps = new VerifiableProperties(getNonBlockingRouterProperties());
mockTime = new MockTime();
mockSelectorState = new AtomicReference<MockSelectorState>(MockSelectorState.Good);
clusterMap = new MockClusterMap();
serverLayout = new MockServerLayout(clusterMap);
RouterConfig routerConfig = new RouterConfig(vProps);
router = new NonBlockingRouter(routerConfig, new NonBlockingRouterMetrics(clusterMap, routerConfig), new MockNetworkClientFactory(vProps, mockSelectorState, MAX_PORTS_PLAIN_TEXT, MAX_PORTS_SSL, CHECKOUT_TIMEOUT_MS, serverLayout, mockTime), new LoggingNotificationSystem(), clusterMap, null, null, null, new InMemAccountService(false, true), mockTime, MockClusterMap.DEFAULT_PARTITION_CLASS);
List<PartitionId> mockPartitions = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
partition = mockPartitions.get(ThreadLocalRandom.current().nextInt(mockPartitions.size()));
blobId = new BlobId(routerConfig.routerBlobidCurrentVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), partition, false, BlobId.BlobDataType.DATACHUNK);
blobIdString = blobId.getID();
}
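A minimal sketch of how a test might build on this setup, assuming the default mock-server behavior of returning No_Error; the timeout constant is hypothetical:
// Hypothetical sketch: delete the blob created in init() and wait for the operation to complete.
Future<Void> future = router.deleteBlob(blobIdString, "deleteServiceId");
future.get(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS); // AWAIT_TIMEOUT_MS: assumed test constant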
use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
the class ServerTestUtil method undeleteCornerCasesTest.
static void undeleteCornerCasesTest(MockCluster cluster, PortType portType, SSLConfig clientSSLConfig1, SSLConfig clientSSLConfig2, SSLConfig clientSSLConfig3, SSLSocketFactory clientSSLSocketFactory1, SSLSocketFactory clientSSLSocketFactory2, SSLSocketFactory clientSSLSocketFactory3, MockNotificationSystem notificationSystem, Properties routerProps, boolean testEncryption) {
MockClusterMap clusterMap = cluster.getClusterMap();
byte[] userMetadata = new byte[1000];
byte[] data = new byte[31870];
byte[] encryptionKey = new byte[100];
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
BlobProperties properties = new BlobProperties(31870, "serviceid1", accountId, containerId, testEncryption, cluster.time.milliseconds());
TestUtils.RANDOM.nextBytes(userMetadata);
TestUtils.RANDOM.nextBytes(data);
if (testEncryption) {
TestUtils.RANDOM.nextBytes(encryptionKey);
}
short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
Map<String, List<DataNodeId>> dataNodesPerDC = clusterMap.getDataNodes().stream().collect(Collectors.groupingBy(DataNodeId::getDatacenterName));
Map<String, Pair<SSLConfig, SSLSocketFactory>> sslSettingPerDC = new HashMap<>();
sslSettingPerDC.put("DC1", new Pair<>(clientSSLConfig1, clientSSLSocketFactory1));
sslSettingPerDC.put("DC2", new Pair<>(clientSSLConfig2, clientSSLSocketFactory2));
sslSettingPerDC.put("DC3", new Pair<>(clientSSLConfig3, clientSSLSocketFactory3));
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
DataNodeId dataNodeId = dataNodesPerDC.get("DC1").get(0);
Router router = null;
try {
Properties routerProperties = getRouterProps("DC1");
routerProperties.putAll(routerProps);
VerifiableProperties routerVerifiableProps = new VerifiableProperties(routerProperties);
AccountService accountService = new InMemAccountService(false, true);
router = new NonBlockingRouterFactory(routerVerifiableProps, clusterMap, new MockNotificationSystem(clusterMap), getSSLFactoryIfRequired(routerVerifiableProps), accountService).getRouter();
// channels to all datanodes
List<ConnectedChannel> channels = new ArrayList<>();
for (Map.Entry<String, List<DataNodeId>> entry : dataNodesPerDC.entrySet()) {
Pair<SSLConfig, SSLSocketFactory> pair = sslSettingPerDC.get(entry.getKey());
for (DataNodeId node : entry.getValue()) {
ConnectedChannel connectedChannel = getBlockingChannelBasedOnPortType(portType, node, pair.getSecond(), pair.getFirst());
connectedChannel.connect();
channels.add(connectedChannel);
}
}
// ////////////////////////////////////////////////////
// Corner case 1: When only one datacenter has delete
// ////////////////////////////////////////////////////
BlobId blobId1 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
ConnectedChannel channel = getBlockingChannelBasedOnPortType(portType, dataNodeId, clientSSLSocketFactory1, clientSSLConfig1);
channel.connect();
PutRequest putRequest = new PutRequest(1, "client1", blobId1, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
DataInputStream putResponseStream = channel.sendAndReceive(putRequest).getInputStream();
PutResponse response = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response.getError());
notificationSystem.awaitBlobCreations(blobId1.toString());
// Now stop replication for this partition.
PartitionId partitionId = blobId1.getPartition();
controlReplicationForPartition(channels, partitionId, false);
// Now send the delete to two data nodes in the same DC
List<DataNodeId> toBeDeleteDataNodes = dataNodesPerDC.values().stream().findFirst().get();
Pair<SSLConfig, SSLSocketFactory> pair = sslSettingPerDC.get(toBeDeleteDataNodes.get(0).getDatacenterName());
ConnectedChannel channel1 = getBlockingChannelBasedOnPortType(portType, toBeDeleteDataNodes.get(0), pair.getSecond(), pair.getFirst());
channel1.connect();
ConnectedChannel channel2 = getBlockingChannelBasedOnPortType(portType, toBeDeleteDataNodes.get(1), pair.getSecond(), pair.getFirst());
channel2.connect();
DeleteRequest deleteRequest1 = new DeleteRequest(1, "deleteClient", blobId1, System.currentTimeMillis());
DataInputStream stream = channel1.sendAndReceive(deleteRequest1).getInputStream();
DeleteResponse deleteResponse = DeleteResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
DeleteRequest deleteRequest2 = new DeleteRequest(1, "deleteClient", blobId1, deleteRequest1.getDeletionTimeInMs());
stream = channel2.sendAndReceive(deleteRequest2).getInputStream();
deleteResponse = DeleteResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
// Now send the undelete operation through the router; it should fail with a BlobNotDeleted error.
Future<Void> future = router.undeleteBlob(blobId1.toString(), "service");
try {
future.get();
fail("Undelete blob " + blobId1.toString() + " should fail");
} catch (ExecutionException e) {
assertTrue(e.getCause() instanceof RouterException);
assertEquals(RouterErrorCode.BlobNotDeleted, ((RouterException) e.getCause()).getErrorCode());
}
// Now check whether data node 1 or data node 2 has an undelete. If so, the undelete will replicate;
// if not, the delete will replicate.
List<PartitionRequestInfo> partitionRequestInfoList = getPartitionRequestInfoListFromBlobId(blobId1);
boolean hasUndelete = false;
for (ConnectedChannel connectedChannel : new ConnectedChannel[] { channel1, channel2 }) {
GetRequest getRequest = new GetRequest(1, "clientId1", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.Include_All);
stream = connectedChannel.sendAndReceive(getRequest).getInputStream();
GetResponse getResponse = GetResponse.readFrom(stream, clusterMap);
assertEquals(ServerErrorCode.No_Error, getResponse.getPartitionResponseInfoList().get(0).getErrorCode());
MessageFormatRecord.deserializeBlobProperties(getResponse.getInputStream());
hasUndelete = getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getLifeVersion() == (short) 1;
if (hasUndelete) {
break;
}
}
releaseNettyBufUnderneathStream(stream);
// Now restart the replication
controlReplicationForPartition(channels, partitionId, true);
if (hasUndelete) {
notificationSystem.awaitBlobUndeletes(blobId1.toString());
} else {
notificationSystem.awaitBlobDeletions(blobId1.toString());
}
for (ConnectedChannel connectedChannel : channels) {
GetRequest getRequest = new GetRequest(1, "clientId1", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.Include_All);
stream = connectedChannel.sendAndReceive(getRequest).getInputStream();
GetResponse getResponse = GetResponse.readFrom(stream, clusterMap);
releaseNettyBufUnderneathStream(stream);
assertEquals(ServerErrorCode.No_Error, getResponse.getPartitionResponseInfoList().get(0).getErrorCode());
MessageFormatRecord.deserializeBlobProperties(getResponse.getInputStream());
if (hasUndelete) {
assertEquals((short) 1, getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getLifeVersion());
assertTrue(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isUndeleted());
assertFalse(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isDeleted());
} else {
assertEquals((short) 0, getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getLifeVersion());
assertTrue(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isDeleted());
assertFalse(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isUndeleted());
}
}
// ///////////////////////////////////////////////////////////
// Corner case 2: two data nodes have different life versions
// //////////////////////////////////////////////////////////
BlobId blobId2 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
putRequest = new PutRequest(1, "client1", blobId2, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
putResponseStream = channel.sendAndReceive(putRequest).getInputStream();
response = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response.getError());
notificationSystem.awaitBlobCreations(blobId2.toString());
// Now delete this blob on all servers.
DeleteRequest deleteRequest = new DeleteRequest(1, "deleteClient", blobId2, System.currentTimeMillis());
stream = channel.sendAndReceive(deleteRequest).getInputStream();
deleteResponse = DeleteResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
notificationSystem.awaitBlobDeletions(blobId2.toString());
// Now stop the replication
partitionId = blobId2.getPartition();
controlReplicationForPartition(channels, partitionId, false);
// Now send the undelete to two data nodes in the same DC and then send delete
UndeleteRequest undeleteRequest = new UndeleteRequest(1, "undeleteClient", blobId2, System.currentTimeMillis());
stream = channel1.sendAndReceive(undeleteRequest).getInputStream();
UndeleteResponse undeleteResponse = UndeleteResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals(ServerErrorCode.No_Error, undeleteResponse.getError());
assertEquals((short) 1, undeleteResponse.getLifeVersion());
undeleteRequest = new UndeleteRequest(1, "undeleteClient", blobId2, undeleteRequest.getOperationTimeMs());
stream = channel2.sendAndReceive(undeleteRequest).getInputStream();
undeleteResponse = UndeleteResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals(ServerErrorCode.No_Error, undeleteResponse.getError());
assertEquals((short) 1, undeleteResponse.getLifeVersion());
deleteRequest1 = new DeleteRequest(1, "deleteClient", blobId2, System.currentTimeMillis());
stream = channel1.sendAndReceive(deleteRequest1).getInputStream();
deleteResponse = DeleteResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
deleteRequest2 = new DeleteRequest(1, "deleteClient", blobId2, deleteRequest1.getDeletionTimeInMs());
stream = channel2.sendAndReceive(deleteRequest2).getInputStream();
deleteResponse = DeleteResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
// Now send the undelete operation through the router; it should fail with a LifeVersionConflict error.
future = router.undeleteBlob(blobId2.toString(), "service");
try {
future.get();
fail("Undelete blob " + blobId2.toString() + " should fail");
} catch (ExecutionException e) {
assertTrue(e.getCause() instanceof RouterException);
assertEquals(RouterErrorCode.LifeVersionConflict, ((RouterException) e.getCause()).getErrorCode());
}
// Now restart the replication
controlReplicationForPartition(channels, partitionId, true);
notificationSystem.awaitBlobUndeletes(blobId2.toString());
// Once replication resumes, the undelete at lifeVersion 2 will eventually be replicated to all servers.
partitionRequestInfoList = getPartitionRequestInfoListFromBlobId(blobId2);
for (ConnectedChannel connectedChannel : channels) {
// Even if the notificationSystem acknowledged the undelete, it might have been triggered by the undelete at
// lifeVersion 1, so check in a loop with a timeout.
long deadline = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10);
while (true) {
GetRequest getRequest = new GetRequest(1, "clientId1", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.Include_All);
stream = connectedChannel.sendAndReceive(getRequest).getInputStream();
GetResponse getResponse = GetResponse.readFrom(stream, clusterMap);
assertEquals(ServerErrorCode.No_Error, getResponse.getPartitionResponseInfoList().get(0).getErrorCode());
MessageFormatRecord.deserializeBlobProperties(getResponse.getInputStream());
if (getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getLifeVersion() == 2) {
assertTrue(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isUndeleted());
assertFalse(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isDeleted());
break;
} else {
Thread.sleep(1000);
if (System.currentTimeMillis() > deadline) {
throw new TimeoutException("Fail to get blob " + blobId2 + " at lifeversion 2 at " + connectedChannel.getRemoteHost());
}
}
}
}
releaseNettyBufUnderneathStream(stream);
for (ConnectedChannel connectedChannel : channels) {
connectedChannel.disconnect();
}
channel1.disconnect();
channel2.disconnect();
channel.disconnect();
} catch (Exception e) {
e.printStackTrace();
fail();
} finally {
if (router != null) {
try {
router.close();
} catch (Exception e) {
}
}
}
}
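The helper getPartitionRequestInfoListFromBlobId is referenced above but not shown in this excerpt. A plausible sketch, assuming it simply wraps the blob id in a single-entry request against its own partition:
private static List<PartitionRequestInfo> getPartitionRequestInfoListFromBlobId(BlobId blobId) {
// One PartitionRequestInfo targeting the blob's own partition with just this id.
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), Collections.singletonList(blobId));
return Collections.singletonList(partitionRequestInfo);
}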
use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
the class ServerTestUtil method checkReplicaTokens.
/**
* Repeatedly check the replication token files until the target offset is found for all remote replicas of a
* certain partition. Fail if {@code numTries} is exceeded or a token offset larger than the target
* is found.
* @param clusterMap the cluster map that contains the data node to inspect
* @param dataNodeId the data node to inspect
* @param targetOffset the token offset to look for in the {@code targetPartition}
* @param targetPartition the name of the partition to look for the {@code targetOffset}
* @throws Exception
*/
private static void checkReplicaTokens(MockClusterMap clusterMap, DataNodeId dataNodeId, long targetOffset, String targetPartition) throws Exception {
List<String> mountPaths = ((MockDataNodeId) dataNodeId).getMountPaths();
// we should have an entry for each (partition, remote replica) pair
Set<String> completeSetToCheck = new HashSet<>();
List<ReplicaId> replicaIds = clusterMap.getReplicaIds(dataNodeId);
int numRemoteNodes = 0;
for (ReplicaId replicaId : replicaIds) {
List<? extends ReplicaId> peerReplicas = replicaId.getPeerReplicaIds();
if (replicaId.getPartitionId().isEqual(targetPartition)) {
numRemoteNodes = peerReplicas.size();
}
for (ReplicaId peerReplica : peerReplicas) {
completeSetToCheck.add(replicaId.getPartitionId().toString() + peerReplica.getDataNodeId().getHostname() + peerReplica.getDataNodeId().getPort());
}
}
StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
FindTokenFactory factory = Utils.getObj("com.github.ambry.store.StoreFindTokenFactory", storeKeyFactory);
int numTries = 4;
boolean foundTarget = false;
while (!foundTarget && numTries > 0) {
Thread.sleep(5000);
numTries--;
Set<String> setToCheck = new HashSet<String>(completeSetToCheck);
int numFound = 0;
for (String mountPath : mountPaths) {
File replicaTokenFile = new File(mountPath, "replicaTokens");
if (replicaTokenFile.exists()) {
CrcInputStream crcStream = new CrcInputStream(new FileInputStream(replicaTokenFile));
DataInputStream dataInputStream = new DataInputStream(crcStream);
try {
short version = dataInputStream.readShort();
assertEquals(1, version);
while (dataInputStream.available() > 8) {
// read partition id
PartitionId partitionId = clusterMap.getPartitionIdFromStream(dataInputStream);
// read remote node host name
String hostname = Utils.readIntString(dataInputStream);
// read remote replica path
Utils.readIntString(dataInputStream);
// read remote port
int port = dataInputStream.readInt();
assertTrue(setToCheck.contains(partitionId.toString() + hostname + port));
setToCheck.remove(partitionId.toString() + hostname + port);
// read total bytes read from local store
dataInputStream.readLong();
// read replica type
ReplicaType replicaType = ReplicaType.values()[dataInputStream.readShort()];
// read replica token
StoreFindToken token = (StoreFindToken) factory.getFindToken(dataInputStream);
System.out.println("partitionId " + partitionId + " hostname " + hostname + " port " + port + " token " + token);
Offset endTokenOffset = token.getOffset();
long parsedToken = endTokenOffset == null ? -1 : endTokenOffset.getOffset();
System.out.println("The parsed token is " + parsedToken);
if (partitionId.isEqual(targetPartition)) {
assertFalse("Parsed offset: " + parsedToken + " must not be larger than target value: " + targetOffset, parsedToken > targetOffset);
if (parsedToken == targetOffset) {
numFound++;
}
} else {
assertEquals("Tokens should remain at -1 offsets on unmodified partitions", -1, parsedToken);
}
}
long crc = crcStream.getValue();
assertEquals(crc, dataInputStream.readLong());
} catch (IOException e) {
fail();
} finally {
dataInputStream.close();
}
}
}
if (numFound == numRemoteNodes) {
foundTarget = true;
}
}
if (!foundTarget) {
fail("Could not find target token offset: " + targetOffset);
}
}
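For reference, the version-1 token file layout implied by the parsing loop above, reconstructed purely from the reads it performs (Utils.readIntString denotes an int length followed by that many UTF-8 bytes):
// replicaTokens file, version 1, as parsed above:
// short version (must be 1)
// repeated while more than the trailing 8-byte CRC remains:
// PartitionId partition id (via ClusterMap.getPartitionIdFromStream)
// int + bytes remote host name (Utils.readIntString)
// int + bytes remote replica path (Utils.readIntString)
// int remote port
// long total bytes read from the local store
// short replica type ordinal
// token replica find token (via FindTokenFactory.getFindToken)
// long CRC of all preceding bytes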
use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
the class ServerTestUtil method endToEndReplicationWithMultiNodeSinglePartitionTest.
static void endToEndReplicationWithMultiNodeSinglePartitionTest(String routerDatacenter, int interestedDataNodePortNumber, Port dataNode1Port, Port dataNode2Port, Port dataNode3Port, MockCluster cluster, SSLConfig clientSSLConfig1, SSLSocketFactory clientSSLSocketFactory1, MockNotificationSystem notificationSystem, Properties routerProps, boolean testEncryption) {
// interestedDataNodePortNumber is used to locate the data node and hence has to be the plaintext port
try {
// The header size of a LogSegment. This constant shouldn't live here since it leaks the internals of Log, but we
// use this number to satisfy the test cases.
// It also assumes this partition has only one log segment. If more operations are put to the partition and it exceeds
// the log segment capacity, this number will have to be increased.
int expectedTokenSize = 18;
MockClusterMap clusterMap = cluster.getClusterMap();
BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
ArrayList<BlobProperties> propertyList = new ArrayList<>();
ArrayList<BlobId> blobIdList = new ArrayList<>();
ArrayList<byte[]> dataList = new ArrayList<>();
ArrayList<byte[]> encryptionKeyList = new ArrayList<>();
byte[] usermetadata = new byte[1000];
TestUtils.RANDOM.nextBytes(usermetadata);
PartitionId partition = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
for (int i = 0; i < 11; i++) {
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
propertyList.add(new BlobProperties(1000, "serviceid1", null, null, false, TestUtils.TTL_SECS, cluster.time.milliseconds(), accountId, containerId, testEncryption, null, null, null));
blobIdList.add(new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), accountId, containerId, partition, false, BlobId.BlobDataType.DATACHUNK));
dataList.add(TestUtils.getRandomBytes(1000));
if (testEncryption) {
encryptionKeyList.add(TestUtils.getRandomBytes(128));
} else {
encryptionKeyList.add(null);
}
}
// put blob 1
PutRequest putRequest = new PutRequest(1, "client1", blobIdList.get(0), propertyList.get(0), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(0)), propertyList.get(0).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(0) != null ? ByteBuffer.wrap(encryptionKeyList.get(0)) : null);
expectedTokenSize += getPutRecordSize(propertyList.get(0), blobIdList.get(0), encryptionKeyList.get(0) != null ? ByteBuffer.wrap(encryptionKeyList.get(0)) : null, ByteBuffer.wrap(usermetadata), dataList.get(0));
ConnectedChannel channel1 = getBlockingChannelBasedOnPortType(dataNode1Port, "localhost", clientSSLSocketFactory1, clientSSLConfig1);
ConnectedChannel channel2 = getBlockingChannelBasedOnPortType(dataNode2Port, "localhost", clientSSLSocketFactory1, clientSSLConfig1);
ConnectedChannel channel3 = getBlockingChannelBasedOnPortType(dataNode3Port, "localhost", clientSSLSocketFactory1, clientSSLConfig1);
channel1.connect();
channel2.connect();
channel3.connect();
DataInputStream putResponseStream = channel1.sendAndReceive(putRequest).getInputStream();
PutResponse response = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response.getError());
// put blob 2
PutRequest putRequest2 = new PutRequest(1, "client1", blobIdList.get(1), propertyList.get(1), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(1)), propertyList.get(1).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(1) != null ? ByteBuffer.wrap(encryptionKeyList.get(1)) : null);
expectedTokenSize += getPutRecordSize(propertyList.get(1), blobIdList.get(1), encryptionKeyList.get(1) != null ? ByteBuffer.wrap(encryptionKeyList.get(1)) : null, ByteBuffer.wrap(usermetadata), dataList.get(1));
putResponseStream = channel2.sendAndReceive(putRequest2).getInputStream();
PutResponse response2 = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response2.getError());
// put blob 3
PutRequest putRequest3 = new PutRequest(1, "client1", blobIdList.get(2), propertyList.get(2), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(2)), propertyList.get(2).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(2) != null ? ByteBuffer.wrap(encryptionKeyList.get(2)) : null);
expectedTokenSize += getPutRecordSize(propertyList.get(2), blobIdList.get(2), encryptionKeyList.get(2) != null ? ByteBuffer.wrap(encryptionKeyList.get(2)) : null, ByteBuffer.wrap(usermetadata), dataList.get(2));
putResponseStream = channel3.sendAndReceive(putRequest3).getInputStream();
PutResponse response3 = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response3.getError());
// put blob 4
putRequest = new PutRequest(1, "client1", blobIdList.get(3), propertyList.get(3), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(3)), propertyList.get(3).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(3) != null ? ByteBuffer.wrap(encryptionKeyList.get(3)) : null);
expectedTokenSize += getPutRecordSize(propertyList.get(3), blobIdList.get(3), encryptionKeyList.get(3) != null ? ByteBuffer.wrap(encryptionKeyList.get(3)) : null, ByteBuffer.wrap(usermetadata), dataList.get(3));
putResponseStream = channel1.sendAndReceive(putRequest).getInputStream();
response = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response.getError());
// put blob 5
putRequest2 = new PutRequest(1, "client1", blobIdList.get(4), propertyList.get(4), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(4)), propertyList.get(4).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(4) != null ? ByteBuffer.wrap(encryptionKeyList.get(4)) : null);
expectedTokenSize += getPutRecordSize(propertyList.get(4), blobIdList.get(4), encryptionKeyList.get(4) != null ? ByteBuffer.wrap(encryptionKeyList.get(4)) : null, ByteBuffer.wrap(usermetadata), dataList.get(4));
putResponseStream = channel2.sendAndReceive(putRequest2).getInputStream();
response2 = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response2.getError());
// put blob 6
putRequest3 = new PutRequest(1, "client1", blobIdList.get(5), propertyList.get(5), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(5)), propertyList.get(5).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(5) != null ? ByteBuffer.wrap(encryptionKeyList.get(5)) : null);
expectedTokenSize += getPutRecordSize(propertyList.get(5), blobIdList.get(5), encryptionKeyList.get(5) != null ? ByteBuffer.wrap(encryptionKeyList.get(5)) : null, ByteBuffer.wrap(usermetadata), dataList.get(5));
putResponseStream = channel3.sendAndReceive(putRequest3).getInputStream();
response3 = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response3.getError());
// wait until replication completes
notificationSystem.awaitBlobCreations(blobIdList.get(0).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(1).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(2).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(3).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(4).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(5).getID());
checkTtlUpdateStatus(channel3, clusterMap, blobIdFactory, blobIdList.get(5), dataList.get(5), false, getExpiryTimeMs(propertyList.get(5)));
updateBlobTtl(channel3, blobIdList.get(5), cluster.time.milliseconds());
expectedTokenSize += getUpdateRecordSize(blobIdList.get(5), SubRecord.Type.TTL_UPDATE);
checkTtlUpdateStatus(channel3, clusterMap, blobIdFactory, blobIdList.get(5), dataList.get(5), true, Utils.Infinite_Time);
notificationSystem.awaitBlobUpdates(blobIdList.get(5).getID(), UpdateType.TTL_UPDATE);
// get blob properties
ArrayList<BlobId> ids = new ArrayList<BlobId>();
MockPartitionId mockPartitionId = (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
ids.add(blobIdList.get(2));
ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(mockPartitionId, ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest1 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel2.sendAndReceive(getRequest1).getInputStream();
GetResponse resp1 = GetResponse.readFrom(stream, clusterMap);
assertEquals(ServerErrorCode.No_Error, resp1.getError());
assertEquals(ServerErrorCode.No_Error, resp1.getPartitionResponseInfoList().get(0).getErrorCode());
try {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp1.getInputStream());
assertEquals(1000, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("AccountId mismatch", propertyList.get(2).getAccountId(), propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", propertyList.get(2).getContainerId(), propertyOutput.getContainerId());
assertEquals("IsEncrypted mismatch", propertyList.get(2).isEncrypted(), propertyOutput.isEncrypted());
} catch (MessageFormatException e) {
fail();
}
// get user metadata
ids.clear();
ids.add(blobIdList.get(1));
GetRequest getRequest2 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
stream = channel1.sendAndReceive(getRequest2).getInputStream();
GetResponse resp2 = GetResponse.readFrom(stream, clusterMap);
assertEquals(ServerErrorCode.No_Error, resp2.getError());
assertEquals(ServerErrorCode.No_Error, resp2.getPartitionResponseInfoList().get(0).getErrorCode());
try {
ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp2.getInputStream());
assertArrayEquals(usermetadata, userMetadataOutput.array());
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKeyList.get(1), resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
} catch (MessageFormatException e) {
fail();
}
releaseNettyBufUnderneathStream(stream);
// get blob
ids.clear();
ids.add(blobIdList.get(0));
GetRequest getRequest3 = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
stream = channel3.sendAndReceive(getRequest3).getInputStream();
GetResponse resp3 = GetResponse.readFrom(stream, clusterMap);
try {
BlobData blobData = MessageFormatRecord.deserializeBlob(resp3.getInputStream());
byte[] blobout = getBlobDataAndRelease(blobData);
assertArrayEquals(dataList.get(0), blobout);
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKeyList.get(0), resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
} catch (MessageFormatException e) {
fail();
}
releaseNettyBufUnderneathStream(stream);
// get blob all
ids.clear();
ids.add(blobIdList.get(0));
GetRequest getRequest4 = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
stream = channel1.sendAndReceive(getRequest4).getInputStream();
GetResponse resp4 = GetResponse.readFrom(stream, clusterMap);
try {
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp4.getInputStream(), blobIdFactory);
byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals(dataList.get(0), blobout);
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", blobAll.getBlobEncryptionKey());
assertArrayEquals("EncryptionKey mismatch", encryptionKeyList.get(0), blobAll.getBlobEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", blobAll.getBlobEncryptionKey());
}
} catch (MessageFormatException e) {
fail();
}
releaseNettyBufUnderneathStream(stream);
if (!testEncryption) {
// get blob data
// Use router to get the blob
Properties routerProperties = getRouterProps(routerDatacenter);
routerProperties.putAll(routerProps);
VerifiableProperties routerVerifiableProperties = new VerifiableProperties(routerProperties);
AccountService accountService = new InMemAccountService(false, true);
Router router = new NonBlockingRouterFactory(routerVerifiableProperties, clusterMap, notificationSystem, getSSLFactoryIfRequired(routerVerifiableProperties), accountService).getRouter();
checkBlobId(router, blobIdList.get(0), dataList.get(0));
checkBlobId(router, blobIdList.get(1), dataList.get(1));
checkBlobId(router, blobIdList.get(2), dataList.get(2));
checkBlobId(router, blobIdList.get(3), dataList.get(3));
checkBlobId(router, blobIdList.get(4), dataList.get(4));
checkBlobId(router, blobIdList.get(5), dataList.get(5));
router.close();
}
// fetch blob that does not exist
// get blob properties
ids = new ArrayList<BlobId>();
mockPartitionId = (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
ids.add(new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), propertyList.get(0).getAccountId(), propertyList.get(0).getContainerId(), mockPartitionId, false, BlobId.BlobDataType.DATACHUNK));
partitionRequestInfoList.clear();
partitionRequestInfo = new PartitionRequestInfo(mockPartitionId, ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest5 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
stream = channel3.sendAndReceive(getRequest5).getInputStream();
GetResponse resp5 = GetResponse.readFrom(stream, clusterMap);
assertEquals(ServerErrorCode.No_Error, resp5.getError());
assertEquals(ServerErrorCode.Blob_Not_Found, resp5.getPartitionResponseInfoList().get(0).getErrorCode());
releaseNettyBufUnderneathStream(stream);
// delete a blob and ensure it is propagated
DeleteRequest deleteRequest = new DeleteRequest(1, "reptest", blobIdList.get(0), System.currentTimeMillis());
expectedTokenSize += getUpdateRecordSize(blobIdList.get(0), SubRecord.Type.DELETE);
DataInputStream deleteResponseStream = channel1.sendAndReceive(deleteRequest).getInputStream();
DeleteResponse deleteResponse = DeleteResponse.readFrom(deleteResponseStream);
releaseNettyBufUnderneathStream(deleteResponseStream);
assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
notificationSystem.awaitBlobDeletions(blobIdList.get(0).getID());
ids = new ArrayList<BlobId>();
ids.add(blobIdList.get(0));
partitionRequestInfoList.clear();
partitionRequestInfo = new PartitionRequestInfo(partition, ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest6 = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
stream = channel3.sendAndReceive(getRequest6).getInputStream();
GetResponse resp6 = GetResponse.readFrom(stream, clusterMap);
assertEquals(ServerErrorCode.No_Error, resp6.getError());
assertEquals(ServerErrorCode.Blob_Deleted, resp6.getPartitionResponseInfoList().get(0).getErrorCode());
releaseNettyBufUnderneathStream(stream);
// get the data node to inspect replication tokens on
DataNodeId dataNodeId = clusterMap.getDataNodeId("localhost", interestedDataNodePortNumber);
checkReplicaTokens(clusterMap, dataNodeId, expectedTokenSize - getUpdateRecordSize(blobIdList.get(0), SubRecord.Type.DELETE), "0");
// Shut down server 1
cluster.getServers().get(0).shutdown();
cluster.getServers().get(0).awaitShutdown();
// Add more data to server 2 and server 3. Recover server 1 and ensure it is completely replicated
// put blob 7
putRequest2 = new PutRequest(1, "client1", blobIdList.get(6), propertyList.get(6), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(6)), propertyList.get(6).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(6) != null ? ByteBuffer.wrap(encryptionKeyList.get(6)) : null);
expectedTokenSize += getPutRecordSize(propertyList.get(6), blobIdList.get(6), encryptionKeyList.get(6) != null ? ByteBuffer.wrap(encryptionKeyList.get(6)) : null, ByteBuffer.wrap(usermetadata), dataList.get(6));
putResponseStream = channel2.sendAndReceive(putRequest2).getInputStream();
response2 = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response2.getError());
// put blob 8
putRequest3 = new PutRequest(1, "client1", blobIdList.get(7), propertyList.get(7), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(7)), propertyList.get(7).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(7) != null ? ByteBuffer.wrap(encryptionKeyList.get(7)) : null);
expectedTokenSize += getPutRecordSize(propertyList.get(7), blobIdList.get(7), encryptionKeyList.get(7) != null ? ByteBuffer.wrap(encryptionKeyList.get(7)) : null, ByteBuffer.wrap(usermetadata), dataList.get(7));
putResponseStream = channel3.sendAndReceive(putRequest3).getInputStream();
response3 = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response3.getError());
// put blob 9
putRequest2 = new PutRequest(1, "client1", blobIdList.get(8), propertyList.get(8), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(8)), propertyList.get(8).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(8) != null ? ByteBuffer.wrap(encryptionKeyList.get(8)) : null);
expectedTokenSize += getPutRecordSize(propertyList.get(8), blobIdList.get(8), encryptionKeyList.get(8) != null ? ByteBuffer.wrap(encryptionKeyList.get(8)) : null, ByteBuffer.wrap(usermetadata), dataList.get(8));
putResponseStream = channel2.sendAndReceive(putRequest2).getInputStream();
response2 = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response2.getError());
// put blob 10
putRequest3 = new PutRequest(1, "client1", blobIdList.get(9), propertyList.get(9), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(9)), propertyList.get(9).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(9) != null ? ByteBuffer.wrap(encryptionKeyList.get(9)) : null);
expectedTokenSize += getPutRecordSize(propertyList.get(9), blobIdList.get(9), encryptionKeyList.get(9) != null ? ByteBuffer.wrap(encryptionKeyList.get(9)) : null, ByteBuffer.wrap(usermetadata), dataList.get(9));
putResponseStream = channel3.sendAndReceive(putRequest3).getInputStream();
response3 = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response3.getError());
// put blob 11
putRequest2 = new PutRequest(1, "client1", blobIdList.get(10), propertyList.get(10), ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(dataList.get(10)), propertyList.get(10).getBlobSize(), BlobType.DataBlob, encryptionKeyList.get(10) != null ? ByteBuffer.wrap(encryptionKeyList.get(10)) : null);
expectedTokenSize += getPutRecordSize(propertyList.get(10), blobIdList.get(10), encryptionKeyList.get(10) != null ? ByteBuffer.wrap(encryptionKeyList.get(10)) : null, ByteBuffer.wrap(usermetadata), dataList.get(10));
putResponseStream = channel2.sendAndReceive(putRequest2).getInputStream();
response2 = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response2.getError());
checkTtlUpdateStatus(channel2, clusterMap, blobIdFactory, blobIdList.get(10), dataList.get(10), false, getExpiryTimeMs(propertyList.get(10)));
updateBlobTtl(channel2, blobIdList.get(10), cluster.time.milliseconds());
expectedTokenSize += getUpdateRecordSize(blobIdList.get(10), SubRecord.Type.TTL_UPDATE);
checkTtlUpdateStatus(channel2, clusterMap, blobIdFactory, blobIdList.get(10), dataList.get(10), true, Utils.Infinite_Time);
cluster.reinitServer(0);
// wait for server to recover
notificationSystem.awaitBlobCreations(blobIdList.get(6).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(7).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(8).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(9).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(10).getID());
notificationSystem.awaitBlobUpdates(blobIdList.get(10).getID(), UpdateType.TTL_UPDATE);
channel1.disconnect();
channel1.connect();
// get blob
try {
checkBlobContent(clusterMap, blobIdList.get(1), channel1, dataList.get(1), encryptionKeyList.get(1));
checkBlobContent(clusterMap, blobIdList.get(2), channel1, dataList.get(2), encryptionKeyList.get(2));
checkBlobContent(clusterMap, blobIdList.get(3), channel1, dataList.get(3), encryptionKeyList.get(3));
checkBlobContent(clusterMap, blobIdList.get(4), channel1, dataList.get(4), encryptionKeyList.get(4));
checkBlobContent(clusterMap, blobIdList.get(5), channel1, dataList.get(5), encryptionKeyList.get(5));
checkBlobContent(clusterMap, blobIdList.get(6), channel1, dataList.get(6), encryptionKeyList.get(6));
checkBlobContent(clusterMap, blobIdList.get(7), channel1, dataList.get(7), encryptionKeyList.get(7));
checkBlobContent(clusterMap, blobIdList.get(8), channel1, dataList.get(8), encryptionKeyList.get(8));
checkBlobContent(clusterMap, blobIdList.get(9), channel1, dataList.get(9), encryptionKeyList.get(9));
checkBlobContent(clusterMap, blobIdList.get(10), channel1, dataList.get(10), encryptionKeyList.get(10));
} catch (MessageFormatException e) {
fail();
}
// check that the ttl update went through
checkTtlUpdateStatus(channel1, clusterMap, blobIdFactory, blobIdList.get(10), dataList.get(10), true, Utils.Infinite_Time);
// Shut down server 1, remove all of its data from all mount paths, then recover server 1 and ensure the node is rebuilt
cluster.getServers().get(0).shutdown();
cluster.getServers().get(0).awaitShutdown();
File mountFile = new File(clusterMap.getReplicaIds(dataNodeId).get(0).getMountPath());
for (File toDelete : Objects.requireNonNull(mountFile.listFiles())) {
deleteFolderContent(toDelete, true);
}
notificationSystem.decrementCreatedReplica(blobIdList.get(1).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
notificationSystem.decrementCreatedReplica(blobIdList.get(2).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
notificationSystem.decrementCreatedReplica(blobIdList.get(3).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
notificationSystem.decrementCreatedReplica(blobIdList.get(4).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
notificationSystem.decrementCreatedReplica(blobIdList.get(5).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
notificationSystem.decrementUpdatedReplica(blobIdList.get(5).getID(), dataNodeId.getHostname(), dataNodeId.getPort(), UpdateType.TTL_UPDATE);
notificationSystem.decrementCreatedReplica(blobIdList.get(6).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
notificationSystem.decrementCreatedReplica(blobIdList.get(7).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
notificationSystem.decrementCreatedReplica(blobIdList.get(8).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
notificationSystem.decrementCreatedReplica(blobIdList.get(9).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
notificationSystem.decrementCreatedReplica(blobIdList.get(10).getID(), dataNodeId.getHostname(), dataNodeId.getPort());
notificationSystem.decrementUpdatedReplica(blobIdList.get(10).getID(), dataNodeId.getHostname(), dataNodeId.getPort(), UpdateType.TTL_UPDATE);
cluster.reinitServer(0);
notificationSystem.awaitBlobCreations(blobIdList.get(1).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(2).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(3).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(4).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(5).getID());
notificationSystem.awaitBlobUpdates(blobIdList.get(5).getID(), UpdateType.TTL_UPDATE);
notificationSystem.awaitBlobCreations(blobIdList.get(6).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(7).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(8).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(9).getID());
notificationSystem.awaitBlobCreations(blobIdList.get(10).getID());
notificationSystem.awaitBlobUpdates(blobIdList.get(10).getID(), UpdateType.TTL_UPDATE);
channel1.disconnect();
channel1.connect();
// get blob
try {
checkBlobContent(clusterMap, blobIdList.get(1), channel1, dataList.get(1), encryptionKeyList.get(1));
checkBlobContent(clusterMap, blobIdList.get(2), channel1, dataList.get(2), encryptionKeyList.get(2));
checkBlobContent(clusterMap, blobIdList.get(3), channel1, dataList.get(3), encryptionKeyList.get(3));
checkBlobContent(clusterMap, blobIdList.get(4), channel1, dataList.get(4), encryptionKeyList.get(4));
checkBlobContent(clusterMap, blobIdList.get(5), channel1, dataList.get(5), encryptionKeyList.get(5));
checkBlobContent(clusterMap, blobIdList.get(6), channel1, dataList.get(6), encryptionKeyList.get(6));
checkBlobContent(clusterMap, blobIdList.get(7), channel1, dataList.get(7), encryptionKeyList.get(7));
checkBlobContent(clusterMap, blobIdList.get(8), channel1, dataList.get(8), encryptionKeyList.get(8));
checkBlobContent(clusterMap, blobIdList.get(9), channel1, dataList.get(9), encryptionKeyList.get(9));
checkBlobContent(clusterMap, blobIdList.get(10), channel1, dataList.get(10), encryptionKeyList.get(10));
} catch (MessageFormatException e) {
fail();
}
// check that the ttl updates are present
checkTtlUpdateStatus(channel1, clusterMap, blobIdFactory, blobIdList.get(5), dataList.get(5), true, Utils.Infinite_Time);
checkTtlUpdateStatus(channel1, clusterMap, blobIdFactory, blobIdList.get(10), dataList.get(10), true, Utils.Infinite_Time);
channel1.disconnect();
channel2.disconnect();
channel3.disconnect();
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
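checkBlobContent is invoked throughout but not shown in this excerpt. A minimal sketch under the assumption that it issues a GetRequest for MessageFormatFlags.Blob over the given channel and compares the deserialized payload (the exact assertions and get option are assumptions):
private static void checkBlobContent(MockClusterMap clusterMap, BlobId blobId, ConnectedChannel channel, byte[] dataToCheck, byte[] encryptionKey) throws IOException, MessageFormatException {
List<PartitionRequestInfo> infos = Collections.singletonList(new PartitionRequestInfo(blobId.getPartition(), Collections.singletonList(blobId)));
GetRequest getRequest = new GetRequest(1, "clientId", MessageFormatFlags.Blob, infos, GetOption.Include_All);
DataInputStream stream = channel.sendAndReceive(getRequest).getInputStream();
GetResponse response = GetResponse.readFrom(stream, clusterMap);
assertEquals(ServerErrorCode.No_Error, response.getPartitionResponseInfoList().get(0).getErrorCode());
BlobData blobData = MessageFormatRecord.deserializeBlob(response.getInputStream());
byte[] blobout = getBlobDataAndRelease(blobData);
assertArrayEquals(dataToCheck, blobout);
if (encryptionKey != null) {
// Encrypted blobs surface their encryption key via MessageMetadata.
assertArrayEquals(encryptionKey, response.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
}
releaseNettyBufUnderneathStream(stream);
}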