Use of com.github.ambry.clustermap.ReplicaId in project ambry by LinkedIn.
The class ServerTestUtil, method checkReplicaTokens.
/**
* Repeatedly check the replication token file until the target offset value is found for all
* remote (peer) replicas of a given partition. Fail if {@code numTries} is exceeded or a token
* offset larger than the target is found.
* @param clusterMap the cluster map that contains the data node to inspect
* @param dataNodeId the data node to inspect
* @param targetOffset the token offset to look for in the {@code targetPartition}
* @param targetPartition the name of the partition in which to look for the {@code targetOffset}
* @throws Exception if the token file cannot be read or the sleep between retries is interrupted
*/
private static void checkReplicaTokens(MockClusterMap clusterMap, DataNodeId dataNodeId, long targetOffset, String targetPartition) throws Exception {
List<String> mountPaths = ((MockDataNodeId) dataNodeId).getMountPaths();
// we should have an entry for each (partition, remote replica) pair
Set<String> completeSetToCheck = new HashSet<>();
List<ReplicaId> replicaIds = clusterMap.getReplicaIds(dataNodeId);
int numRemoteNodes = 0;
for (ReplicaId replicaId : replicaIds) {
List<? extends ReplicaId> peerReplicas = replicaId.getPeerReplicaIds();
if (replicaId.getPartitionId().isEqual(targetPartition)) {
numRemoteNodes = peerReplicas.size();
}
for (ReplicaId peerReplica : peerReplicas) {
completeSetToCheck.add(replicaId.getPartitionId().toString() + peerReplica.getDataNodeId().getHostname() + peerReplica.getDataNodeId().getPort());
}
}
StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
FindTokenFactory factory = Utils.getObj("com.github.ambry.store.StoreFindTokenFactory", storeKeyFactory);
int numTries = 4;
boolean foundTarget = false;
while (!foundTarget && numTries > 0) {
Thread.sleep(5000);
numTries--;
Set<String> setToCheck = new HashSet<String>(completeSetToCheck);
int numFound = 0;
for (String mountPath : mountPaths) {
File replicaTokenFile = new File(mountPath, "replicaTokens");
if (replicaTokenFile.exists()) {
CrcInputStream crcStream = new CrcInputStream(new FileInputStream(replicaTokenFile));
DataInputStream dataInputStream = new DataInputStream(crcStream);
try {
short version = dataInputStream.readShort();
assertEquals(1, version);
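// Each entry in the version-1 "replicaTokens" file, as parsed below, is laid out as:
// partition id | remote hostname (int-length string) | remote replica path (int-length string)
// | remote port (int) | total bytes read from local store (long) | replica type (short)
// | serialized replica token. The file ends with a CRC (long) over the preceding bytes,
// verified once the loop exits.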
while (dataInputStream.available() > 8) {
// read partition id
PartitionId partitionId = clusterMap.getPartitionIdFromStream(dataInputStream);
// read remote node host name
String hostname = Utils.readIntString(dataInputStream);
// read remote replica path
Utils.readIntString(dataInputStream);
// read remote port
int port = dataInputStream.readInt();
assertTrue(setToCheck.contains(partitionId.toString() + hostname + port));
setToCheck.remove(partitionId.toString() + hostname + port);
// read total bytes read from local store
dataInputStream.readLong();
// read replica type
ReplicaType replicaType = ReplicaType.values()[dataInputStream.readShort()];
// read replica token
StoreFindToken token = (StoreFindToken) factory.getFindToken(dataInputStream);
System.out.println("partitionId " + partitionId + " hostname " + hostname + " port " + port + " token " + token);
Offset endTokenOffset = token.getOffset();
long parsedToken = endTokenOffset == null ? -1 : endTokenOffset.getOffset();
System.out.println("The parsed token is " + parsedToken);
if (partitionId.isEqual(targetPartition)) {
assertFalse("Parsed offset: " + parsedToken + " must not be larger than target value: " + targetOffset, parsedToken > targetOffset);
if (parsedToken == targetOffset) {
numFound++;
}
} else {
assertEquals("Tokens should remain at -1 offsets on unmodified partitions", -1, parsedToken);
}
}
long crc = crcStream.getValue();
assertEquals(crc, dataInputStream.readLong());
} catch (IOException e) {
fail();
} finally {
dataInputStream.close();
}
}
}
if (numFound == numRemoteNodes) {
foundTarget = true;
}
}
if (!foundTarget) {
fail("Could not find target token offset: " + targetOffset);
}
}
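A minimal usage sketch for this helper follows (hypothetical, not part of ServerTestUtil's callers; the partition name, offset and loop are illustrative assumptions):
// Hypothetical caller: wait until the replication tokens on every data node of the
// mock cluster have advanced to a known log offset for partition "0".
MockClusterMap clusterMap = cluster.getClusterMap();
// assumed: the expected log end offset after this test's writes
long targetOffset = 18974;
for (DataNodeId dataNodeId : clusterMap.getDataNodeIds()) {
checkReplicaTokens(clusterMap, dataNodeId, targetOffset, "0");
}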
Use of com.github.ambry.clustermap.ReplicaId in project ambry by LinkedIn.
The class ServerTestUtil, method endToEndTest.
static void endToEndTest(Port targetPort, String routerDatacenter, MockCluster cluster, SSLConfig clientSSLConfig, SSLSocketFactory clientSSLSocketFactory, Properties routerProps, boolean testEncryption) {
try {
MockClusterMap clusterMap = cluster.getClusterMap();
BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
byte[] userMetadata = new byte[1000];
byte[] data = new byte[31870];
byte[] encryptionKey = new byte[100];
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
BlobProperties properties = new BlobProperties(31870, "serviceid1", accountId, containerId, testEncryption, cluster.time.milliseconds());
TestUtils.RANDOM.nextBytes(userMetadata);
TestUtils.RANDOM.nextBytes(data);
if (testEncryption) {
TestUtils.RANDOM.nextBytes(encryptionKey);
}
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
BlobId blobId1 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
BlobId blobId2 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
BlobId blobId3 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
BlobId blobId4 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
BlobId blobId5 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
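// Roles of the five blob ids above (all on the same partition): blobId1 is used for the
// basic put/get/delete/undelete flow, blobId2 for the TTL-update flow, blobId3 is a plain
// put, blobId4 is put already expired, and blobId5 is put against the stopped and then
// restarted store.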
// put blob 1
PutRequest putRequest = new PutRequest(1, "client1", blobId1, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
ConnectedChannel channel = getBlockingChannelBasedOnPortType(targetPort, "localhost", clientSSLSocketFactory, clientSSLConfig);
channel.connect();
DataInputStream putResponseStream = channel.sendAndReceive(putRequest).getInputStream();
PutResponse response = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response.getError());
// put blob 2 with an expiry time and apply TTL update later
BlobProperties propertiesForTtlUpdate = new BlobProperties(31870, "serviceid1", "ownerid", "image/png", false, TestUtils.TTL_SECS, cluster.time.milliseconds(), accountId, containerId, testEncryption, null, null, null);
long ttlUpdateBlobExpiryTimeMs = getExpiryTimeMs(propertiesForTtlUpdate);
PutRequest putRequest2 = new PutRequest(1, "client1", blobId2, propertiesForTtlUpdate, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
putResponseStream = channel.sendAndReceive(putRequest2).getInputStream();
PutResponse response2 = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response2.getError());
// put blob 3
PutRequest putRequest3 = new PutRequest(1, "client1", blobId3, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
putResponseStream = channel.sendAndReceive(putRequest3).getInputStream();
PutResponse response3 = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response3.getError());
// put blob 4 that is expired
BlobProperties propertiesExpired = new BlobProperties(31870, "serviceid1", "ownerid", "jpeg", false, 0, cluster.time.milliseconds(), accountId, containerId, testEncryption, null, null, null);
PutRequest putRequest4 = new PutRequest(1, "client1", blobId4, propertiesExpired, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
putResponseStream = channel.sendAndReceive(putRequest4).getInputStream();
PutResponse response4 = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals(ServerErrorCode.No_Error, response4.getError());
cluster.time.sleep(10000);
// get blob properties
ArrayList<BlobId> ids = new ArrayList<>();
MockPartitionId partition = (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
ids.add(blobId1);
ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(partition, ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest1 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel.sendAndReceive(getRequest1).getInputStream();
GetResponse resp1 = GetResponse.readFrom(stream, clusterMap);
try {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp1.getInputStream());
assertEquals(31870, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
fail();
}
// get blob properties with expired flag set
ids = new ArrayList<BlobId>();
partition = (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
ids.add(blobId1);
partitionRequestInfoList = new ArrayList<>();
partitionRequestInfo = new PartitionRequestInfo(partition, ids);
partitionRequestInfoList.add(partitionRequestInfo);
getRequest1 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.Include_Expired_Blobs);
stream = channel.sendAndReceive(getRequest1).getInputStream();
resp1 = GetResponse.readFrom(stream, clusterMap);
try {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp1.getInputStream());
assertEquals(31870, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
fail();
}
// get blob properties for expired blob
// 1. With no flag
ArrayList<BlobId> idsExpired = new ArrayList<>();
MockPartitionId partitionExpired = (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
idsExpired.add(blobId4);
ArrayList<PartitionRequestInfo> partitionRequestInfoListExpired = new ArrayList<>();
PartitionRequestInfo partitionRequestInfoExpired = new PartitionRequestInfo(partitionExpired, idsExpired);
partitionRequestInfoListExpired.add(partitionRequestInfoExpired);
GetRequest getRequestExpired = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoListExpired, GetOption.None);
DataInputStream streamExpired = channel.sendAndReceive(getRequestExpired).getInputStream();
GetResponse respExpired = GetResponse.readFrom(streamExpired, clusterMap);
assertEquals(ServerErrorCode.Blob_Expired, respExpired.getPartitionResponseInfoList().get(0).getErrorCode());
releaseNettyBufUnderneathStream(streamExpired);
// 2. With Include_Expired flag
idsExpired = new ArrayList<>();
partitionExpired = (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
idsExpired.add(blobId4);
partitionRequestInfoListExpired = new ArrayList<>();
partitionRequestInfoExpired = new PartitionRequestInfo(partitionExpired, idsExpired);
partitionRequestInfoListExpired.add(partitionRequestInfoExpired);
getRequestExpired = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoListExpired, GetOption.Include_Expired_Blobs);
streamExpired = channel.sendAndReceive(getRequestExpired).getInputStream();
respExpired = GetResponse.readFrom(streamExpired, clusterMap);
try {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(respExpired.getInputStream());
assertEquals(31870, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("ownerid", propertyOutput.getOwnerId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
releaseNettyBufUnderneathStream(streamExpired);
} catch (MessageFormatException e) {
fail();
}
// get user metadata
GetRequest getRequest2 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest2).getInputStream();
GetResponse resp2 = GetResponse.readFrom(stream, clusterMap);
try {
ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp2.getInputStream());
assertArrayEquals(userMetadata, userMetadataOutput.array());
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp2.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
fail();
}
// get blob info
GetRequest getRequest3 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobInfo, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest3).getInputStream();
GetResponse resp3 = GetResponse.readFrom(stream, clusterMap);
InputStream responseStream = resp3.getInputStream();
// verify blob properties.
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(responseStream);
assertEquals(31870, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
// verify user metadata
ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(responseStream);
assertArrayEquals(userMetadata, userMetadataOutput.array());
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp3.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
releaseNettyBufUnderneathStream(stream);
// get blob all
GetRequest getRequest4 = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest4).getInputStream();
GetResponse resp4 = GetResponse.readFrom(stream, clusterMap);
responseStream = resp4.getInputStream();
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(responseStream, blobIdFactory);
byte[] actualBlobData = getBlobDataAndRelease(blobAll.getBlobData());
// verify content
assertArrayEquals("Content mismatch.", data, actualBlobData);
if (testEncryption) {
assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
} else {
assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
}
releaseNettyBufUnderneathStream(stream);
// encryptionKey in this test doesn't have any relation to the content. Both are random bytes for test purposes.
if (!testEncryption) {
// Use router to get the blob
Properties routerProperties = getRouterProps(routerDatacenter);
routerProperties.putAll(routerProps);
VerifiableProperties routerVerifiableProps = new VerifiableProperties(routerProperties);
AccountService accountService = new InMemAccountService(false, true);
Router router = new NonBlockingRouterFactory(routerVerifiableProps, clusterMap, new MockNotificationSystem(clusterMap), getSSLFactoryIfRequired(routerVerifiableProps), accountService).getRouter();
checkBlobId(router, blobId1, data);
router.close();
}
checkTtlUpdateStatus(channel, clusterMap, blobIdFactory, blobId2, data, false, ttlUpdateBlobExpiryTimeMs);
updateBlobTtl(channel, blobId2, cluster.time.milliseconds());
checkTtlUpdateStatus(channel, clusterMap, blobIdFactory, blobId2, data, true, Utils.Infinite_Time);
// fetch blob that does not exist
// get blob properties
ids = new ArrayList<>();
partition = (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
ids.add(new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partition, false, BlobId.BlobDataType.DATACHUNK));
partitionRequestInfoList.clear();
partitionRequestInfo = new PartitionRequestInfo(partition, ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest5 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest5).getInputStream();
GetResponse resp5 = GetResponse.readFrom(stream, clusterMap);
assertEquals(ServerErrorCode.Blob_Not_Found, resp5.getPartitionResponseInfoList().get(0).getErrorCode());
releaseNettyBufUnderneathStream(stream);
// stop the store via AdminRequest
System.out.println("Begin to stop a BlobStore");
AdminRequest adminRequest = new AdminRequest(AdminRequestOrResponseType.BlobStoreControl, partitionIds.get(0), 1, "clientid2");
BlobStoreControlAdminRequest controlRequest = new BlobStoreControlAdminRequest((short) 0, BlobStoreControlAction.StopStore, adminRequest);
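// The (short) 0 argument above is numReplicasCaughtUpPerPartition: no peer replicas need
// to have caught up before the stop is allowed to proceed.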
stream = channel.sendAndReceive(controlRequest).getInputStream();
AdminResponse adminResponse = AdminResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals("Stop store admin request should succeed", ServerErrorCode.No_Error, adminResponse.getError());
// put a blob on a stopped store, which should fail
putRequest = new PutRequest(1, "client1", blobId5, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
putResponseStream = channel.sendAndReceive(putRequest).getInputStream();
response = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals("Put blob on stopped store should fail", ServerErrorCode.Replica_Unavailable, response.getError());
// get a blob properties on a stopped store, which should fail
ids = new ArrayList<>();
partition = (MockPartitionId) blobId1.getPartition();
ids.add(blobId1);
partitionRequestInfoList = new ArrayList<>();
partitionRequestInfo = new PartitionRequestInfo(partition, ids);
partitionRequestInfoList.add(partitionRequestInfo);
getRequest1 = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest1).getInputStream();
resp1 = GetResponse.readFrom(stream, clusterMap);
assertEquals("Get blob properties on stopped store should fail", ServerErrorCode.Replica_Unavailable, resp1.getPartitionResponseInfoList().get(0).getErrorCode());
releaseNettyBufUnderneathStream(stream);
// delete a blob on a stopped store, which should fail
DeleteRequest deleteRequest = new DeleteRequest(1, "deleteClient", blobId1, System.currentTimeMillis());
stream = channel.sendAndReceive(deleteRequest).getInputStream();
DeleteResponse deleteResponse = DeleteResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals("Delete blob on stopped store should fail", ServerErrorCode.Replica_Unavailable, deleteResponse.getError());
// start the store via AdminRequest
System.out.println("Begin to restart the BlobStore");
adminRequest = new AdminRequest(AdminRequestOrResponseType.BlobStoreControl, partitionIds.get(0), 1, "clientid2");
controlRequest = new BlobStoreControlAdminRequest((short) 0, BlobStoreControlAction.StartStore, adminRequest);
stream = channel.sendAndReceive(controlRequest).getInputStream();
adminResponse = AdminResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals("Start store admin request should succeed", ServerErrorCode.No_Error, adminResponse.getError());
List<? extends ReplicaId> replicaIds = partitionIds.get(0).getReplicaIds();
for (ReplicaId replicaId : replicaIds) {
// forcibly mark replicas and disks as up.
MockReplicaId mockReplicaId = (MockReplicaId) replicaId;
mockReplicaId.markReplicaDownStatus(false);
((MockDiskId) mockReplicaId.getDiskId()).setDiskState(HardwareState.AVAILABLE, false);
}
// put a blob on a restarted store, which should succeed
PutRequest putRequest5 = new PutRequest(1, "client1", blobId5, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
putResponseStream = channel.sendAndReceive(putRequest5).getInputStream();
PutResponse response5 = PutResponse.readFrom(putResponseStream);
releaseNettyBufUnderneathStream(putResponseStream);
assertEquals("Put blob on restarted store should succeed", ServerErrorCode.No_Error, response5.getError());
// get a blob on a restarted store, which should succeed
ids = new ArrayList<>();
PartitionId partitionId = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
ids.add(blobId1);
partitionRequestInfoList = new ArrayList<>();
partitionRequestInfo = new PartitionRequestInfo(partitionId, ids);
partitionRequestInfoList.add(partitionRequestInfo);
getRequest1 = new GetRequest(1, "clientid1", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest1).getInputStream();
resp1 = GetResponse.readFrom(stream, clusterMap);
responseStream = resp1.getInputStream();
blobAll = MessageFormatRecord.deserializeBlobAll(responseStream, blobIdFactory);
actualBlobData = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals("Content mismatch.", data, actualBlobData);
releaseNettyBufUnderneathStream(stream);
// undelete a blob that has not been deleted, which should fail
UndeleteRequest undeleteRequest = new UndeleteRequest(1, "undeleteClient", blobId1, System.currentTimeMillis());
stream = channel.sendAndReceive(undeleteRequest).getInputStream();
UndeleteResponse undeleteResponse = UndeleteResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals("Undelete blob should succeed", ServerErrorCode.Blob_Not_Deleted, undeleteResponse.getError());
// delete a blob on a restarted store, which should succeed
deleteRequest = new DeleteRequest(1, "deleteClient", blobId1, System.currentTimeMillis());
stream = channel.sendAndReceive(deleteRequest).getInputStream();
deleteResponse = DeleteResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals("Delete blob on restarted store should succeed", ServerErrorCode.No_Error, deleteResponse.getError());
// undelete a deleted blob, which should succeed
undeleteRequest = new UndeleteRequest(2, "undeleteClient", blobId1, System.currentTimeMillis());
stream = channel.sendAndReceive(undeleteRequest).getInputStream();
undeleteResponse = UndeleteResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals("Undelete blob should succeed", ServerErrorCode.No_Error, undeleteResponse.getError());
assertEquals("Undelete life version mismatch", undeleteResponse.getLifeVersion(), (short) 1);
// undelete an already undeleted blob, which should fail
undeleteRequest = new UndeleteRequest(3, "undeleteClient", blobId1, System.currentTimeMillis());
stream = channel.sendAndReceive(undeleteRequest).getInputStream();
undeleteResponse = UndeleteResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals("Undelete blob should fail", ServerErrorCode.Blob_Already_Undeleted, undeleteResponse.getError());
assertEquals("LifeVersion Mismatch", (short) 1, undeleteResponse.getLifeVersion());
// get an undeleted blob, which should succeed
getRequest1 = new GetRequest(1, "clientid1", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest1).getInputStream();
resp1 = GetResponse.readFrom(stream, clusterMap);
responseStream = resp1.getInputStream();
blobAll = MessageFormatRecord.deserializeBlobAll(responseStream, blobIdFactory);
actualBlobData = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals("Content mismatch", data, actualBlobData);
releaseNettyBufUnderneathStream(stream);
// Bounce servers to make them read the persisted token file.
cluster.stopServers();
cluster.reinitServers();
channel.disconnect();
channel.connect();
// get an undeleted blob after restart, which should succeed
getRequest1 = new GetRequest(1, "clientid1", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest1).getInputStream();
resp1 = GetResponse.readFrom(stream, clusterMap);
responseStream = resp1.getInputStream();
blobAll = MessageFormatRecord.deserializeBlobAll(responseStream, blobIdFactory);
actualBlobData = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals("Content mismatch", data, actualBlobData);
releaseNettyBufUnderneathStream(stream);
channel.disconnect();
} catch (Exception e) {
e.printStackTrace();
assertNull(e);
} finally {
List<? extends ReplicaId> replicaIds = cluster.getClusterMap().getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0).getReplicaIds();
for (ReplicaId replicaId : replicaIds) {
MockReplicaId mockReplicaId = (MockReplicaId) replicaId;
((MockDiskId) mockReplicaId.getDiskId()).setDiskState(HardwareState.AVAILABLE, true);
}
}
}
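A minimal invocation sketch for this helper, assuming an already started MockCluster, a plaintext target port and encryption disabled (the port number, datacenter name and empty router properties are illustrative assumptions):
// Hypothetical invocation; cluster is assumed to be built and started elsewhere.
Properties routerProps = new Properties();
ServerTestUtil.endToEndTest(new Port(6667, PortType.PLAINTEXT), "DC1", cluster, null, null, routerProps, false);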
Use of com.github.ambry.clustermap.ReplicaId in project ambry by LinkedIn.
The class ServerTestUtil, method endToEndReplicationWithMultiNodeMultiPartitionTest.
static void endToEndReplicationWithMultiNodeMultiPartitionTest(int interestedDataNodePortNumber, Port dataNode1Port, Port dataNode2Port, Port dataNode3Port, MockCluster cluster, SSLConfig clientSSLConfig1, SSLConfig clientSSLConfig2, SSLConfig clientSSLConfig3, SSLSocketFactory clientSSLSocketFactory1, SSLSocketFactory clientSSLSocketFactory2, SSLSocketFactory clientSSLSocketFactory3, MockNotificationSystem notificationSystem, boolean testEncryption) throws Exception {
// interestedDataNodePortNumber is used to locate the datanode and hence has to be PlainTextPort
MockClusterMap clusterMap = cluster.getClusterMap();
BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
List<AmbryServer> serverList = cluster.getServers();
byte[] usermetadata = new byte[100];
byte[] data = new byte[100];
byte[] encryptionKey = null;
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
BlobProperties properties = new BlobProperties(100, "serviceid1", null, null, false, TestUtils.TTL_SECS, cluster.time.milliseconds(), accountId, containerId, false, null, null, null);
long expectedExpiryTimeMs = getExpiryTimeMs(properties);
TestUtils.RANDOM.nextBytes(usermetadata);
TestUtils.RANDOM.nextBytes(data);
if (testEncryption) {
encryptionKey = new byte[100];
TestUtils.RANDOM.nextBytes(encryptionKey);
}
// connect to all the servers
ConnectedChannel channel1 = getBlockingChannelBasedOnPortType(dataNode1Port, "localhost", clientSSLSocketFactory1, clientSSLConfig1);
ConnectedChannel channel2 = getBlockingChannelBasedOnPortType(dataNode2Port, "localhost", clientSSLSocketFactory2, clientSSLConfig2);
ConnectedChannel channel3 = getBlockingChannelBasedOnPortType(dataNode3Port, "localhost", clientSSLSocketFactory3, clientSSLConfig3);
// put all the blobs to random servers
channel1.connect();
channel2.connect();
channel3.connect();
int noOfParallelThreads = 3;
int totalBlobsToPut = 50;
CountDownLatch latch = new CountDownLatch(noOfParallelThreads);
List<DirectSender> runnables = new ArrayList<DirectSender>(noOfParallelThreads);
ConnectedChannel channel = null;
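// Round-robin the three channels across the sender threads; with noOfParallelThreads == 3,
// i % noOfParallelThreads == i, so each DirectSender below gets a distinct channel.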
for (int i = 0; i < noOfParallelThreads; i++) {
if (i % noOfParallelThreads == 0) {
channel = channel1;
} else if (i % noOfParallelThreads == 1) {
channel = channel2;
} else if (i % noOfParallelThreads == 2) {
channel = channel3;
}
DirectSender runnable = new DirectSender(cluster, channel, totalBlobsToPut, data, usermetadata, properties, encryptionKey, latch);
runnables.add(runnable);
Thread threadToRun = new Thread(runnable);
threadToRun.start();
}
assertTrue("Did not put all blobs in 2 minutes", latch.await(2, TimeUnit.MINUTES));
// wait till replication can complete
List<BlobId> blobIds = new ArrayList<BlobId>();
for (int i = 0; i < runnables.size(); i++) {
blobIds.addAll(runnables.get(i).getBlobIds());
}
for (BlobId blobId : blobIds) {
notificationSystem.awaitBlobCreations(blobId.getID());
}
// Now that the blob is created and replicated, test the cases where a put request arrives for the same blob id
// later than replication.
testLatePutRequest(blobIds.get(0), properties, usermetadata, data, encryptionKey, channel1, channel2, channel3, ServerErrorCode.No_Error);
// Test the case where a put arrives with the same id as one in the server, but the blob is not identical.
BlobProperties differentProperties = new BlobProperties(properties.getBlobSize(), properties.getServiceId(), accountId, containerId, testEncryption, cluster.time.milliseconds());
testLatePutRequest(blobIds.get(0), differentProperties, usermetadata, data, encryptionKey, channel1, channel2, channel3, ServerErrorCode.Blob_Already_Exists);
byte[] differentUserMetadata = Arrays.copyOf(usermetadata, usermetadata.length);
differentUserMetadata[0] = (byte) ~differentUserMetadata[0];
testLatePutRequest(blobIds.get(0), properties, differentUserMetadata, data, encryptionKey, channel1, channel2, channel3, ServerErrorCode.Blob_Already_Exists);
byte[] differentData = Arrays.copyOf(data, data.length);
differentData[0] = (byte) ~differentData[0];
testLatePutRequest(blobIds.get(0), properties, usermetadata, differentData, encryptionKey, channel1, channel2, channel3, ServerErrorCode.Blob_Already_Exists);
// verify blob properties, metadata and blob across all nodes
for (int i = 0; i < 3; i++) {
channel = null;
if (i == 0) {
channel = channel1;
} else if (i == 1) {
channel = channel2;
} else if (i == 2) {
channel = channel3;
}
ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
for (int j = 0; j < blobIds.size(); j++) {
ArrayList<BlobId> ids = new ArrayList<BlobId>();
ids.add(blobIds.get(j));
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobIds.get(j).getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel.sendAndReceive(getRequest).getInputStream();
GetResponse resp = GetResponse.readFrom(stream, clusterMap);
try {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
assertEquals(100, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
assertEquals("Expiration time mismatch (props)", expectedExpiryTimeMs, getExpiryTimeMs(propertyOutput));
assertEquals("Expiration time mismatch (MessageInfo)", expectedExpiryTimeMs, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
fail();
}
// get user metadata
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
try {
ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp.getInputStream());
assertArrayEquals(usermetadata, userMetadataOutput.array());
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
assertEquals("Expiration time mismatch (MessageInfo)", expectedExpiryTimeMs, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
} catch (MessageFormatException e) {
e.printStackTrace();
fail();
}
releaseNettyBufUnderneathStream(stream);
// get blob
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
try {
BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
byte[] blobout = getBlobDataAndRelease(blobData);
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
assertEquals("Expiration time mismatch (MessageInfo)", expectedExpiryTimeMs, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
} catch (MessageFormatException e) {
e.printStackTrace();
fail();
}
releaseNettyBufUnderneathStream(stream);
// get blob all
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
try {
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
assertEquals("Expiration time mismatch (props)", expectedExpiryTimeMs, getExpiryTimeMs(blobAll.getBlobInfo().getBlobProperties()));
assertEquals("Expiration time mismatch (MessageInfo)", expectedExpiryTimeMs, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
} else {
assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
}
releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
e.printStackTrace();
fail();
}
}
}
// TTL-update all blobs and wait for replication
Map<ConnectedChannel, List<BlobId>> channelToBlobIds = new HashMap<>();
for (int i = 0; i < blobIds.size(); i++) {
final BlobId blobId = blobIds.get(i);
if (i % 3 == 0) {
channelToBlobIds.computeIfAbsent(channel1, updateChannel -> new ArrayList<>()).add(blobId);
} else if (i % 3 == 1) {
channelToBlobIds.computeIfAbsent(channel2, updateChannel -> new ArrayList<>()).add(blobId);
} else {
channelToBlobIds.computeIfAbsent(channel3, updateChannel -> new ArrayList<>()).add(blobId);
}
}
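// Fan out the TTL updates in parallel, one CompletableFuture per channel, and block
// until every future completes via join().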
channelToBlobIds.entrySet().stream().map(entry -> CompletableFuture.supplyAsync(() -> {
try {
for (BlobId blobId : entry.getValue()) {
updateBlobTtl(entry.getKey(), blobId, cluster.time.milliseconds());
}
return null;
} catch (Throwable e) {
throw new RuntimeException("Exception updating ttl for: " + entry, e);
}
})).forEach(CompletableFuture::join);
// check that the TTL update has propagated
blobIds.forEach(blobId -> notificationSystem.awaitBlobUpdates(blobId.getID(), UpdateType.TTL_UPDATE));
// check all servers
for (ConnectedChannel channelToUse : new ConnectedChannel[] { channel1, channel2, channel3 }) {
for (BlobId blobId : blobIds) {
checkTtlUpdateStatus(channelToUse, clusterMap, blobIdFactory, blobId, data, true, Utils.Infinite_Time);
}
}
// delete random blobs, wait for replication and ensure it is deleted in all nodes
Set<BlobId> blobsDeleted = new HashSet<BlobId>();
Set<BlobId> blobsChecked = new HashSet<BlobId>();
for (int i = 0; i < blobIds.size(); i++) {
int j = new Random().nextInt(3);
if (j == 0) {
j = new Random().nextInt(3);
if (j == 0) {
channel = channel1;
} else if (j == 1) {
channel = channel2;
} else if (j == 2) {
channel = channel3;
}
DeleteRequest deleteRequest = new DeleteRequest(1, "reptest", blobIds.get(i), System.currentTimeMillis());
DataInputStream deleteResponseStream = channel.sendAndReceive(deleteRequest).getInputStream();
DeleteResponse deleteResponse = DeleteResponse.readFrom(deleteResponseStream);
releaseNettyBufUnderneathStream(deleteResponseStream);
assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
blobsDeleted.add(blobIds.get(i));
}
}
Iterator<BlobId> iterator = blobsDeleted.iterator();
ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
while (iterator.hasNext()) {
BlobId deletedId = iterator.next();
notificationSystem.awaitBlobDeletions(deletedId.getID());
for (int j = 0; j < 3; j++) {
if (j == 0) {
channel = channel1;
} else if (j == 1) {
channel = channel2;
} else if (j == 2) {
channel = channel3;
}
ArrayList<BlobId> ids = new ArrayList<BlobId>();
ids.add(deletedId);
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(deletedId.getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel.sendAndReceive(getRequest).getInputStream();
GetResponse resp = GetResponse.readFrom(stream, clusterMap);
assertEquals(ServerErrorCode.Blob_Deleted, resp.getPartitionResponseInfoList().get(0).getErrorCode());
releaseNettyBufUnderneathStream(stream);
}
}
// take a server down, clean up a mount path, start and ensure replication fixes it
serverList.get(0).shutdown();
serverList.get(0).awaitShutdown();
MockDataNodeId dataNode = (MockDataNodeId) clusterMap.getDataNodeId("localhost", interestedDataNodePortNumber);
System.out.println("Cleaning mount path " + dataNode.getMountPaths().get(0));
for (ReplicaId replicaId : clusterMap.getReplicaIds(dataNode)) {
if (replicaId.getMountPath().compareToIgnoreCase(dataNode.getMountPaths().get(0)) == 0) {
System.out.println("Cleaning partition " + replicaId.getPartitionId());
}
}
deleteFolderContent(new File(dataNode.getMountPaths().get(0)), false);
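// For every blob whose replica lived on the wiped mount path, roll back the notification
// counts so that the awaits after the server restart block until replication restores them.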
for (int i = 0; i < blobIds.size(); i++) {
for (ReplicaId replicaId : blobIds.get(i).getPartition().getReplicaIds()) {
if (replicaId.getMountPath().compareToIgnoreCase(dataNode.getMountPaths().get(0)) == 0) {
if (blobsDeleted.contains(blobIds.get(i))) {
notificationSystem.decrementDeletedReplica(blobIds.get(i).getID(), dataNode.getHostname(), dataNode.getPort());
} else {
notificationSystem.decrementCreatedReplica(blobIds.get(i).getID(), dataNode.getHostname(), dataNode.getPort());
notificationSystem.decrementUpdatedReplica(blobIds.get(i).getID(), dataNode.getHostname(), dataNode.getPort(), UpdateType.TTL_UPDATE);
}
}
}
}
cluster.reinitServer(0);
channel1.disconnect();
channel1.connect();
for (int j = 0; j < blobIds.size(); j++) {
if (blobsDeleted.contains(blobIds.get(j))) {
notificationSystem.awaitBlobDeletions(blobIds.get(j).getID());
} else {
notificationSystem.awaitBlobCreations(blobIds.get(j).getID());
notificationSystem.awaitBlobUpdates(blobIds.get(j).getID(), UpdateType.TTL_UPDATE);
}
ArrayList<BlobId> ids = new ArrayList<BlobId>();
ids.add(blobIds.get(j));
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobIds.get(j).getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel1.sendAndReceive(getRequest).getInputStream();
GetResponse resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsDeleted.contains(blobIds.get(j)));
} else {
try {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
assertEquals(100, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
// get user metadata
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
stream = channel1.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsDeleted.contains(blobIds.get(j)));
} else {
try {
ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp.getInputStream());
assertArrayEquals(usermetadata, userMetadataOutput.array());
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
// get blob
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
stream = channel1.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsDeleted.contains(blobIds.get(j)));
} else {
try {
BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
byte[] blobout = getBlobDataAndRelease(blobData);
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
// get blob all
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
stream = channel1.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsDeleted.contains(blobIds.get(j)));
blobsDeleted.remove(blobIds.get(j));
blobsChecked.add(blobIds.get(j));
} else {
try {
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
} else {
assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
}
assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
}
assertEquals(0, blobsDeleted.size());
// take a server down, clean all contents, start and ensure replication fixes it
serverList.get(0).shutdown();
serverList.get(0).awaitShutdown();
dataNode = (MockDataNodeId) clusterMap.getDataNodeId("localhost", interestedDataNodePortNumber);
for (int i = 0; i < dataNode.getMountPaths().size(); i++) {
System.out.println("Cleaning mount path " + dataNode.getMountPaths().get(i));
for (ReplicaId replicaId : clusterMap.getReplicaIds(dataNode)) {
if (replicaId.getMountPath().compareToIgnoreCase(dataNode.getMountPaths().get(i)) == 0) {
System.out.println("Cleaning partition " + replicaId.getPartitionId());
}
}
deleteFolderContent(new File(dataNode.getMountPaths().get(i)), false);
}
for (int i = 0; i < blobIds.size(); i++) {
if (blobsChecked.contains(blobIds.get(i))) {
notificationSystem.decrementDeletedReplica(blobIds.get(i).getID(), dataNode.getHostname(), dataNode.getPort());
} else {
notificationSystem.decrementCreatedReplica(blobIds.get(i).getID(), dataNode.getHostname(), dataNode.getPort());
}
}
cluster.reinitServer(0);
channel1.disconnect();
channel1.connect();
for (int j = 0; j < blobIds.size(); j++) {
if (blobsChecked.contains(blobIds.get(j))) {
notificationSystem.awaitBlobDeletions(blobIds.get(j).getID());
} else {
notificationSystem.awaitBlobCreations(blobIds.get(j).getID());
}
ArrayList<BlobId> ids = new ArrayList<BlobId>();
ids.add(blobIds.get(j));
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobIds.get(j).getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel1.sendAndReceive(getRequest).getInputStream();
GetResponse resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsChecked.contains(blobIds.get(j)));
} else {
try {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
assertEquals(100, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
// get user metadata
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
stream = channel1.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsChecked.contains(blobIds.get(j)));
} else {
try {
ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp.getInputStream());
assertArrayEquals(usermetadata, userMetadataOutput.array());
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
// get blob
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
stream = channel1.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsChecked.contains(blobIds.get(j)));
} else {
try {
BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
byte[] blobout = getBlobDataAndRelease(blobData);
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
// get blob all
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
stream = channel1.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsChecked.contains(blobIds.get(j)));
blobsChecked.remove(blobIds.get(j));
blobsDeleted.add(blobIds.get(j));
} else {
try {
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
} else {
assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
}
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
}
assertEquals(0, blobsChecked.size());
short expectedLifeVersion = 1;
for (int i = 0; i < 2; i++) {
expectedLifeVersion += i;
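// expectedLifeVersion starts at 1; pass i == 0 leaves it at 1 (first delete + undelete
// round) and pass i == 1 bumps it to 2 (second round).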
// First undelete all deleted blobs
for (BlobId deletedId : blobsDeleted) {
UndeleteRequest undeleteRequest = new UndeleteRequest(2, "reptest", deletedId, System.currentTimeMillis());
DataInputStream undeleteResponseStream = channel3.sendAndReceive(undeleteRequest).getInputStream();
UndeleteResponse undeleteResponse = UndeleteResponse.readFrom(undeleteResponseStream);
releaseNettyBufUnderneathStream(undeleteResponseStream);
assertEquals(ServerErrorCode.No_Error, undeleteResponse.getError());
assertEquals(expectedLifeVersion, undeleteResponse.getLifeVersion());
}
Thread.sleep(5000);
// Then use get request to get all the data back and make sure the lifeVersion is correct
for (BlobId id : blobsDeleted) {
// We don't need to wait for blob undeletes, since one of the hosts has the Put record deleted
// from disk, so undeleting this blob would end up replicating the Put record instead of the undelete.
// notificationSystem.awaitBlobUndeletes(id.toString());
ArrayList<BlobId> ids = new ArrayList<>();
ids.add(id);
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(id.getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
// get blob all
GetRequest getRequest = new GetRequest(1, "clientid20", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel1.sendAndReceive(getRequest).getInputStream();
GetResponse resp = GetResponse.readFrom(stream, clusterMap);
assertEquals(ServerErrorCode.No_Error, resp.getError());
assertEquals(1, resp.getPartitionResponseInfoList().size());
assertEquals(ServerErrorCode.No_Error, resp.getPartitionResponseInfoList().get(0).getErrorCode());
assertEquals(1, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().size());
MessageInfo info = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0);
assertEquals(expectedLifeVersion, info.getLifeVersion());
assertFalse(info.isDeleted());
try {
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
} else {
assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
}
assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
fail();
}
}
for (BlobId id : blobsDeleted) {
DeleteRequest deleteRequest = new DeleteRequest(1, "reptest", id, System.currentTimeMillis());
DataInputStream deleteResponseStream = channel.sendAndReceive(deleteRequest).getInputStream();
DeleteResponse deleteResponse = DeleteResponse.readFrom(deleteResponseStream);
releaseNettyBufUnderneathStream(deleteResponseStream);
assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
}
Thread.sleep(1000);
for (BlobId id : blobsDeleted) {
ArrayList<BlobId> ids = new ArrayList<>();
ids.add(id);
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(id.getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
// get blob all
GetRequest getRequest = new GetRequest(1, "clientid200", MessageFormatFlags.All, partitionRequestInfoList, GetOption.Include_All);
DataInputStream stream = channel1.sendAndReceive(getRequest).getInputStream();
GetResponse resp = GetResponse.readFrom(stream, clusterMap);
assertEquals(ServerErrorCode.No_Error, resp.getError());
assertEquals(1, resp.getPartitionResponseInfoList().size());
assertEquals(ServerErrorCode.No_Error, resp.getPartitionResponseInfoList().get(0).getErrorCode());
assertEquals(1, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().size());
MessageInfo info = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0);
assertEquals(expectedLifeVersion, info.getLifeVersion());
assertTrue(info.isDeleted());
try {
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
} else {
assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
}
assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
fail("Unexpected MessageFormatException: " + e.getMessage());
}
}
}
channel1.disconnect();
channel2.disconnect();
channel3.disconnect();
}
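The two verification loops above repeat the same get-and-verify sequence, differing only in the GetOption used and the expected deleted flag. As a sketch, the shared logic could be factored into a helper along these lines (getBlobAllAndVerify is a hypothetical name, not an actual ServerTestUtil method; it assumes the same imports and utilities already used in this class):
// Hypothetical helper consolidating the duplicated get-and-verify logic above.
private static void getBlobAllAndVerify(ConnectedChannel channel, BlobId id, GetOption getOption,
    short expectedLifeVersion, boolean expectDeleted, byte[] expectedData, byte[] expectedEncryptionKey,
    MockClusterMap clusterMap, BlobIdFactory blobIdFactory) throws Exception {
  List<PartitionRequestInfo> requestInfos = new ArrayList<>();
  requestInfos.add(new PartitionRequestInfo(id.getPartition(), new ArrayList<>(Collections.singletonList(id))));
  GetRequest getRequest = new GetRequest(1, "clientid", MessageFormatFlags.All, requestInfos, getOption);
  DataInputStream stream = channel.sendAndReceive(getRequest).getInputStream();
  GetResponse resp = GetResponse.readFrom(stream, clusterMap);
  assertEquals(ServerErrorCode.No_Error, resp.getError());
  MessageInfo info = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0);
  assertEquals(expectedLifeVersion, info.getLifeVersion());
  assertEquals(expectDeleted, info.isDeleted());
  BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
  byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
  assertArrayEquals(expectedData, blobout);
  if (expectedEncryptionKey != null) {
    assertArrayEquals("EncryptionKey mismatch", expectedEncryptionKey, blobAll.getBlobEncryptionKey().array());
  } else {
    assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
  }
  releaseNettyBufUnderneathStream(stream);
}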
use of com.github.ambry.clustermap.ReplicaId in project ambry by linkedin.
the class ServerTestUtil method undeleteRecoveryTest.
static void undeleteRecoveryTest(Port targetPort, MockCluster cluster, SSLConfig clientSSLConfig, SSLSocketFactory clientSSLSocketFactory) {
try {
MockClusterMap clusterMap = cluster.getClusterMap();
byte[] userMetadata = new byte[1000];
byte[] data = new byte[31870];
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
BlobProperties properties = new BlobProperties(31870, "serviceid1", accountId, containerId, false, cluster.time.milliseconds());
TestUtils.RANDOM.nextBytes(userMetadata);
TestUtils.RANDOM.nextBytes(data);
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
BlobId blobId1 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
// put blob 1
PutRequest putRequest = new PutRequest(1, "client1", blobId1, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, null);
ConnectedChannel channel = getBlockingChannelBasedOnPortType(targetPort, "localhost", clientSSLSocketFactory, clientSSLConfig);
channel.connect();
DataInputStream stream = channel.sendAndReceive(putRequest).getInputStream();
PutResponse response = PutResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals(ServerErrorCode.No_Error, response.getError());
for (int i = 0; i < 2; i++) {
// delete blob 1
deleteBlob(channel, blobId1, cluster.time.milliseconds());
// undelete blob 1
undeleteBlob(channel, blobId1, cluster.time.milliseconds(), (short) (i + 1));
}
// Put blob 2 with a finite TTL (add an extra 5 secs to avoid a Blob_Update_Not_Allowed failure, since the
// TtlUpdate op time is also cluster.time.milliseconds(). In theory the update should succeed when op time =
// expiry time - buffer time. However, the index truncates the put time from ms to sec when storing the blob,
// so the milliseconds part of the initial put time is wiped out, pushing op time past expiry time - buffer
// time. Adding a few seconds of TTL avoids this failure.)
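// A rough worked example of that truncation (illustrative numbers; the 24-hour buffer is an assumption
// mirroring the 24-hour TTL chosen below, not a value read from the config): suppose the put happens at
// t = 1,000,000,500 ms with ttl = 86400 s. The index stores the put time truncated to seconds, so
// expiry = 1,000,000,000 ms + 86,400,000 ms. A TtlUpdate at op time = 1,000,000,500 ms must satisfy
// op time <= expiry - buffer = 1,000,000,000 ms, which the truncated-away 500 ms violates. With
// ttl = 86405 s the bound becomes 1,000,005,000 ms and the update succeeds.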
long ttl = 24 * 60 * 60 + 5;
BlobProperties propertiesExpired = new BlobProperties(31870, "serviceid1", "ownerid", "jpeg", false, ttl, cluster.time.milliseconds(), accountId, containerId, false, null, null, null);
BlobId blobId2 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), propertiesExpired.getAccountId(), propertiesExpired.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
PutRequest putRequest2 = new PutRequest(1, "client1", blobId2, propertiesExpired, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), propertiesExpired.getBlobSize(), BlobType.DataBlob, null);
stream = channel.sendAndReceive(putRequest2).getInputStream();
PutResponse response2 = PutResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals(ServerErrorCode.No_Error, response2.getError());
for (int i = 0; i < 2; i++) {
// delete blob 2
deleteBlob(channel, blobId2, cluster.time.milliseconds());
// undelete blob 2
undeleteBlob(channel, blobId2, cluster.time.milliseconds(), (short) (i + 1));
}
// ttl update blob 2
updateBlobTtl(channel, blobId2, cluster.time.milliseconds());
cluster.time.sleep(ttl + 10000);
// Now stop the server, remove all the index files for this partition, and test its recovery.
channel.disconnect();
AmbryServer server = cluster.getServers().get(0);
server.shutdown();
server.awaitShutdown();
MockDataNodeId dataNode = (MockDataNodeId) clusterMap.getDataNodeId("localhost", channel.getRemotePort());
for (ReplicaId replica : partitionIds.get(0).getReplicaIds()) {
if (replica.getDataNodeId().equals(dataNode)) {
for (File file : Objects.requireNonNull(new File(replica.getReplicaPath()).listFiles((file, filename) -> filename.endsWith("index")))) {
file.delete();
}
}
}
cluster.reinitServer(0);
channel = getBlockingChannelBasedOnPortType(targetPort, "localhost", clientSSLSocketFactory, clientSSLConfig);
channel.connect();
// Now verify that we can fetch blob1 and blob2.
for (BlobId blobId : new BlobId[] { blobId1, blobId2 }) {
long deadline = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10);
while (true) {
// get blob properties
ArrayList<BlobId> ids = new ArrayList<>();
ids.add(blobId);
ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<>();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(partitionIds.get(0), ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest).getInputStream();
GetResponse getResponse = GetResponse.readFrom(stream, clusterMap);
if (getResponse.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.No_Error) {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(getResponse.getInputStream());
assertEquals(31870, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
releaseNettyBufUnderneathStream(stream);
break;
} else {
Thread.sleep(1000);
if (System.currentTimeMillis() > deadline) {
throw new TimeoutException("Fail to get blob " + blobId + " at " + channel.getRemoteHost());
}
}
}
}
channel.disconnect();
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
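The get-with-deadline loop above is a general poll-until-timeout pattern. A minimal sketch of how it could be factored out (awaitCondition is a hypothetical helper, not part of ServerTestUtil; it assumes java.util.concurrent.Callable and TimeoutException imports):
// Hypothetical generic retry helper for the poll-until-deadline pattern above. The Callable
// should perform one attempt (e.g. a GetRequest) and return true once it succeeds.
private static void awaitCondition(Callable<Boolean> condition, long timeoutMs, String failureMessage)
    throws Exception {
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (!condition.call()) {
    if (System.currentTimeMillis() > deadline) {
      throw new TimeoutException(failureMessage);
    }
    Thread.sleep(1000); // wait before the next attempt
  }
}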
use of com.github.ambry.clustermap.ReplicaId in project ambry by linkedin.
the class AmbryServerRequests method isRemoteLagLesserOrEqual.
/**
* Provides catch up status of all the remote replicas of {@code partitionIds}.
* @param partitionIds the {@link PartitionId}s for which lag has to be <= {@code acceptableLagInBytes}.
* @param acceptableLagInBytes the maximum lag in bytes that is considered "acceptable".
* @param numReplicasCaughtUpPerPartition the number of replicas that have to be within {@code acceptableLagInBytes}
* (per partition). The min of this value and the total replica count - 1 (the local
* replica is excluded) is the number actually required.
* @return {@code true} if, for every {@link PartitionId} in {@code partitionIds}, at least the required number of
* remote replicas have a lag <= {@code acceptableLagInBytes}; {@code false} otherwise.
*/
private boolean isRemoteLagLesserOrEqual(Collection<PartitionId> partitionIds, long acceptableLagInBytes, short numReplicasCaughtUpPerPartition) {
boolean isAcceptable = true;
for (PartitionId partitionId : partitionIds) {
List<? extends ReplicaId> replicaIds = partitionId.getReplicaIds();
int caughtUpCount = 0;
for (ReplicaId replicaId : replicaIds) {
if (!replicaId.getDataNodeId().equals(currentNode)) {
long lagInBytes = replicationEngine.getRemoteReplicaLagFromLocalInBytes(partitionId, replicaId.getDataNodeId().getHostname(), replicaId.getReplicaPath());
logger.debug("Lag of {} is {}", replicaId, lagInBytes);
if (lagInBytes <= acceptableLagInBytes) {
caughtUpCount++;
}
if (caughtUpCount >= numReplicasCaughtUpPerPartition) {
break;
}
}
}
// -1 because we shouldn't consider the replica hosted on this node.
if (caughtUpCount < Math.min(replicaIds.size() - 1, numReplicasCaughtUpPerPartition)) {
isAcceptable = false;
break;
}
}
return isAcceptable;
}
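To make the min logic concrete: for a partition with 3 replicas, one of which is local, and numReplicasCaughtUpPerPartition = 5, only min(3 - 1, 5) = 2 caught-up remote replicas are required. A minimal sketch of how a caller might poll this predicate until catch-up or timeout (awaitRemoteCatchUp and the 100 ms retry interval are illustrative, not Ambry API):
// Hypothetical polling wrapper around isRemoteLagLesserOrEqual; names and timings are illustrative only.
private boolean awaitRemoteCatchUp(Collection<PartitionId> partitionIds, long acceptableLagInBytes,
    short numReplicasCaughtUpPerPartition, long timeoutMs) throws InterruptedException {
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (System.currentTimeMillis() < deadline) {
    if (isRemoteLagLesserOrEqual(partitionIds, acceptableLagInBytes, numReplicasCaughtUpPerPartition)) {
      return true; // enough remote replicas are within the acceptable lag
    }
    Thread.sleep(100); // back off before re-reading replica lag
  }
  return false;
}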