Use of com.github.ambry.store.MessageInfo in project ambry by linkedin.
The class ReplicationTestHelper, method addDeleteMessagesToReplicasOfPartition.
/**
* For the given partitionId, constructs delete messages and adds them to the given lists.
* @param partitionId the {@link PartitionId} that the message belongs to.
* @param id the {@link StoreKey} to create a delete message for.
* @param hosts the list of {@link MockHost} all of which will be populated with the messages.
* @throws MessageFormatException
* @throws IOException
*/
public static void addDeleteMessagesToReplicasOfPartition(PartitionId partitionId, StoreKey id, List<MockHost> hosts) throws MessageFormatException, IOException {
MessageInfo putMsg = getMessageInfo(id, hosts.get(0).infosByPartition.get(partitionId), false, false, false);
short aid;
short cid;
short lifeVersion;
if (putMsg == null) {
// the StoreKey must be a BlobId in this case (to get the account and container id)
aid = ((BlobId) id).getAccountId();
cid = ((BlobId) id).getContainerId();
lifeVersion = 0;
} else {
aid = putMsg.getAccountId();
cid = putMsg.getContainerId();
lifeVersion = putMsg.getLifeVersion();
}
ByteBuffer buffer = getDeleteMessage(id, aid, cid, CONSTANT_TIME_MS, lifeVersion);
for (MockHost host : hosts) {
// ok to send false for ttlUpdated
host.addMessage(partitionId, new MessageInfo(id, buffer.remaining(), true, false, false, Utils.Infinite_Time, null, aid, cid, CONSTANT_TIME_MS, lifeVersion), buffer.duplicate());
}
}
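A minimal usage sketch, assuming the surrounding ReplicationTestHelper test has already created the partition and hosts and recorded a put via addPutMessagesToReplicasOfPartition (all names other than the method above are assumptions drawn from that context):
// record a put on every replica, then mark the blob deleted everywhere
List<StoreKey> keys = addPutMessagesToReplicasOfPartition(partitionId, hosts, 1);
addDeleteMessagesToReplicasOfPartition(partitionId, keys.get(0), hosts);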
Use of com.github.ambry.store.MessageInfo in project ambry by linkedin.
The class ReplicationTestHelper, method addTtlUpdateMessagesToReplicasOfPartition.
/**
* For the given partitionId, constructs ttl update messages and adds them to the given lists.
* @param partitionId the {@link PartitionId} that the message belongs to.
* @param id the {@link StoreKey} to create a ttl update message for.
* @param hosts the list of {@link MockHost} all of which will be populated with the messages.
* @param expirationTime the expiration time (in ms) to set in the ttl update message.
* @throws MessageFormatException
* @throws IOException
*/
public static void addTtlUpdateMessagesToReplicasOfPartition(PartitionId partitionId, StoreKey id, List<MockHost> hosts, long expirationTime) throws MessageFormatException, IOException {
MessageInfo putMsg = getMessageInfo(id, hosts.get(0).infosByPartition.get(partitionId), false, false, false);
short aid;
short cid;
short lifeVersion;
if (putMsg == null) {
// the StoreKey must be a BlobId in this case (to get the account and container id)
aid = ((BlobId) id).getAccountId();
cid = ((BlobId) id).getContainerId();
lifeVersion = 0;
} else {
aid = putMsg.getAccountId();
cid = putMsg.getContainerId();
lifeVersion = putMsg.getLifeVersion();
}
ByteBuffer buffer = getTtlUpdateMessage(id, aid, cid, expirationTime, CONSTANT_TIME_MS);
for (MockHost host : hosts) {
host.addMessage(partitionId, new MessageInfo(id, buffer.remaining(), false, true, false, expirationTime, null, aid, cid, CONSTANT_TIME_MS, lifeVersion), buffer.duplicate());
}
}
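A similar sketch for ttl updates, under the same assumptions; Utils.Infinite_Time models an update that makes the blob permanent:
// record a put, then a ttl update to infinite expiry, on all replicas
List<StoreKey> keys = addPutMessagesToReplicasOfPartition(partitionId, hosts, 1);
addTtlUpdateMessagesToReplicasOfPartition(partitionId, keys.get(0), hosts, Utils.Infinite_Time);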
Use of com.github.ambry.store.MessageInfo in project ambry by linkedin.
The class ServerTestUtil, method endToEndReplicationWithMultiNodeMultiPartitionTest.
static void endToEndReplicationWithMultiNodeMultiPartitionTest(int interestedDataNodePortNumber, Port dataNode1Port, Port dataNode2Port, Port dataNode3Port, MockCluster cluster, SSLConfig clientSSLConfig1, SSLConfig clientSSLConfig2, SSLConfig clientSSLConfig3, SSLSocketFactory clientSSLSocketFactory1, SSLSocketFactory clientSSLSocketFactory2, SSLSocketFactory clientSSLSocketFactory3, MockNotificationSystem notificationSystem, boolean testEncryption) throws Exception {
// interestedDataNodePortNumber is used to locate the datanode and hence has to be PlainTextPort
MockClusterMap clusterMap = cluster.getClusterMap();
BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
List<AmbryServer> serverList = cluster.getServers();
byte[] usermetadata = new byte[100];
byte[] data = new byte[100];
byte[] encryptionKey = null;
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
BlobProperties properties = new BlobProperties(100, "serviceid1", null, null, false, TestUtils.TTL_SECS, cluster.time.milliseconds(), accountId, containerId, false, null, null, null);
long expectedExpiryTimeMs = getExpiryTimeMs(properties);
TestUtils.RANDOM.nextBytes(usermetadata);
TestUtils.RANDOM.nextBytes(data);
if (testEncryption) {
encryptionKey = new byte[100];
TestUtils.RANDOM.nextBytes(encryptionKey);
}
// connect to all the servers
ConnectedChannel channel1 = getBlockingChannelBasedOnPortType(dataNode1Port, "localhost", clientSSLSocketFactory1, clientSSLConfig1);
ConnectedChannel channel2 = getBlockingChannelBasedOnPortType(dataNode2Port, "localhost", clientSSLSocketFactory2, clientSSLConfig2);
ConnectedChannel channel3 = getBlockingChannelBasedOnPortType(dataNode3Port, "localhost", clientSSLSocketFactory3, clientSSLConfig3);
// put all the blobs to random servers
channel1.connect();
channel2.connect();
channel3.connect();
int noOfParallelThreads = 3;
int totalBlobsToPut = 50;
CountDownLatch latch = new CountDownLatch(noOfParallelThreads);
List<DirectSender> runnables = new ArrayList<DirectSender>(noOfParallelThreads);
ConnectedChannel channel = null;
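// with noOfParallelThreads == 3, this round-robin maps one distinct channel to each sender thread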
for (int i = 0; i < noOfParallelThreads; i++) {
if (i % noOfParallelThreads == 0) {
channel = channel1;
} else if (i % noOfParallelThreads == 1) {
channel = channel2;
} else if (i % noOfParallelThreads == 2) {
channel = channel3;
}
DirectSender runnable = new DirectSender(cluster, channel, totalBlobsToPut, data, usermetadata, properties, encryptionKey, latch);
runnables.add(runnable);
Thread threadToRun = new Thread(runnable);
threadToRun.start();
}
assertTrue("Did not put all blobs in 2 minutes", latch.await(2, TimeUnit.MINUTES));
// wait till replication can complete
List<BlobId> blobIds = new ArrayList<BlobId>();
for (int i = 0; i < runnables.size(); i++) {
blobIds.addAll(runnables.get(i).getBlobIds());
}
for (BlobId blobId : blobIds) {
notificationSystem.awaitBlobCreations(blobId.getID());
}
// Now that the blob is created and replicated, test the cases where a put request arrives for the same blob id
// later than replication.
testLatePutRequest(blobIds.get(0), properties, usermetadata, data, encryptionKey, channel1, channel2, channel3, ServerErrorCode.No_Error);
// Test the case where a put arrives with the same id as one in the server, but the blob is not identical.
BlobProperties differentProperties = new BlobProperties(properties.getBlobSize(), properties.getServiceId(), accountId, containerId, testEncryption, cluster.time.milliseconds());
testLatePutRequest(blobIds.get(0), differentProperties, usermetadata, data, encryptionKey, channel1, channel2, channel3, ServerErrorCode.Blob_Already_Exists);
byte[] differentUserMetadata = Arrays.copyOf(usermetadata, usermetadata.length);
differentUserMetadata[0] = (byte) ~differentUserMetadata[0];
testLatePutRequest(blobIds.get(0), properties, differentUserMetadata, data, encryptionKey, channel1, channel2, channel3, ServerErrorCode.Blob_Already_Exists);
byte[] differentData = Arrays.copyOf(data, data.length);
differentData[0] = (byte) ~differentData[0];
testLatePutRequest(blobIds.get(0), properties, usermetadata, differentData, encryptionKey, channel1, channel2, channel3, ServerErrorCode.Blob_Already_Exists);
// verify blob properties, metadata and blob across all nodes
for (int i = 0; i < 3; i++) {
channel = null;
if (i == 0) {
channel = channel1;
} else if (i == 1) {
channel = channel2;
} else if (i == 2) {
channel = channel3;
}
ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
for (int j = 0; j < blobIds.size(); j++) {
ArrayList<BlobId> ids = new ArrayList<BlobId>();
ids.add(blobIds.get(j));
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobIds.get(j).getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel.sendAndReceive(getRequest).getInputStream();
GetResponse resp = GetResponse.readFrom(stream, clusterMap);
try {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
assertEquals(100, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
assertEquals("Expiration time mismatch (props)", expectedExpiryTimeMs, getExpiryTimeMs(propertyOutput));
assertEquals("Expiration time mismatch (MessageInfo)", expectedExpiryTimeMs, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
fail();
}
// get user metadata
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
try {
ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp.getInputStream());
assertArrayEquals(usermetadata, userMetadataOutput.array());
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
assertEquals("Expiration time mismatch (MessageInfo)", expectedExpiryTimeMs, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
} catch (MessageFormatException e) {
e.printStackTrace();
fail();
}
releaseNettyBufUnderneathStream(stream);
// get blob
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
try {
BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
byte[] blobout = getBlobDataAndRelease(blobData);
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
assertEquals("Expiration time mismatch (MessageInfo)", expectedExpiryTimeMs, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
} catch (MessageFormatException e) {
e.printStackTrace();
fail();
}
releaseNettyBufUnderneathStream(stream);
// get blob all
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
try {
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
assertEquals("Expiration time mismatch (props)", expectedExpiryTimeMs, getExpiryTimeMs(blobAll.getBlobInfo().getBlobProperties()));
assertEquals("Expiration time mismatch (MessageInfo)", expectedExpiryTimeMs, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
} else {
assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
}
releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
e.printStackTrace();
fail();
}
}
}
// ttl update all blobs and wait for replication
Map<ConnectedChannel, List<BlobId>> channelToBlobIds = new HashMap<>();
for (int i = 0; i < blobIds.size(); i++) {
final BlobId blobId = blobIds.get(i);
if (i % 3 == 0) {
channelToBlobIds.computeIfAbsent(channel1, updateChannel -> new ArrayList<>()).add(blobId);
} else if (i % 3 == 1) {
channelToBlobIds.computeIfAbsent(channel2, updateChannel -> new ArrayList<>()).add(blobId);
} else {
channelToBlobIds.computeIfAbsent(channel3, updateChannel -> new ArrayList<>()).add(blobId);
}
}
channelToBlobIds.entrySet().stream().map(entry -> CompletableFuture.supplyAsync(() -> {
try {
for (BlobId blobId : entry.getValue()) {
updateBlobTtl(entry.getKey(), blobId, cluster.time.milliseconds());
}
return null;
} catch (Throwable e) {
throw new RuntimeException("Exception updating ttl for: " + entry, e);
}
})).forEach(CompletableFuture::join);
// check that the TTL update has propagated
blobIds.forEach(blobId -> notificationSystem.awaitBlobUpdates(blobId.getID(), UpdateType.TTL_UPDATE));
// check all servers
for (ConnectedChannel channelToUse : new ConnectedChannel[] { channel1, channel2, channel3 }) {
for (BlobId blobId : blobIds) {
checkTtlUpdateStatus(channelToUse, clusterMap, blobIdFactory, blobId, data, true, Utils.Infinite_Time);
}
}
// delete random blobs, wait for replication and ensure it is deleted in all nodes
Set<BlobId> blobsDeleted = new HashSet<BlobId>();
Set<BlobId> blobsChecked = new HashSet<BlobId>();
for (int i = 0; i < blobIds.size(); i++) {
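// delete each blob with probability 1/3, sending the delete over a randomly chosen channel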
int j = new Random().nextInt(3);
if (j == 0) {
j = new Random().nextInt(3);
if (j == 0) {
channel = channel1;
} else if (j == 1) {
channel = channel2;
} else if (j == 2) {
channel = channel3;
}
DeleteRequest deleteRequest = new DeleteRequest(1, "reptest", blobIds.get(i), System.currentTimeMillis());
DataInputStream deleteResponseStream = channel.sendAndReceive(deleteRequest).getInputStream();
DeleteResponse deleteResponse = DeleteResponse.readFrom(deleteResponseStream);
releaseNettyBufUnderneathStream(deleteResponseStream);
assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
blobsDeleted.add(blobIds.get(i));
}
}
Iterator<BlobId> iterator = blobsDeleted.iterator();
ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
while (iterator.hasNext()) {
BlobId deletedId = iterator.next();
notificationSystem.awaitBlobDeletions(deletedId.getID());
for (int j = 0; j < 3; j++) {
if (j == 0) {
channel = channel1;
} else if (j == 1) {
channel = channel2;
} else if (j == 2) {
channel = channel3;
}
ArrayList<BlobId> ids = new ArrayList<BlobId>();
ids.add(deletedId);
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(deletedId.getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel.sendAndReceive(getRequest).getInputStream();
GetResponse resp = GetResponse.readFrom(stream, clusterMap);
assertEquals(ServerErrorCode.Blob_Deleted, resp.getPartitionResponseInfoList().get(0).getErrorCode());
releaseNettyBufUnderneathStream(stream);
}
}
// take a server down, clean up a mount path, start and ensure replication fixes it
serverList.get(0).shutdown();
serverList.get(0).awaitShutdown();
MockDataNodeId dataNode = (MockDataNodeId) clusterMap.getDataNodeId("localhost", interestedDataNodePortNumber);
System.out.println("Cleaning mount path " + dataNode.getMountPaths().get(0));
for (ReplicaId replicaId : clusterMap.getReplicaIds(dataNode)) {
if (replicaId.getMountPath().compareToIgnoreCase(dataNode.getMountPaths().get(0)) == 0) {
System.out.println("Cleaning partition " + replicaId.getPartitionId());
}
}
deleteFolderContent(new File(dataNode.getMountPaths().get(0)), false);
for (int i = 0; i < blobIds.size(); i++) {
for (ReplicaId replicaId : blobIds.get(i).getPartition().getReplicaIds()) {
if (replicaId.getMountPath().compareToIgnoreCase(dataNode.getMountPaths().get(0)) == 0) {
if (blobsDeleted.contains(blobIds.get(i))) {
notificationSystem.decrementDeletedReplica(blobIds.get(i).getID(), dataNode.getHostname(), dataNode.getPort());
} else {
notificationSystem.decrementCreatedReplica(blobIds.get(i).getID(), dataNode.getHostname(), dataNode.getPort());
notificationSystem.decrementUpdatedReplica(blobIds.get(i).getID(), dataNode.getHostname(), dataNode.getPort(), UpdateType.TTL_UPDATE);
}
}
}
}
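// restart the server; replication should restore the contents of the cleaned mount path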
cluster.reinitServer(0);
channel1.disconnect();
channel1.connect();
for (int j = 0; j < blobIds.size(); j++) {
if (blobsDeleted.contains(blobIds.get(j))) {
notificationSystem.awaitBlobDeletions(blobIds.get(j).getID());
} else {
notificationSystem.awaitBlobCreations(blobIds.get(j).getID());
notificationSystem.awaitBlobUpdates(blobIds.get(j).getID(), UpdateType.TTL_UPDATE);
}
ArrayList<BlobId> ids = new ArrayList<BlobId>();
ids.add(blobIds.get(j));
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobIds.get(j).getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel1.sendAndReceive(getRequest).getInputStream();
GetResponse resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsDeleted.contains(blobIds.get(j)));
} else {
try {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
assertEquals(100, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
// get user metadata
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
stream = channel1.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsDeleted.contains(blobIds.get(j)));
} else {
try {
ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp.getInputStream());
assertArrayEquals(usermetadata, userMetadataOutput.array());
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
// get blob
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
stream = channel1.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsDeleted.contains(blobIds.get(j)));
} else {
try {
BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
byte[] blobout = getBlobDataAndRelease(blobData);
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
// get blob all
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
stream = channel1.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsDeleted.contains(blobIds.get(j)));
blobsDeleted.remove(blobIds.get(j));
blobsChecked.add(blobIds.get(j));
} else {
try {
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
} else {
assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
}
assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
}
assertEquals(0, blobsDeleted.size());
// take a server down, clean all contents, start and ensure replication fixes it
serverList.get(0).shutdown();
serverList.get(0).awaitShutdown();
dataNode = (MockDataNodeId) clusterMap.getDataNodeId("localhost", interestedDataNodePortNumber);
for (int i = 0; i < dataNode.getMountPaths().size(); i++) {
System.out.println("Cleaning mount path " + dataNode.getMountPaths().get(i));
for (ReplicaId replicaId : clusterMap.getReplicaIds(dataNode)) {
if (replicaId.getMountPath().compareToIgnoreCase(dataNode.getMountPaths().get(i)) == 0) {
System.out.println("Cleaning partition " + replicaId.getPartitionId());
}
}
deleteFolderContent(new File(dataNode.getMountPaths().get(i)), false);
}
for (int i = 0; i < blobIds.size(); i++) {
if (blobsChecked.contains(blobIds.get(i))) {
notificationSystem.decrementDeletedReplica(blobIds.get(i).getID(), dataNode.getHostname(), dataNode.getPort());
} else {
notificationSystem.decrementCreatedReplica(blobIds.get(i).getID(), dataNode.getHostname(), dataNode.getPort());
}
}
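// restart the server; replication should restore all of the wiped contents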
cluster.reinitServer(0);
channel1.disconnect();
channel1.connect();
for (int j = 0; j < blobIds.size(); j++) {
if (blobsChecked.contains(blobIds.get(j))) {
notificationSystem.awaitBlobDeletions(blobIds.get(j).getID());
} else {
notificationSystem.awaitBlobCreations(blobIds.get(j).getID());
}
ArrayList<BlobId> ids = new ArrayList<BlobId>();
ids.add(blobIds.get(j));
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobIds.get(j).getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel1.sendAndReceive(getRequest).getInputStream();
GetResponse resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsChecked.contains(blobIds.get(j)));
} else {
try {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
assertEquals(100, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
// get user metadata
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
stream = channel1.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsChecked.contains(blobIds.get(j)));
} else {
try {
ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp.getInputStream());
assertArrayEquals(usermetadata, userMetadataOutput.array());
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
// get blob
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
stream = channel1.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsChecked.contains(blobIds.get(j)));
} else {
try {
BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
byte[] blobout = getBlobDataAndRelease(blobData);
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
// get blob all
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
stream = channel1.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsChecked.contains(blobIds.get(j)));
blobsChecked.remove(blobIds.get(j));
blobsDeleted.add(blobIds.get(j));
} else {
try {
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
} else {
assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
}
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
}
assertEquals(0, blobsChecked.size());
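// run the undelete/re-delete cycle twice; each undelete bumps the life version (1 on the first pass, 2 on the second)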
short expectedLifeVersion = 1;
for (int i = 0; i < 2; i++) {
expectedLifeVersion += i;
// First undelete all deleted blobs
for (BlobId deletedId : blobsDeleted) {
UndeleteRequest undeleteRequest = new UndeleteRequest(2, "reptest", deletedId, System.currentTimeMillis());
DataInputStream undeleteResponseStream = channel3.sendAndReceive(undeleteRequest).getInputStream();
UndeleteResponse undeleteResponse = UndeleteResponse.readFrom(undeleteResponseStream);
releaseNettyBufUnderneathStream(undeleteResponseStream);
assertEquals(ServerErrorCode.No_Error, undeleteResponse.getError());
assertEquals(expectedLifeVersion, undeleteResponse.getLifeVersion());
}
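// give the undeletes time to replicate to the other nodes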
Thread.sleep(5000);
// Then use get request to get all the data back and make sure the lifeVersion is correct
for (BlobId id : blobsDeleted) {
// We don't need to wait for blob undeletes: one of the hosts had its put record deleted from
// disk, so undeleting this blob ends up replicating the put record instead of the undelete.
// notificationSystem.awaitBlobUndeletes(id.toString());
ArrayList<BlobId> ids = new ArrayList<>();
ids.add(id);
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(id.getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
// get blob all
GetRequest getRequest = new GetRequest(1, "clientid20", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel1.sendAndReceive(getRequest).getInputStream();
GetResponse resp = GetResponse.readFrom(stream, clusterMap);
assertEquals(ServerErrorCode.No_Error, resp.getError());
assertEquals(1, resp.getPartitionResponseInfoList().size());
assertEquals(ServerErrorCode.No_Error, resp.getPartitionResponseInfoList().get(0).getErrorCode());
assertEquals(1, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().size());
MessageInfo info = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0);
assertEquals(expectedLifeVersion, info.getLifeVersion());
assertFalse(info.isDeleted());
try {
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
} else {
assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
}
assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
fail();
}
}
for (BlobId id : blobsDeleted) {
DeleteRequest deleteRequest = new DeleteRequest(1, "reptest", id, System.currentTimeMillis());
DataInputStream deleteResponseStream = channel.sendAndReceive(deleteRequest).getInputStream();
DeleteResponse deleteResponse = DeleteResponse.readFrom(deleteResponseStream);
releaseNettyBufUnderneathStream(deleteResponseStream);
assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
}
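// give the deletes time to replicate before verifying with GetOption.Include_All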
Thread.sleep(1000);
for (BlobId id : blobsDeleted) {
ArrayList<BlobId> ids = new ArrayList<>();
ids.add(id);
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(id.getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
// get blob all
GetRequest getRequest = new GetRequest(1, "clientid200", MessageFormatFlags.All, partitionRequestInfoList, GetOption.Include_All);
DataInputStream stream = channel1.sendAndReceive(getRequest).getInputStream();
GetResponse resp = GetResponse.readFrom(stream, clusterMap);
assertEquals(ServerErrorCode.No_Error, resp.getError());
assertEquals(1, resp.getPartitionResponseInfoList().size());
assertEquals(ServerErrorCode.No_Error, resp.getPartitionResponseInfoList().get(0).getErrorCode());
assertEquals(1, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().size());
MessageInfo info = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0);
assertEquals(expectedLifeVersion, info.getLifeVersion());
assertTrue(info.isDeleted());
try {
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
} else {
assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
}
assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
fail();
}
}
}
channel1.disconnect();
channel2.disconnect();
channel3.disconnect();
}
Use of com.github.ambry.store.MessageInfo in project ambry by linkedin.
The class AmbryServer, method startup.
public void startup() throws InstantiationException {
try {
logger.info("starting");
clusterParticipants = clusterAgentsFactory.getClusterParticipants();
logger.info("Setting up JMX.");
long startTime = SystemTime.getInstance().milliseconds();
reporter = reporterFactory != null ? reporterFactory.apply(registry) : JmxReporter.forRegistry(registry).build();
reporter.start();
logger.info("creating configs");
NetworkConfig networkConfig = new NetworkConfig(properties);
StoreConfig storeConfig = new StoreConfig(properties);
DiskManagerConfig diskManagerConfig = new DiskManagerConfig(properties);
ServerConfig serverConfig = new ServerConfig(properties);
ReplicationConfig replicationConfig = new ReplicationConfig(properties);
ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(properties);
SSLConfig sslConfig = new SSLConfig(properties);
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
StatsManagerConfig statsConfig = new StatsManagerConfig(properties);
CloudConfig cloudConfig = new CloudConfig(properties);
// verify the configs
properties.verify();
scheduler = Utils.newScheduler(serverConfig.serverSchedulerNumOfthreads, false);
// If there is more than one participant on the local node, schedule a consistency checker to monitor and alert on any
// mismatch in the sealed/stopped replica lists maintained by each participant.
if (clusterParticipants != null && clusterParticipants.size() > 1 && serverConfig.serverParticipantsConsistencyCheckerPeriodSec > 0) {
consistencyChecker = new ParticipantsConsistencyChecker(clusterParticipants, metrics);
logger.info("Scheduling participants consistency checker with a period of {} secs", serverConfig.serverParticipantsConsistencyCheckerPeriodSec);
consistencyCheckerScheduler = Utils.newScheduler(1, "consistency-checker-", false);
consistencyCheckerTask = consistencyCheckerScheduler.scheduleAtFixedRate(consistencyChecker, 0, serverConfig.serverParticipantsConsistencyCheckerPeriodSec, TimeUnit.SECONDS);
}
logger.info("checking if node exists in clustermap host {} port {}", networkConfig.hostName, networkConfig.port);
DataNodeId nodeId = clusterMap.getDataNodeId(networkConfig.hostName, networkConfig.port);
if (nodeId == null) {
throw new IllegalArgumentException("The node " + networkConfig.hostName + ":" + networkConfig.port + " is not present in the clustermap. Failing to start the datanode");
}
AccountServiceFactory accountServiceFactory = Utils.getObj(serverConfig.serverAccountServiceFactory, properties, registry);
AccountService accountService = accountServiceFactory.getAccountService();
StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
// In most cases, there should be only one participant in the clusterParticipants list. If there are more than one
// and some components require sole participant, the first one in the list will be primary participant.
storageManager = new StorageManager(storeConfig, diskManagerConfig, scheduler, registry, storeKeyFactory, clusterMap, nodeId, new BlobStoreHardDelete(), clusterParticipants, time, new BlobStoreRecovery(), accountService);
storageManager.start();
SSLFactory sslFactory = new NettySslHttp2Factory(sslConfig);
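// choose the replication connection pool implementation based on whether HTTP/2 replication is enabled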
if (clusterMapConfig.clusterMapEnableHttp2Replication) {
connectionPool = new Http2BlockingChannelPool(sslFactory, new Http2ClientConfig(properties), new Http2ClientMetrics(registry));
} else {
connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, registry);
}
connectionPool.start();
StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory, properties, registry);
Predicate<MessageInfo> skipPredicate = new ReplicationSkipPredicate(accountService, replicationConfig);
replicationManager = new ReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, storeKeyFactory, clusterMap, scheduler, nodeId, connectionPool, registry, notificationSystem, storeKeyConverterFactory, serverConfig.serverMessageTransformer, clusterParticipants.get(0), skipPredicate);
replicationManager.start();
if (replicationConfig.replicationEnabledWithVcrCluster) {
logger.info("Creating Helix cluster spectator for cloud to store replication.");
vcrClusterSpectator = _vcrClusterAgentsFactory.getVcrClusterSpectator(cloudConfig, clusterMapConfig);
cloudToStoreReplicationManager = new CloudToStoreReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, storeKeyFactory, clusterMap, scheduler, nodeId, connectionPool, registry, notificationSystem, storeKeyConverterFactory, serverConfig.serverMessageTransformer, vcrClusterSpectator, clusterParticipants.get(0));
cloudToStoreReplicationManager.start();
}
logger.info("Creating StatsManager to publish stats");
accountStatsMySqlStore = statsConfig.enableMysqlReport ? (AccountStatsMySqlStore) new AccountStatsMySqlStoreFactory(properties, clusterMapConfig, registry).getAccountStatsStore() : null;
statsManager = new StatsManager(storageManager, clusterMap.getReplicaIds(nodeId), registry, statsConfig, time, clusterParticipants.get(0), accountStatsMySqlStore, accountService);
if (serverConfig.serverStatsPublishLocalEnabled) {
statsManager.start();
}
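// bind the plaintext port and, if this node has one, the SSL port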
ArrayList<Port> ports = new ArrayList<Port>();
ports.add(new Port(networkConfig.port, PortType.PLAINTEXT));
if (nodeId.hasSSLPort()) {
ports.add(new Port(nodeId.getSSLPort(), PortType.SSL));
}
networkServer = new SocketServer(networkConfig, sslConfig, registry, ports);
FindTokenHelper findTokenHelper = new FindTokenHelper(storeKeyFactory, replicationConfig);
requests = new AmbryServerRequests(storageManager, networkServer.getRequestResponseChannel(), clusterMap, nodeId, registry, metrics, findTokenHelper, notificationSystem, replicationManager, storeKeyFactory, serverConfig, storeKeyConverterFactory, statsManager, clusterParticipants.get(0));
requestHandlerPool = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, networkServer.getRequestResponseChannel(), requests);
networkServer.start();
// Start netty http2 server
if (nodeId.hasHttp2Port()) {
NettyConfig nettyConfig = new NettyConfig(properties);
NettyMetrics nettyMetrics = new NettyMetrics(registry);
Http2ServerMetrics http2ServerMetrics = new Http2ServerMetrics(registry);
Http2ClientConfig http2ClientConfig = new Http2ClientConfig(properties);
logger.info("Http2 port {} is enabled. Starting HTTP/2 service.", nodeId.getHttp2Port());
NettyServerRequestResponseChannel requestResponseChannel = new NettyServerRequestResponseChannel(networkConfig.queuedMaxRequests, http2ServerMetrics);
AmbryServerRequests ambryServerRequestsForHttp2 = new AmbryServerRequests(storageManager, requestResponseChannel, clusterMap, nodeId, registry, metrics, findTokenHelper, notificationSystem, replicationManager, storeKeyFactory, serverConfig, storeKeyConverterFactory, statsManager, clusterParticipants.get(0));
requestHandlerPoolForHttp2 = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, requestResponseChannel, ambryServerRequestsForHttp2);
NioServerFactory nioServerFactory = new StorageServerNettyFactory(nodeId.getHttp2Port(), requestResponseChannel, sslFactory, nettyConfig, http2ClientConfig, metrics, nettyMetrics, http2ServerMetrics, serverSecurityService);
nettyHttp2Server = nioServerFactory.getNioServer();
nettyHttp2Server.start();
}
// Other code
List<AmbryStatsReport> ambryStatsReports = new ArrayList<>();
Set<String> validStatsTypes = new HashSet<>();
for (StatsReportType type : StatsReportType.values()) {
validStatsTypes.add(type.toString());
}
if (serverConfig.serverStatsPublishReportEnabled) {
serverConfig.serverStatsReportsToPublish.forEach(e -> {
if (validStatsTypes.contains(e)) {
ambryStatsReports.add(new AmbryStatsReportImpl(serverConfig.serverQuotaStatsAggregateIntervalInMinutes, StatsReportType.valueOf(e)));
}
});
}
if (vcrClusterSpectator != null) {
vcrClusterSpectator.spectate();
}
Callback<StatsSnapshot> accountServiceCallback = new AccountServiceCallback(accountService);
for (ClusterParticipant clusterParticipant : clusterParticipants) {
clusterParticipant.participate(ambryStatsReports, accountStatsMySqlStore, accountServiceCallback);
}
if (nettyInternalMetrics != null) {
nettyInternalMetrics.start();
logger.info("NettyInternalMetric starts");
}
logger.info("started");
long processingTime = SystemTime.getInstance().milliseconds() - startTime;
metrics.serverStartTimeInMs.update(processingTime);
logger.info("Server startup time in Ms {}", processingTime);
} catch (Exception e) {
logger.error("Error during startup", e);
throw new InstantiationException("failure during startup " + e);
}
}
Use of com.github.ambry.store.MessageInfo in project ambry by linkedin.
The class VcrRecoveryTest, method testGetOnRecoveryNode.
/**
* Do a get on the recovery node to verify that all the blob ids uploaded to the vcr node have been recovered on the recovery node.
* @param blobIdToSizeMap {@link Map} from blob id to the size of the blob uploaded to the vcr node.
* @throws IOException if there is an I/O error while sending or receiving the get request.
*/
private void testGetOnRecoveryNode(Map<BlobId, Integer> blobIdToSizeMap) throws IOException {
ConnectedChannel channel = ServerTestUtil.getBlockingChannelBasedOnPortType(recoveryNodePort, "localhost", null, null);
channel.connect();
AtomicInteger correlationIdGenerator = new AtomicInteger(0);
List<PartitionRequestInfo> partitionRequestInfoList = Collections.singletonList(new PartitionRequestInfo(partitionId, blobIds));
GetRequest getRequest = new GetRequest(correlationIdGenerator.incrementAndGet(), GetRequest.Replication_Client_Id_Prefix + recoveryNode.getHostname(), MessageFormatFlags.All, partitionRequestInfoList, new ReplicationConfig(new VerifiableProperties(recoveryProperties)).replicationIncludeAll ? GetOption.Include_All : GetOption.None);
channel.send(getRequest);
GetResponse getResponse = GetResponse.readFrom(channel.receive().getInputStream(), recoveryCluster.getClusterMap());
for (PartitionResponseInfo partitionResponseInfo : getResponse.getPartitionResponseInfoList()) {
assertEquals("Error in getting the recovered blobs", ServerErrorCode.No_Error, partitionResponseInfo.getErrorCode());
// The previously expected per-blob overhead was 272 bytes; two 4-byte fields added to the blob property record bring it to 280.
for (MessageInfo messageInfo : partitionResponseInfo.getMessageInfoList()) {
assertEquals(blobIdToSizeMap.get(messageInfo.getStoreKey()) + 280, messageInfo.getSize());
}
}
}
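A sketch of how this helper might be driven, assuming blobs of a known uniform size were already uploaded to the vcr node (blobIds and blobSize here are assumptions drawn from the surrounding test):
// map each uploaded blob id to its size, then verify all of them on the recovery node
Map<BlobId, Integer> blobIdToSizeMap = new HashMap<>();
for (BlobId blobId : blobIds) {
blobIdToSizeMap.put(blobId, blobSize);
}
testGetOnRecoveryNode(blobIdToSizeMap);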