use of com.github.ambry.store.MessageInfo in project ambry by linkedin.
the class ReplicationTest method replicaFromInactiveToOfflineTest.
/**
 * Test INACTIVE -> OFFLINE transition on existing replica (both success and failure cases)
 */
@Test
public void replicaFromInactiveToOfflineTest() throws Exception {
  MockClusterMap clusterMap = new MockClusterMap();
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
  MockHelixParticipant.metricRegistry = new MetricRegistry();
  MockHelixParticipant mockHelixParticipant = new MockHelixParticipant(clusterMapConfig);
  Pair<StorageManager, ReplicationManager> managers = createStorageManagerAndReplicationManager(clusterMap, clusterMapConfig, mockHelixParticipant);
  StorageManager storageManager = managers.getFirst();
  MockReplicationManager replicationManager = (MockReplicationManager) managers.getSecond();
  // 1. test replica not found case
  try {
    mockHelixParticipant.onPartitionBecomeOfflineFromInactive("-1");
    fail("should fail because of invalid partition");
  } catch (StateTransitionException e) {
    assertEquals("Error code doesn't match", ReplicaNotFound, e.getErrorCode());
  }
  // 2. test store not started case
  PartitionId existingPartition = replicationManager.partitionToPartitionInfo.keySet().iterator().next();
  storageManager.shutdownBlobStore(existingPartition);
  try {
    mockHelixParticipant.onPartitionBecomeOfflineFromInactive(existingPartition.toPathString());
    fail("should fail because store is not started");
  } catch (StateTransitionException e) {
    assertEquals("Error code doesn't match", StoreNotStarted, e.getErrorCode());
  }
  storageManager.startBlobStore(existingPartition);
  // before testing the success case, write a blob (size = 100) into the local store and add a delete record for the new blob
  Store localStore = storageManager.getStore(existingPartition);
  MockId id = new MockId(TestUtils.getRandomString(10), Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM));
  long crc = (new Random()).nextLong();
  long blobSize = 100;
  MessageInfo info = new MessageInfo(id, blobSize, false, false, Utils.Infinite_Time, crc, id.getAccountId(), id.getContainerId(), Utils.Infinite_Time);
  List<MessageInfo> infos = new ArrayList<>();
  List<ByteBuffer> buffers = new ArrayList<>();
  ByteBuffer buffer = ByteBuffer.wrap(TestUtils.getRandomBytes((int) blobSize));
  infos.add(info);
  buffers.add(buffer);
  localStore.put(new MockMessageWriteSet(infos, buffers));
  // delete the blob
  int deleteRecordSize = (int) (new DeleteMessageFormatInputStream(id, (short) 0, (short) 0, 0).getSize());
  MessageInfo deleteInfo = new MessageInfo(id, deleteRecordSize, id.getAccountId(), id.getContainerId(), time.milliseconds());
  localStore.delete(Collections.singletonList(deleteInfo));
  int sizeOfPutAndHeader = 100 + 18;
  int sizeOfWhole = sizeOfPutAndHeader + deleteRecordSize;
  // note that the end offset of the last PUT = 100 + 18 = 118, and the end offset of the store is sizeOfWhole
  // 3. test success case (create a new thread and trigger INACTIVE -> OFFLINE transition)
  ReplicaId localReplica = storageManager.getReplica(existingPartition.toPathString());
  // put a decommission-in-progress file into the local store dir
  File decommissionFile = new File(localReplica.getReplicaPath(), "decommission_in_progress");
  assertTrue("Couldn't create decommission file in local store", decommissionFile.createNewFile());
  decommissionFile.deleteOnExit();
  assertNotSame("Before disconnection, the local store state shouldn't be OFFLINE", ReplicaState.OFFLINE, localStore.getCurrentState());
  mockHelixParticipant.registerPartitionStateChangeListener(StateModelListenerType.ReplicationManagerListener, replicationManager.replicationListener);
  CountDownLatch participantLatch = new CountDownLatch(1);
  replicationManager.listenerExecutionLatch = new CountDownLatch(1);
  Utils.newThread(() -> {
    mockHelixParticipant.onPartitionBecomeOfflineFromInactive(existingPartition.toPathString());
    participantLatch.countDown();
  }, false).start();
  assertTrue("Partition state change listener in ReplicationManager didn't get called within 1 sec", replicationManager.listenerExecutionLatch.await(1, TimeUnit.SECONDS));
  // the state of the local store should be updated to OFFLINE
  assertEquals("Local store state is not expected", ReplicaState.OFFLINE, localStore.getCurrentState());
  // update replication lag between local and peer replicas
  List<RemoteReplicaInfo> remoteReplicaInfos = replicationManager.partitionToPartitionInfo.get(existingPartition).getRemoteReplicaInfos();
  ReplicaId peerReplica1 = remoteReplicaInfos.get(0).getReplicaId();
  ReplicaId peerReplica2 = remoteReplicaInfos.get(1).getReplicaId();
  // peer1 catches up with the last PUT, peer2 catches up with the end offset of the local store; in this case, sync-up is not complete
  replicationManager.updateTotalBytesReadByRemoteReplica(existingPartition, peerReplica1.getDataNodeId().getHostname(), peerReplica1.getReplicaPath(), sizeOfPutAndHeader);
  replicationManager.updateTotalBytesReadByRemoteReplica(existingPartition, peerReplica2.getDataNodeId().getHostname(), peerReplica2.getReplicaPath(), sizeOfWhole);
  assertFalse("Only one peer replica has fully caught up with end offset so sync-up should not complete", mockHelixParticipant.getReplicaSyncUpManager().isSyncUpComplete(localReplica));
  // make peer1 catch up with the end offset
  replicationManager.updateTotalBytesReadByRemoteReplica(existingPartition, peerReplica1.getDataNodeId().getHostname(), peerReplica1.getReplicaPath(), sizeOfWhole);
  // now sync-up should complete and the transition should be able to proceed
  assertTrue("Inactive-To-Offline transition didn't complete within 1 sec", participantLatch.await(1, TimeUnit.SECONDS));
  assertFalse("Local store should be stopped after transition", localStore.isStarted());
  storageManager.shutdown();
}
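The catch-up check that gates the transition above compares each peer's acknowledged bytes against the local store's end offset. The standalone sketch below illustrates that rule only; the class and method names are assumptions for illustration, not Ambry's actual ReplicaSyncUpManager API, and Ambry's real quorum policy may differ from the "all tracked peers" rule used here.

// Minimal sketch of the catch-up rule the test exercises: sync-up is treated as
// complete once every tracked peer has read at least up to the local end offset
// (e.g. sizeOfWhole above). Illustrative only; not Ambry's real API.
import java.util.HashMap;
import java.util.Map;

class SyncUpTrackerSketch {
  private final long localEndOffset;
  private final Map<String, Long> peerBytesRead = new HashMap<>();

  SyncUpTrackerSketch(long localEndOffset) {
    this.localEndOffset = localEndOffset;
  }

  void updateTotalBytesRead(String peerId, long totalBytesRead) {
    peerBytesRead.put(peerId, totalBytesRead);
  }

  boolean isSyncUpComplete() {
    // incomplete until every registered peer has caught up with the local end offset
    return !peerBytesRead.isEmpty() && peerBytesRead.values().stream().allMatch(bytes -> bytes >= localEndOffset);
  }
}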
use of com.github.ambry.store.MessageInfo in project ambry by linkedin.
the class ReplicationTestHelper method addUndeleteMessagesToReplicasOfPartition.
public static void addUndeleteMessagesToReplicasOfPartition(PartitionId partitionId, StoreKey id, List<MockHost> hosts, short lifeVersion) throws MessageFormatException, IOException {
  for (MockHost host : hosts) {
    MessageInfo latestInfo = getMergedMessageInfo(id, host.infosByPartition.get(partitionId));
    short aid;
    short cid;
    long expirationTime;
    if (latestInfo == null) {
      aid = ((BlobId) id).getAccountId();
      cid = ((BlobId) id).getContainerId();
      expirationTime = EXPIRY_TIME_MS;
    } else {
      aid = latestInfo.getAccountId();
      cid = latestInfo.getContainerId();
      expirationTime = latestInfo.getExpirationTimeInMs();
    }
    ByteBuffer buffer = getUndeleteMessage(id, aid, cid, lifeVersion, CONSTANT_TIME_MS);
    host.addMessage(partitionId, new MessageInfo(id, buffer.remaining(), false, false, true, expirationTime, null, aid, cid, CONSTANT_TIME_MS, lifeVersion), buffer.duplicate());
  }
}
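A call from a test might look like the following sketch; the partition, blob id, and host list are hypothetical fixtures assumed to exist in the surrounding test, and lifeVersion 1 is just an example value.

// Hypothetical usage (fixtures assumed from the surrounding test setup):
// record an undelete at lifeVersion 1 for an already-deleted blob on both mock hosts.
addUndeleteMessagesToReplicasOfPartition(partitionId, blobId, Arrays.asList(localHost, remoteHost), (short) 1);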
use of com.github.ambry.store.MessageInfo in project ambry by linkedin.
the class ReplicationTestHelper method getMergedMessageInfo.
/**
 * Returns a merged {@link MessageInfo} for {@code key}
 * @param key the {@link StoreKey} to look for
 * @param partitionInfos the {@link MessageInfo}s for the partition
 * @return a merged {@link MessageInfo} for {@code key}
 */
public static MessageInfo getMergedMessageInfo(StoreKey key, List<MessageInfo> partitionInfos) {
  MessageInfo info = getMessageInfo(key, partitionInfos, true, true, false);
  if (info == null) {
    info = getMessageInfo(key, partitionInfos, false, false, false);
  }
  MessageInfo ttlUpdateInfo = getMessageInfo(key, partitionInfos, false, false, true);
  if (ttlUpdateInfo != null) {
    info = new MessageInfo(info.getStoreKey(), info.getSize(), info.isDeleted(), true, info.isUndeleted(), ttlUpdateInfo.getExpirationTimeInMs(), info.getCrc(), info.getAccountId(), info.getContainerId(), info.getOperationTimeMs(), info.getLifeVersion());
  }
  return info;
}
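As a hedged illustration of the merge: suppose the partition's infos (built elsewhere in the test) contain a PUT, a TTL update, and then a DELETE for the same key. The merged result carries the latest delete/undelete state together with the TTL-update flag and the updated expiration time. The variable names below are assumptions.

// Illustration only: `key` and `infos` are assumed to come from the test's setup.
MessageInfo merged = getMergedMessageInfo(key, infos);
// With a PUT, a TTL update and a DELETE present for `key`, one would expect
// merged.isDeleted() == true and merged.isTtlUpdated() == true,
// with the expiration time taken from the TTL update record.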
use of com.github.ambry.store.MessageInfo in project ambry by linkedin.
the class ReplicationTestHelper method addDeleteMessagesToReplicasOfPartition.
public static void addDeleteMessagesToReplicasOfPartition(PartitionId partitionId, StoreKey id, List<MockHost> hosts, short lifeVersion, long expirationTime) throws MessageFormatException, IOException {
  short accountId = ((BlobId) id).getAccountId();
  short containerId = ((BlobId) id).getContainerId();
  ByteBuffer buffer = getDeleteMessage(id, accountId, containerId, CONSTANT_TIME_MS, lifeVersion);
  for (MockHost host : hosts) {
    // ok to send false for ttlUpdated
    host.addMessage(partitionId, new MessageInfo(id, buffer.remaining(), true, false, false, expirationTime, null, accountId, containerId, CONSTANT_TIME_MS, lifeVersion), buffer.duplicate());
  }
}
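Typical usage mirrors the undelete helper above; a hedged example with assumed fixture names:

// Hypothetical usage: add a delete record at lifeVersion 0 with no expiry for a
// previously put blob on both mock hosts; identifiers come from the test setup.
addDeleteMessagesToReplicasOfPartition(partitionId, blobId, Arrays.asList(localHost, remoteHost), (short) 0, Utils.Infinite_Time);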
use of com.github.ambry.store.MessageInfo in project ambry by linkedin.
the class ReplicationTestHelper method createPutMessage.
/**
 * Constructs an entire message with header, blob properties, user metadata and blob content.
 * @param id id for which the message has to be constructed.
 * @param accountId accountId of the blob
 * @param containerId containerId of the blob
 * @param enableEncryption {@code true} if encryption needs to be enabled. {@code false} otherwise
 * @param lifeVersion lifeVersion for which the message has to be constructed.
 * @return a {@link PutMsgInfoAndBuffer} wrapping the entire message as a {@link ByteBuffer} and the associated
 *         {@link MessageInfo}
 * @throws MessageFormatException
 * @throws IOException
 */
public static PutMsgInfoAndBuffer createPutMessage(StoreKey id, short accountId, short containerId, boolean enableEncryption, short lifeVersion) throws MessageFormatException, IOException {
  Random blobIdRandom = new Random(id.getID().hashCode());
  int blobSize = blobIdRandom.nextInt(500) + 501;
  int userMetadataSize = blobIdRandom.nextInt(blobSize / 2);
  int encryptionKeySize = blobIdRandom.nextInt(blobSize / 4);
  byte[] blob = new byte[blobSize];
  byte[] usermetadata = new byte[userMetadataSize];
  byte[] encryptionKey = enableEncryption ? new byte[encryptionKeySize] : null;
  blobIdRandom.nextBytes(blob);
  blobIdRandom.nextBytes(usermetadata);
  BlobProperties blobProperties = new BlobProperties(blobSize, "test", null, null, false, EXPIRY_TIME_MS - CONSTANT_TIME_MS, CONSTANT_TIME_MS, accountId, containerId, encryptionKey != null, null, null, null);
  MessageFormatInputStream stream = new PutMessageFormatInputStream(id, encryptionKey == null ? null : ByteBuffer.wrap(encryptionKey), blobProperties, ByteBuffer.wrap(usermetadata), new ByteBufferInputStream(ByteBuffer.wrap(blob)), blobSize, BlobType.DataBlob, lifeVersion);
  byte[] message = Utils.readBytesFromStream(stream, (int) stream.getSize());
  return new PutMsgInfoAndBuffer(ByteBuffer.wrap(message), new MessageInfo(id, message.length, false, false, false, EXPIRY_TIME_MS, null, accountId, containerId, CONSTANT_TIME_MS, lifeVersion));
}
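A test would typically feed the result into a mock host so the message can later be replicated. A minimal sketch, assuming blobId, partitionId and localHost are existing fixtures and that PutMsgInfoAndBuffer exposes its buffer and MessageInfo as fields (as suggested by the constructor call above):

// Hypothetical usage: build a put message at lifeVersion 0 without encryption and
// register it with a mock host. Field names on PutMsgInfoAndBuffer are assumed.
PutMsgInfoAndBuffer msgAndBuffer = createPutMessage(blobId, blobId.getAccountId(), blobId.getContainerId(), false, (short) 0);
localHost.addMessage(partitionId, msgAndBuffer.messageInfo, msgAndBuffer.byteBuffer);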