Use of com.github.ambry.messageformat.MessageFormatException in project ambry by LinkedIn.
The class ServerTestUtil, method endToEndReplicationWithMultiNodeMultiPartitionTest.
static void endToEndReplicationWithMultiNodeMultiPartitionTest(int interestedDataNodePortNumber, Port dataNode1Port, Port dataNode2Port, Port dataNode3Port, MockCluster cluster, SSLConfig clientSSLConfig1, SSLConfig clientSSLConfig2, SSLConfig clientSSLConfig3, SSLSocketFactory clientSSLSocketFactory1, SSLSocketFactory clientSSLSocketFactory2, SSLSocketFactory clientSSLSocketFactory3, MockNotificationSystem notificationSystem, boolean testEncryption) throws Exception {
// interestedDataNodePortNumber is used to locate the datanode and hence has to be PlainTextPort
MockClusterMap clusterMap = cluster.getClusterMap();
BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
List<AmbryServer> serverList = cluster.getServers();
byte[] usermetadata = new byte[100];
byte[] data = new byte[100];
byte[] encryptionKey = null;
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
BlobProperties properties = new BlobProperties(100, "serviceid1", null, null, false, TestUtils.TTL_SECS, cluster.time.milliseconds(), accountId, containerId, false, null, null, null);
long expectedExpiryTimeMs = getExpiryTimeMs(properties);
TestUtils.RANDOM.nextBytes(usermetadata);
TestUtils.RANDOM.nextBytes(data);
if (testEncryption) {
encryptionKey = new byte[100];
TestUtils.RANDOM.nextBytes(encryptionKey);
}
// connect to all the servers
ConnectedChannel channel1 = getBlockingChannelBasedOnPortType(dataNode1Port, "localhost", clientSSLSocketFactory1, clientSSLConfig1);
ConnectedChannel channel2 = getBlockingChannelBasedOnPortType(dataNode2Port, "localhost", clientSSLSocketFactory2, clientSSLConfig2);
ConnectedChannel channel3 = getBlockingChannelBasedOnPortType(dataNode3Port, "localhost", clientSSLSocketFactory3, clientSSLConfig3);
// put all the blobs to random servers
channel1.connect();
channel2.connect();
channel3.connect();
int noOfParallelThreads = 3;
int totalBlobsToPut = 50;
CountDownLatch latch = new CountDownLatch(noOfParallelThreads);
List<DirectSender> runnables = new ArrayList<DirectSender>(noOfParallelThreads);
ConnectedChannel channel = null;
for (int i = 0; i < noOfParallelThreads; i++) {
if (i % noOfParallelThreads == 0) {
channel = channel1;
} else if (i % noOfParallelThreads == 1) {
channel = channel2;
} else if (i % noOfParallelThreads == 2) {
channel = channel3;
}
DirectSender runnable = new DirectSender(cluster, channel, totalBlobsToPut, data, usermetadata, properties, encryptionKey, latch);
runnables.add(runnable);
Thread threadToRun = new Thread(runnable);
threadToRun.start();
}
assertTrue("Did not put all blobs in 2 minutes", latch.await(2, TimeUnit.MINUTES));
// wait till replication can complete
List<BlobId> blobIds = new ArrayList<BlobId>();
for (int i = 0; i < runnables.size(); i++) {
blobIds.addAll(runnables.get(i).getBlobIds());
}
for (BlobId blobId : blobIds) {
notificationSystem.awaitBlobCreations(blobId.getID());
}
// Now that the blob is created and replicated, test the cases where a put request arrives for the same blob id
// later than replication.
testLatePutRequest(blobIds.get(0), properties, usermetadata, data, encryptionKey, channel1, channel2, channel3, ServerErrorCode.No_Error);
// Test the case where a put arrives with the same id as one in the server, but the blob is not identical.
BlobProperties differentProperties = new BlobProperties(properties.getBlobSize(), properties.getServiceId(), accountId, containerId, testEncryption, cluster.time.milliseconds());
testLatePutRequest(blobIds.get(0), differentProperties, usermetadata, data, encryptionKey, channel1, channel2, channel3, ServerErrorCode.Blob_Already_Exists);
byte[] differentUserMetadata = Arrays.copyOf(usermetadata, usermetadata.length);
differentUserMetadata[0] = (byte) ~differentUserMetadata[0];
testLatePutRequest(blobIds.get(0), properties, differentUserMetadata, data, encryptionKey, channel1, channel2, channel3, ServerErrorCode.Blob_Already_Exists);
byte[] differentData = Arrays.copyOf(data, data.length);
differentData[0] = (byte) ~differentData[0];
testLatePutRequest(blobIds.get(0), properties, usermetadata, differentData, encryptionKey, channel1, channel2, channel3, ServerErrorCode.Blob_Already_Exists);
// verify blob properties, metadata and blob across all nodes
for (int i = 0; i < 3; i++) {
channel = null;
if (i == 0) {
channel = channel1;
} else if (i == 1) {
channel = channel2;
} else if (i == 2) {
channel = channel3;
}
ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
for (int j = 0; j < blobIds.size(); j++) {
ArrayList<BlobId> ids = new ArrayList<BlobId>();
ids.add(blobIds.get(j));
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobIds.get(j).getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel.sendAndReceive(getRequest).getInputStream();
GetResponse resp = GetResponse.readFrom(stream, clusterMap);
try {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
assertEquals(100, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
assertEquals("Expiration time mismatch (props)", expectedExpiryTimeMs, getExpiryTimeMs(propertyOutput));
assertEquals("Expiration time mismatch (MessageInfo)", expectedExpiryTimeMs, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
fail();
}
// get user metadata
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
try {
ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp.getInputStream());
assertArrayEquals(usermetadata, userMetadataOutput.array());
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
assertEquals("Expiration time mismatch (MessageInfo)", expectedExpiryTimeMs, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
} catch (MessageFormatException e) {
e.printStackTrace();
fail();
}
releaseNettyBufUnderneathStream(stream);
// get blob
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
try {
BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
byte[] blobout = getBlobDataAndRelease(blobData);
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
assertEquals("Expiration time mismatch (MessageInfo)", expectedExpiryTimeMs, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
} catch (MessageFormatException e) {
e.printStackTrace();
fail();
}
releaseNettyBufUnderneathStream(stream);
// get blob all
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
try {
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
assertEquals("Expiration time mismatch (props)", expectedExpiryTimeMs, getExpiryTimeMs(blobAll.getBlobInfo().getBlobProperties()));
assertEquals("Expiration time mismatch (MessageInfo)", expectedExpiryTimeMs, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
} else {
assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
}
releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
e.printStackTrace();
fail();
}
}
}
// ttl update all blobs and wait for replication
Map<ConnectedChannel, List<BlobId>> channelToBlobIds = new HashMap<>();
for (int i = 0; i < blobIds.size(); i++) {
final BlobId blobId = blobIds.get(i);
if (i % 3 == 0) {
channelToBlobIds.computeIfAbsent(channel1, updateChannel -> new ArrayList<>()).add(blobId);
} else if (i % 3 == 1) {
channelToBlobIds.computeIfAbsent(channel2, updateChannel -> new ArrayList<>()).add(blobId);
} else {
channelToBlobIds.computeIfAbsent(channel3, updateChannel -> new ArrayList<>()).add(blobId);
}
}
channelToBlobIds.entrySet().stream().map(entry -> CompletableFuture.supplyAsync(() -> {
try {
for (BlobId blobId : entry.getValue()) {
updateBlobTtl(entry.getKey(), blobId, cluster.time.milliseconds());
}
return null;
} catch (Throwable e) {
throw new RuntimeException("Exception updating ttl for: " + entry, e);
}
})).forEach(CompletableFuture::join);
// check that the TTL update has propagated
blobIds.forEach(blobId -> notificationSystem.awaitBlobUpdates(blobId.getID(), UpdateType.TTL_UPDATE));
// check all servers
for (ConnectedChannel channelToUse : new ConnectedChannel[] { channel1, channel2, channel3 }) {
for (BlobId blobId : blobIds) {
checkTtlUpdateStatus(channelToUse, clusterMap, blobIdFactory, blobId, data, true, Utils.Infinite_Time);
}
}
// delete random blobs, wait for replication and ensure it is deleted in all nodes
Set<BlobId> blobsDeleted = new HashSet<BlobId>();
Set<BlobId> blobsChecked = new HashSet<BlobId>();
for (int i = 0; i < blobIds.size(); i++) {
int j = new Random().nextInt(3);
if (j == 0) {
j = new Random().nextInt(3);
if (j == 0) {
channel = channel1;
} else if (j == 1) {
channel = channel2;
} else if (j == 2) {
channel = channel3;
}
DeleteRequest deleteRequest = new DeleteRequest(1, "reptest", blobIds.get(i), System.currentTimeMillis());
DataInputStream deleteResponseStream = channel.sendAndReceive(deleteRequest).getInputStream();
DeleteResponse deleteResponse = DeleteResponse.readFrom(deleteResponseStream);
releaseNettyBufUnderneathStream(deleteResponseStream);
assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
blobsDeleted.add(blobIds.get(i));
}
}
Iterator<BlobId> iterator = blobsDeleted.iterator();
ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
while (iterator.hasNext()) {
BlobId deletedId = iterator.next();
notificationSystem.awaitBlobDeletions(deletedId.getID());
for (int j = 0; j < 3; j++) {
if (j == 0) {
channel = channel1;
} else if (j == 1) {
channel = channel2;
} else if (j == 2) {
channel = channel3;
}
ArrayList<BlobId> ids = new ArrayList<BlobId>();
ids.add(deletedId);
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(deletedId.getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel.sendAndReceive(getRequest).getInputStream();
GetResponse resp = GetResponse.readFrom(stream, clusterMap);
assertEquals(ServerErrorCode.Blob_Deleted, resp.getPartitionResponseInfoList().get(0).getErrorCode());
releaseNettyBufUnderneathStream(stream);
}
}
// take a server down, clean up a mount path, start and ensure replication fixes it
serverList.get(0).shutdown();
serverList.get(0).awaitShutdown();
MockDataNodeId dataNode = (MockDataNodeId) clusterMap.getDataNodeId("localhost", interestedDataNodePortNumber);
System.out.println("Cleaning mount path " + dataNode.getMountPaths().get(0));
for (ReplicaId replicaId : clusterMap.getReplicaIds(dataNode)) {
if (replicaId.getMountPath().compareToIgnoreCase(dataNode.getMountPaths().get(0)) == 0) {
System.out.println("Cleaning partition " + replicaId.getPartitionId());
}
}
deleteFolderContent(new File(dataNode.getMountPaths().get(0)), false);
for (int i = 0; i < blobIds.size(); i++) {
for (ReplicaId replicaId : blobIds.get(i).getPartition().getReplicaIds()) {
if (replicaId.getMountPath().compareToIgnoreCase(dataNode.getMountPaths().get(0)) == 0) {
if (blobsDeleted.contains(blobIds.get(i))) {
notificationSystem.decrementDeletedReplica(blobIds.get(i).getID(), dataNode.getHostname(), dataNode.getPort());
} else {
notificationSystem.decrementCreatedReplica(blobIds.get(i).getID(), dataNode.getHostname(), dataNode.getPort());
notificationSystem.decrementUpdatedReplica(blobIds.get(i).getID(), dataNode.getHostname(), dataNode.getPort(), UpdateType.TTL_UPDATE);
}
}
}
}
cluster.reinitServer(0);
channel1.disconnect();
channel1.connect();
for (int j = 0; j < blobIds.size(); j++) {
if (blobsDeleted.contains(blobIds.get(j))) {
notificationSystem.awaitBlobDeletions(blobIds.get(j).getID());
} else {
notificationSystem.awaitBlobCreations(blobIds.get(j).getID());
notificationSystem.awaitBlobUpdates(blobIds.get(j).getID(), UpdateType.TTL_UPDATE);
}
ArrayList<BlobId> ids = new ArrayList<BlobId>();
ids.add(blobIds.get(j));
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobIds.get(j).getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel1.sendAndReceive(getRequest).getInputStream();
GetResponse resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsDeleted.contains(blobIds.get(j)));
} else {
try {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
assertEquals(100, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
// get user metadata
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
stream = channel1.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsDeleted.contains(blobIds.get(j)));
} else {
try {
ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp.getInputStream());
assertArrayEquals(usermetadata, userMetadataOutput.array());
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
// get blob
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
stream = channel1.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsDeleted.contains(blobIds.get(j)));
} else {
try {
BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
byte[] blobout = getBlobDataAndRelease(blobData);
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
// get blob all
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
stream = channel1.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsDeleted.contains(blobIds.get(j)));
blobsDeleted.remove(blobIds.get(j));
blobsChecked.add(blobIds.get(j));
} else {
try {
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
} else {
assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
}
assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
}
assertEquals(0, blobsDeleted.size());
// take a server down, clean all contents, start and ensure replication fixes it
serverList.get(0).shutdown();
serverList.get(0).awaitShutdown();
dataNode = (MockDataNodeId) clusterMap.getDataNodeId("localhost", interestedDataNodePortNumber);
for (int i = 0; i < dataNode.getMountPaths().size(); i++) {
System.out.println("Cleaning mount path " + dataNode.getMountPaths().get(i));
for (ReplicaId replicaId : clusterMap.getReplicaIds(dataNode)) {
if (replicaId.getMountPath().compareToIgnoreCase(dataNode.getMountPaths().get(i)) == 0) {
System.out.println("Cleaning partition " + replicaId.getPartitionId());
}
}
deleteFolderContent(new File(dataNode.getMountPaths().get(i)), false);
}
for (int i = 0; i < blobIds.size(); i++) {
if (blobsChecked.contains(blobIds.get(i))) {
notificationSystem.decrementDeletedReplica(blobIds.get(i).getID(), dataNode.getHostname(), dataNode.getPort());
} else {
notificationSystem.decrementCreatedReplica(blobIds.get(i).getID(), dataNode.getHostname(), dataNode.getPort());
}
}
cluster.reinitServer(0);
channel1.disconnect();
channel1.connect();
for (int j = 0; j < blobIds.size(); j++) {
if (blobsChecked.contains(blobIds.get(j))) {
notificationSystem.awaitBlobDeletions(blobIds.get(j).getID());
} else {
notificationSystem.awaitBlobCreations(blobIds.get(j).getID());
}
ArrayList<BlobId> ids = new ArrayList<BlobId>();
ids.add(blobIds.get(j));
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobIds.get(j).getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel1.sendAndReceive(getRequest).getInputStream();
GetResponse resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsChecked.contains(blobIds.get(j)));
} else {
try {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
assertEquals(100, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
// get user metadata
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
stream = channel1.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsChecked.contains(blobIds.get(j)));
} else {
try {
ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp.getInputStream());
assertArrayEquals(usermetadata, userMetadataOutput.array());
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
// get blob
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
stream = channel1.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsChecked.contains(blobIds.get(j)));
} else {
try {
BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
byte[] blobout = getBlobDataAndRelease(blobData);
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("MessageMetadata should not have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
assertArrayEquals("EncryptionKey mismatch", encryptionKey, resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0).getEncryptionKey().array());
} else {
assertNull("MessageMetadata should have been null", resp.getPartitionResponseInfoList().get(0).getMessageMetadataList().get(0));
}
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
// get blob all
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
stream = channel1.sendAndReceive(getRequest).getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Deleted || resp.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found) {
assertTrue(blobsChecked.contains(blobIds.get(j)));
blobsChecked.remove(blobIds.get(j));
blobsDeleted.add(blobIds.get(j));
} else {
try {
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
} else {
assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
}
} catch (MessageFormatException e) {
fail();
}
}
releaseNettyBufUnderneathStream(stream);
}
assertEquals(0, blobsChecked.size());
short expectedLifeVersion = 1;
for (int i = 0; i < 2; i++) {
expectedLifeVersion += i;
// First undelete all deleted blobs
for (BlobId deletedId : blobsDeleted) {
UndeleteRequest undeleteRequest = new UndeleteRequest(2, "reptest", deletedId, System.currentTimeMillis());
DataInputStream undeleteResponseStream = channel3.sendAndReceive(undeleteRequest).getInputStream();
UndeleteResponse undeleteResponse = UndeleteResponse.readFrom(undeleteResponseStream);
releaseNettyBufUnderneathStream(undeleteResponseStream);
assertEquals(ServerErrorCode.No_Error, undeleteResponse.getError());
assertEquals(expectedLifeVersion, undeleteResponse.getLifeVersion());
}
Thread.sleep(5000);
// Then use get request to get all the data back and make sure the lifeVersion is correct
for (BlobId id : blobsDeleted) {
// We don't need to wait for blob undeletes, since one of the hosts has the Put Record deleted
// from disk, so undeleting this blob ends up replicating the Put Record instead of the undelete.
// notificationSystem.awaitBlobUndeletes(id.toString());
ArrayList<BlobId> ids = new ArrayList<>();
ids.add(id);
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(id.getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
// get blob all
GetRequest getRequest = new GetRequest(1, "clientid20", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel1.sendAndReceive(getRequest).getInputStream();
GetResponse resp = GetResponse.readFrom(stream, clusterMap);
assertEquals(ServerErrorCode.No_Error, resp.getError());
assertEquals(1, resp.getPartitionResponseInfoList().size());
assertEquals(ServerErrorCode.No_Error, resp.getPartitionResponseInfoList().get(0).getErrorCode());
assertEquals(1, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().size());
MessageInfo info = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0);
assertEquals(expectedLifeVersion, info.getLifeVersion());
assertFalse(info.isDeleted());
try {
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
} else {
assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
}
assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
fail();
}
}
for (BlobId id : blobsDeleted) {
DeleteRequest deleteRequest = new DeleteRequest(1, "reptest", id, System.currentTimeMillis());
DataInputStream deleteResponseStream = channel.sendAndReceive(deleteRequest).getInputStream();
DeleteResponse deleteResponse = DeleteResponse.readFrom(deleteResponseStream);
releaseNettyBufUnderneathStream(deleteResponseStream);
assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
}
Thread.sleep(1000);
for (BlobId id : blobsDeleted) {
ArrayList<BlobId> ids = new ArrayList<>();
ids.add(id);
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(id.getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
// get blob all
GetRequest getRequest = new GetRequest(1, "clientid200", MessageFormatFlags.All, partitionRequestInfoList, GetOption.Include_All);
DataInputStream stream = channel1.sendAndReceive(getRequest).getInputStream();
GetResponse resp = GetResponse.readFrom(stream, clusterMap);
assertEquals(ServerErrorCode.No_Error, resp.getError());
assertEquals(1, resp.getPartitionResponseInfoList().size());
assertEquals(ServerErrorCode.No_Error, resp.getPartitionResponseInfoList().get(0).getErrorCode());
assertEquals(1, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().size());
MessageInfo info = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0);
assertEquals(expectedLifeVersion, info.getLifeVersion());
assertTrue(info.isDeleted());
try {
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
byte[] blobout = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals(data, blobout);
if (testEncryption) {
assertNotNull("EncryptionKey should not ne null", blobAll.getBlobEncryptionKey());
assertArrayEquals("EncryptionKey mismatch", encryptionKey, blobAll.getBlobEncryptionKey().array());
} else {
assertNull("EncryptionKey should have been null", blobAll.getBlobEncryptionKey());
}
assertEquals("Expiration time mismatch in MessageInfo", Utils.Infinite_Time, resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs());
releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
fail();
}
}
}
channel1.disconnect();
channel2.disconnect();
channel3.disconnect();
}
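The test above repeats the same four-step GET pattern dozens of times: build a single-blob PartitionRequestInfo, send a GetRequest, read the GetResponse, deserialize with MessageFormatRecord, then release the Netty buffer under the stream. A minimal sketch of how that sequence could be factored into a helper is shown below; the helper itself (its name and parameter list) is an illustrative assumption and not part of ServerTestUtil, but it only uses calls that already appear in the test.

// Illustrative helper, not part of ServerTestUtil: factors out the repeated
// GET -> GetResponse.readFrom -> deserialize -> release sequence used in the test above.
private static BlobProperties getBlobPropertiesFromServer(ConnectedChannel channel, BlobId blobId, ClusterMap clusterMap) throws Exception {
  ArrayList<BlobId> ids = new ArrayList<>();
  ids.add(blobId);
  ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<>();
  partitionRequestInfoList.add(new PartitionRequestInfo(blobId.getPartition(), ids));
  GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
  DataInputStream stream = channel.sendAndReceive(getRequest).getInputStream();
  try {
    GetResponse resp = GetResponse.readFrom(stream, clusterMap);
    // Callers are expected to check the partition-level error code before deserializing.
    return MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
  } finally {
    releaseNettyBufUnderneathStream(stream);
  }
}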
Use of com.github.ambry.messageformat.MessageFormatException in project ambry by LinkedIn.
The class VcrBackupTest, method basicTest.
/**
* Basic test to make sure the VCR can back up with HelixVcrCluster.
*/
@Test
public void basicTest() throws Exception {
List<BlobId> blobIds = sendBlobToDataNode(dataNode, 10);
// Start the VCR and CloudBackupManager
Properties props = VcrTestUtil.createVcrProperties(dataNode.getDatacenterName(), vcrClusterName, zkConnectString, clusterMapPort, 12410, 12510, serverSSLProps, vcrHelixStateModelFactoryClass, true);
LatchBasedInMemoryCloudDestination latchBasedInMemoryCloudDestination = new LatchBasedInMemoryCloudDestination(blobIds, mockCluster.getClusterMap());
CloudDestinationFactory cloudDestinationFactory = new LatchBasedInMemoryCloudDestinationFactory(latchBasedInMemoryCloudDestination);
VcrServer vcrServer = VcrTestUtil.createVcrServer(new VerifiableProperties(props), mockCluster.getClusterAgentsFactory(), notificationSystem, cloudDestinationFactory);
vcrServer.startup();
// Waiting for backup done
assertTrue("Did not backup all blobs in 2 minutes", latchBasedInMemoryCloudDestination.awaitUpload(2, TimeUnit.MINUTES));
// Verify a blob by making a http2 request.
MockClusterMap clusterMap = mockCluster.getClusterMap();
SSLConfig clientSSLConfig = new SSLConfig(new VerifiableProperties(clientSSLProps));
ConnectedChannel channel = ServerTestUtil.getBlockingChannelBasedOnPortType(new Port(clusterMap.getDataNodes().get(0).getHttp2Port(), PortType.HTTP2), "localhost", null, clientSSLConfig);
BlobId blobToVerify = blobIds.get(0);
ArrayList<BlobId> idList = new ArrayList<>(Arrays.asList(blobToVerify));
ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobToVerify.getPartition(), idList);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest1 = new GetRequest(1, "clientid1", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel.sendAndReceive(getRequest1).getInputStream();
GetResponse resp1 = GetResponse.readFrom(stream, clusterMap);
try {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp1.getInputStream());
// Do a simple check
assertEquals(blobSize, propertyOutput.getBlobSize());
releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
fail();
}
vcrServer.shutdown();
assertTrue("VCR server shutdown timeout.", vcrServer.awaitShutdown(5000));
}
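One operational detail worth noting: basicTest only shuts the VCR server down on the success path, so an assertion that fails between startup() and shutdown() leaves the server running for the rest of the test JVM. A hedged sketch of a try/finally variant, using only the calls already shown in the test, follows.

vcrServer.startup();
try {
  // backup wait and HTTP2 GET verification as shown above
  assertTrue("Did not backup all blobs in 2 minutes", latchBasedInMemoryCloudDestination.awaitUpload(2, TimeUnit.MINUTES));
} finally {
  // always runs, even if an assertion above fails
  vcrServer.shutdown();
  assertTrue("VCR server shutdown timeout.", vcrServer.awaitShutdown(5000));
}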
Use of com.github.ambry.messageformat.MessageFormatException in project ambry by LinkedIn.
The class Verifier, method run.
@Override
public void run() {
try {
List<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
while (requestsVerified.get() != totalRequests.get() && !cancelTest.get()) {
Payload payload = payloadQueue.poll(1000, TimeUnit.MILLISECONDS);
if (payload != null) {
notificationSystem.awaitBlobCreations(payload.blobId);
for (MockDataNodeId dataNodeId : clusterMap.getDataNodes()) {
ConnectedChannel channel1 = null;
try {
BlobId blobId = new BlobId(payload.blobId, clusterMap);
Port port = new Port(portType == PortType.PLAINTEXT ? dataNodeId.getPort() : dataNodeId.getSSLPort(), portType);
channel1 = connectionPool.checkOutConnection("localhost", port, 10000);
ArrayList<BlobId> ids = new ArrayList<BlobId>();
ids.add(blobId);
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(ids.get(0).getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
channel1.send(getRequest);
DataInputStream stream = channel1.receive().getInputStream();
GetResponse resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getError() != ServerErrorCode.No_Error) {
System.out.println(dataNodeId.getHostname() + " " + dataNodeId.getPort() + " " + resp.getError());
throw new IllegalStateException();
} else {
try {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
if (propertyOutput.getBlobSize() != payload.blobProperties.getBlobSize()) {
String exceptionMsg = "blob size not matching " + " expected " + payload.blobProperties.getBlobSize() + " actual " + propertyOutput.getBlobSize();
System.out.println(exceptionMsg);
throw new IllegalStateException(exceptionMsg);
}
if (!propertyOutput.getServiceId().equals(payload.blobProperties.getServiceId())) {
String exceptionMsg = "service id not matching " + " expected " + payload.blobProperties.getServiceId() + " actual " + propertyOutput.getBlobSize();
System.out.println(exceptionMsg);
throw new IllegalStateException(exceptionMsg);
}
if (propertyOutput.getAccountId() != payload.blobProperties.getAccountId()) {
String exceptionMsg = "accountid not matching " + " expected " + payload.blobProperties.getAccountId() + " actual " + propertyOutput.getAccountId();
System.out.println(exceptionMsg);
throw new IllegalStateException(exceptionMsg);
}
if (propertyOutput.getContainerId() != payload.blobProperties.getContainerId()) {
String exceptionMsg = "containerId not matching " + " expected " + payload.blobProperties.getContainerId() + " actual " + propertyOutput.getContainerId();
System.out.println(exceptionMsg);
throw new IllegalStateException(exceptionMsg);
}
if (propertyOutput.isEncrypted() != payload.blobProperties.isEncrypted()) {
String exceptionMsg = "IsEncrypted not matching " + " expected " + payload.blobProperties.isEncrypted() + " actual " + propertyOutput.isEncrypted();
System.out.println(exceptionMsg);
throw new IllegalStateException(exceptionMsg);
}
long actualExpiryTimeMs = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs();
checkExpiryTimeMatch(payload, actualExpiryTimeMs, "messageinfo in blobproperty");
} catch (MessageFormatException e) {
e.printStackTrace();
throw new IllegalStateException(e);
}
}
// get user metadata
ids.clear();
ids.add(blobId);
partitionRequestInfoList.clear();
partitionRequestInfo = new PartitionRequestInfo(ids.get(0).getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
channel1.send(getRequest);
stream = channel1.receive().getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getError() != ServerErrorCode.No_Error) {
System.out.println("Error after get user metadata " + resp.getError());
throw new IllegalStateException();
} else {
try {
ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp.getInputStream());
if (userMetadataOutput.compareTo(ByteBuffer.wrap(payload.metadata)) != 0) {
throw new IllegalStateException();
}
long actualExpiryTimeMs = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs();
checkExpiryTimeMatch(payload, actualExpiryTimeMs, "messageinfo in usermetadatga");
} catch (MessageFormatException e) {
e.printStackTrace();
throw new IllegalStateException();
}
}
// get blob
ids.clear();
ids.add(blobId);
partitionRequestInfoList.clear();
partitionRequestInfo = new PartitionRequestInfo(ids.get(0).getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
channel1.send(getRequest);
stream = channel1.receive().getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
// System.out.println("response from get " + resp.getError());
if (resp.getError() != ServerErrorCode.No_Error) {
System.out.println("Error after get blob " + resp.getError());
throw new IllegalStateException();
} else {
try {
BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
byte[] blobout = new byte[(int) blobData.getSize()];
ByteBuf buffer = blobData.content();
try {
buffer.readBytes(blobout);
} finally {
buffer.release();
}
if (ByteBuffer.wrap(blobout).compareTo(ByteBuffer.wrap(payload.blob)) != 0) {
throw new IllegalStateException();
}
long actualExpiryTimeMs = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs();
checkExpiryTimeMatch(payload, actualExpiryTimeMs, "messageinfo in blobdata");
} catch (MessageFormatException e) {
e.printStackTrace();
throw new IllegalStateException();
}
}
// get blob all
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
channel1.send(getRequest);
stream = channel1.receive().getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getError() != ServerErrorCode.No_Error) {
System.out.println("Error after get blob " + resp.getError());
throw new IllegalStateException();
} else {
try {
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), new BlobIdFactory(clusterMap));
byte[] blobout = new byte[(int) blobAll.getBlobData().getSize()];
ByteBuf buffer = blobAll.getBlobData().content();
try {
buffer.readBytes(blobout);
} finally {
buffer.release();
}
if (ByteBuffer.wrap(blobout).compareTo(ByteBuffer.wrap(payload.blob)) != 0) {
throw new IllegalStateException();
}
long actualExpiryTimeMs = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs();
checkExpiryTimeMatch(payload, actualExpiryTimeMs, "messageinfo in bloball");
} catch (MessageFormatException e) {
e.printStackTrace();
throw new IllegalStateException();
}
}
if (payload.blobProperties.getTimeToLiveInSeconds() != Utils.Infinite_Time) {
// ttl update, check and wait for replication
ServerTestUtil.updateBlobTtl(channel1, new BlobId(payload.blobId, clusterMap), time.milliseconds());
ServerTestUtil.checkTtlUpdateStatus(channel1, clusterMap, new BlobIdFactory(clusterMap), blobId, payload.blob, true, Utils.Infinite_Time);
notificationSystem.awaitBlobUpdates(payload.blobId, UpdateType.TTL_UPDATE);
BlobProperties old = payload.blobProperties;
payload.blobProperties = new BlobProperties(old.getBlobSize(), old.getServiceId(), old.getOwnerId(), old.getContentType(), old.isEncrypted(), Utils.Infinite_Time, old.getCreationTimeInMs(), old.getAccountId(), old.getContainerId(), old.isEncrypted(), old.getExternalAssetTag(), old.getContentEncoding(), old.getFilename());
}
} catch (Exception e) {
if (channel1 != null) {
connectionPool.destroyConnection(channel1);
channel1 = null;
}
} finally {
if (channel1 != null) {
connectionPool.checkInConnection(channel1);
channel1 = null;
}
}
}
requestsVerified.incrementAndGet();
}
}
} catch (Exception e) {
e.printStackTrace();
cancelTest.set(true);
} finally {
completedLatch.countDown();
}
}
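The per-node block in run() follows a standard connection-pool discipline: check a connection out, destroy it if anything goes wrong (so a broken socket is not returned to the pool), and check it back in otherwise. The sketch below isolates just that pattern; the wrapper method and the ChannelAction interface are hypothetical, only checkOutConnection, destroyConnection and checkInConnection come from the snippet, and unlike run() above (which swallows the per-node exception) the sketch rethrows it.

// Hypothetical wrapper isolating the pool discipline used in run() above.
interface ChannelAction {
  void accept(ConnectedChannel channel) throws Exception;
}

private void withPooledConnection(String host, Port port, ChannelAction action) throws Exception {
  ConnectedChannel channel = connectionPool.checkOutConnection(host, port, 10000);
  try {
    action.accept(channel);
  } catch (Exception e) {
    // A failed request may leave the channel in an unknown state: drop it from the pool.
    connectionPool.destroyConnection(channel);
    channel = null;
    throw e;
  } finally {
    if (channel != null) {
      // Healthy channel: return it to the pool for reuse.
      connectionPool.checkInConnection(channel);
    }
  }
}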
Use of com.github.ambry.messageformat.MessageFormatException in project ambry by LinkedIn.
The class GetBlobInfoOperation, method handleBody.
/**
* Handle the body of the response: Deserialize and set the {@link BlobInfo} to return if no decryption is required.
* If decryption is required, submit a job for decryption.
* @param payload the body of the response.
* @param messageMetadata the {@link MessageMetadata} associated with the message.
* @param messageInfo the {@link MessageInfo} associated with the message.
* @throws IOException if there is an IOException while deserializing the body.
* @throws MessageFormatException if there is a MessageFormatException while deserializing the body.
*/
private void handleBody(InputStream payload, MessageMetadata messageMetadata, MessageInfo messageInfo) throws IOException, MessageFormatException {
ByteBuffer encryptionKey = messageMetadata == null ? null : messageMetadata.getEncryptionKey();
serverBlobProperties = MessageFormatRecord.deserializeBlobProperties(payload);
updateTtlIfRequired(serverBlobProperties, messageInfo);
ByteBuffer userMetadata = MessageFormatRecord.deserializeUserMetadata(payload);
if (encryptionKey == null) {
// if blob is not encrypted, move the state to Complete
BlobInfo blobInfo = new BlobInfo(serverBlobProperties, userMetadata.array(), messageInfo.getLifeVersion());
operationResult = new GetBlobResultInternal(new GetBlobResult(blobInfo, null), null);
} else {
// submit decrypt job
progressTracker.initializeCryptoJobTracker(CryptoJobType.DECRYPTION);
logger.trace("Submitting decrypt job for {}", blobId);
decryptJobMetricsTracker.onJobSubmission();
long startTimeMs = System.currentTimeMillis();
cryptoJobHandler.submitJob(new DecryptJob(blobId, encryptionKey.duplicate(), null, userMetadata, cryptoService, kms, options.getBlobOptions, decryptJobMetricsTracker, (DecryptJob.DecryptJobResult result, Exception exception) -> {
decryptJobMetricsTracker.onJobResultProcessingStart();
logger.trace("Handling decrypt job callback results for {}", blobId);
routerMetrics.decryptTimeMs.update(System.currentTimeMillis() - startTimeMs);
if (exception == null) {
logger.trace("Successfully updating decrypt job callback results for {}", blobId);
BlobInfo blobInfo = new BlobInfo(serverBlobProperties, result.getDecryptedUserMetadata().array(), messageInfo.getLifeVersion());
operationResult = new GetBlobResultInternal(new GetBlobResult(blobInfo, null), null);
progressTracker.setCryptoJobSuccess();
} else {
decryptJobMetricsTracker.incrementOperationError();
logger.trace("Exception {} thrown on decryption for {}", exception, blobId);
setOperationException(new RouterException("Exception thrown on decrypting the content for " + blobId, exception, RouterErrorCode.UnexpectedInternalError));
progressTracker.setCryptoJobFailed();
}
decryptJobMetricsTracker.onJobResultProcessingComplete();
routerCallback.onPollReady();
}));
}
}
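handleBody has two completion paths: an unencrypted blob completes synchronously, while an encrypted blob defers completion to the decrypt job's callback. The sketch below restates that control flow in a simplified, hypothetical form; Decryptor and the callback shapes are illustrative stand-ins, not Ambry's CryptoJobHandler or DecryptJob types.

// Simplified, hypothetical restatement of the two completion paths in handleBody.
// Decryptor is an illustrative stand-in for the crypto job machinery, not an Ambry type.
// (Uses java.nio.ByteBuffer and java.util.function.BiConsumer/Consumer.)
interface Decryptor {
  void decrypt(ByteBuffer encryptionKey, ByteBuffer userMetadata, BiConsumer<ByteBuffer, Exception> callback);
}

void completeUserMetadata(ByteBuffer encryptionKey, ByteBuffer userMetadata, Decryptor decryptor,
    Consumer<byte[]> onReady, Consumer<Exception> onError) {
  if (encryptionKey == null) {
    // Plaintext blob: the result is available immediately.
    onReady.accept(userMetadata.array());
  } else {
    // Encrypted blob: hand off to the decryptor and finish inside its callback.
    decryptor.decrypt(encryptionKey.duplicate(), userMetadata, (decrypted, error) -> {
      if (error == null) {
        onReady.accept(decrypted.array());
      } else {
        onError.accept(error);
      }
    });
  }
}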
Use of com.github.ambry.messageformat.MessageFormatException in project ambry by LinkedIn.
The class GCMCryptoService, method deserializeIV.
/**
* Deserialize IV from the stream
* @param stream the stream from which IV needs to be deserialized
* @return the deserialized IV as a byte array
* @throws IOException if reading the IV record from the stream fails
* @throws MessageFormatException if the IV record version is not supported
*/
private static byte[] deserializeIV(InputStream stream) throws IOException, MessageFormatException {
DataInputStream inputStream = new DataInputStream(stream);
short version = inputStream.readShort();
switch(version) {
case IV_RECORD_VERSION_V_1:
return IVRecord_Format_V1.deserializeIVRecord(inputStream);
default:
throw new MessageFormatException("IVRecord version not supported", MessageFormatErrorCodes.Unknown_Format_Version);
}
}
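deserializeIV follows Ambry's usual versioned-record pattern: read a short version, then dispatch to the format class for that version. For illustration, a matching serializer is sketched below; since the snippet does not show the V1 payload layout, the length-prefixed-bytes body is an assumption, not the documented IVRecord format.

// Hypothetical serializer mirroring deserializeIV. The payload layout after the
// version short (int length + raw IV bytes) is an assumption; only the
// version-short dispatch convention comes from the snippet above.
private static byte[] serializeIV(byte[] iv) throws IOException {
  ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  DataOutputStream out = new DataOutputStream(bytes);
  out.writeShort(IV_RECORD_VERSION_V_1); // version read back by deserializeIV
  out.writeInt(iv.length);               // assumed length prefix
  out.write(iv);                         // assumed raw IV bytes
  out.flush();
  return bytes.toByteArray();
}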