use of com.github.ambry.utils.ByteBufferInputStream in project ambry by linkedin.
the class MockMessageWriteSet method writeTo.
@Override
public long writeTo(Write writeChannel) throws StoreException {
  if (exception != null) {
    throw exception;
  }
  long sizeWritten = 0;
  for (ByteBuffer buffer : buffers) {
    sizeWritten += buffer.remaining();
    writeChannel.appendFrom(Channels.newChannel(new ByteBufferInputStream(buffer)), buffer.remaining());
  }
  return sizeWritten;
}
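The pattern here is ByteBuffer -> InputStream -> ReadableByteChannel: each buffer is wrapped in a ByteBufferInputStream and adapted with Channels.newChannel so it can be handed to Write.appendFrom. Below is a minimal, self-contained sketch of the same adapter chain using only JDK types; BufferBackedInputStream is a hypothetical simplified stand-in for Ambry's ByteBufferInputStream, not its actual implementation.

import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;

// Hypothetical simplified stand-in for com.github.ambry.utils.ByteBufferInputStream:
// an InputStream view over a ByteBuffer whose reads advance the buffer position.
class BufferBackedInputStream extends InputStream {
  private final ByteBuffer buffer;

  BufferBackedInputStream(ByteBuffer buffer) {
    this.buffer = buffer;
  }

  @Override
  public int read() {
    return buffer.hasRemaining() ? buffer.get() & 0xFF : -1;
  }

  @Override
  public int read(byte[] dst, int off, int len) {
    if (!buffer.hasRemaining()) {
      return -1;
    }
    int toRead = Math.min(len, buffer.remaining());
    buffer.get(dst, off, toRead);
    return toRead;
  }
}

public class WriteSetSketch {
  public static void main(String[] args) throws IOException {
    ByteBuffer payload = ByteBuffer.wrap("hello".getBytes());
    // Same adapter chain as writeTo above: ByteBuffer -> InputStream -> channel.
    ReadableByteChannel channel = Channels.newChannel(new BufferBackedInputStream(payload));
    ByteBuffer sink = ByteBuffer.allocate(5);
    while (sink.hasRemaining() && channel.read(sink) != -1) {
      // drain the source into the sink
    }
    System.out.println(new String(sink.array())); // prints "hello"
  }
}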
use of com.github.ambry.utils.ByteBufferInputStream in project ambry by linkedin.
the class StaticClusterManagerTest method clusterMapInterface.
@Test
public void clusterMapInterface() {
  // Exercise entire clusterMap interface
  TestHardwareLayout testHardwareLayout = new TestHardwareLayout("Alpha");
  TestPartitionLayout testPartitionLayout = new TestPartitionLayout(testHardwareLayout, null);
  // add 3 partitions with read_only state.
  testPartitionLayout.partitionState = PartitionState.READ_ONLY;
  testPartitionLayout.addNewPartitions(3, DEFAULT_PARTITION_CLASS, testPartitionLayout.partitionState, null);
  testPartitionLayout.partitionState = PartitionState.READ_WRITE;
  Datacenter localDatacenter = testHardwareLayout.getRandomDatacenter();
  Properties props = new Properties();
  props.setProperty("clustermap.host.name", "localhost");
  props.setProperty("clustermap.cluster.name", "cluster");
  props.setProperty("clustermap.datacenter.name", localDatacenter.getName());
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
  ClusterMap clusterMapManager = (new StaticClusterAgentsFactory(clusterMapConfig, testPartitionLayout.getPartitionLayout())).getClusterMap();
  for (String metricName : clusterMapManager.getMetricRegistry().getNames()) {
    System.out.println(metricName);
  }
  assertEquals("Incorrect local datacenter ID", localDatacenter.getId(), clusterMapManager.getLocalDatacenterId());
  List<? extends PartitionId> writablePartitionIds = clusterMapManager.getWritablePartitionIds(null);
  List<? extends PartitionId> partitionIds = clusterMapManager.getAllPartitionIds(null);
  assertEquals(writablePartitionIds.size(), testPartitionLayout.getPartitionCount() - 3);
  assertEquals(partitionIds.size(), testPartitionLayout.getPartitionCount());
  for (PartitionId partitionId : partitionIds) {
    if (partitionId.getPartitionState().equals(PartitionState.READ_WRITE)) {
      assertTrue("Partition not found in writable set ", writablePartitionIds.contains(partitionId));
    } else {
      assertFalse("READ_ONLY Partition found in writable set ", writablePartitionIds.contains(partitionId));
    }
  }
  for (int i = 0; i < partitionIds.size(); i++) {
    PartitionId partitionId = partitionIds.get(i);
    assertEquals(partitionId.getReplicaIds().size(), testPartitionLayout.getTotalReplicaCount());
    DataInputStream partitionStream = new DataInputStream(new ByteBufferInputStream(ByteBuffer.wrap(partitionId.getBytes())));
    try {
      PartitionId fetchedPartitionId = clusterMapManager.getPartitionIdFromStream(partitionStream);
      assertEquals(partitionId, fetchedPartitionId);
    } catch (IOException e) {
      fail("Deserializing the partition ID should not throw an IOException: " + e);
    }
  }
  for (Datacenter datacenter : testHardwareLayout.getHardwareLayout().getDatacenters()) {
    for (DataNode dataNode : datacenter.getDataNodes()) {
      DataNodeId dataNodeId = clusterMapManager.getDataNodeId(dataNode.getHostname(), dataNode.getPort());
      assertEquals(dataNodeId, dataNode);
      for (ReplicaId replicaId : clusterMapManager.getReplicaIds(dataNodeId)) {
        assertEquals(dataNodeId, replicaId.getDataNodeId());
      }
    }
  }
}
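The round trip in the try block relies on getPartitionIdFromStream reading back exactly the bytes produced by partitionId.getBytes(), delivered through a DataInputStream layered over a ByteBufferInputStream. The sketch below shows that serialize-then-reread pattern with JDK streams only; the record layout used here (a short version followed by a long id) is an assumption for illustration, not Ambry's actual partition encoding.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class PartitionIdRoundTripSketch {
  public static void main(String[] args) throws IOException {
    // Serialize a hypothetical partition record: version + id.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    out.writeShort(1);   // layout version (assumed field, for illustration)
    out.writeLong(42L);  // partition id (assumed field, for illustration)
    out.flush();

    // getPartitionIdFromStream reads the serialized fields back from a
    // DataInputStream; ByteArrayInputStream stands in here for
    // ByteBufferInputStream(ByteBuffer.wrap(partitionId.getBytes())).
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    short version = in.readShort();
    long id = in.readLong();
    System.out.println("version=" + version + " id=" + id);
  }
}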
use of com.github.ambry.utils.ByteBufferInputStream in project ambry by linkedin.
the class HardDeleteRecoveryMetadata method getUserMetadataInfo.
private DeserializedUserMetadata getUserMetadataInfo(MessageReadSet readSet, int readSetIndex, int relativeOffset, int userMetadataSize) throws MessageFormatException, IOException {
  /* Read the serialized user metadata from the channel */
  ByteBuffer userMetaData = ByteBuffer.allocate(userMetadataSize);
  readSet.writeTo(readSetIndex, Channels.newChannel(new ByteBufferOutputStream(userMetaData)), relativeOffset, userMetadataSize);
  userMetaData.flip();
  return deserializeAndGetUserMetadataWithVersion(new ByteBufferInputStream(userMetaData));
}
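The method follows the allocate -> write -> flip -> read discipline: the buffer is filled through a channel over a ByteBufferOutputStream, then flipped so the subsequent ByteBufferInputStream reads from position 0 up to exactly the bytes just written. A sketch of that discipline follows, with a hypothetical simplified BufferBackedOutputStream standing in for Ambry's ByteBufferOutputStream.

import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.WritableByteChannel;
import java.nio.charset.StandardCharsets;

// Hypothetical simplified stand-in for com.github.ambry.utils.ByteBufferOutputStream:
// writes advance the backing buffer's position.
class BufferBackedOutputStream extends OutputStream {
  private final ByteBuffer buffer;

  BufferBackedOutputStream(ByteBuffer buffer) {
    this.buffer = buffer;
  }

  @Override
  public void write(int b) {
    buffer.put((byte) b);
  }

  @Override
  public void write(byte[] src, int off, int len) {
    buffer.put(src, off, len);
  }
}

public class UserMetadataCopySketch {
  public static void main(String[] args) throws IOException {
    byte[] userMetadata = "user-metadata".getBytes(StandardCharsets.UTF_8);
    // allocate -> write through a channel -> flip for reading, mirroring getUserMetadataInfo.
    ByteBuffer target = ByteBuffer.allocate(userMetadata.length);
    WritableByteChannel channel = Channels.newChannel(new BufferBackedOutputStream(target));
    channel.write(ByteBuffer.wrap(userMetadata));
    target.flip(); // position -> 0, limit -> number of bytes written
    byte[] roundTripped = new byte[target.remaining()];
    target.get(roundTripped);
    System.out.println(new String(roundTripped, StandardCharsets.UTF_8));
  }
}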
use of com.github.ambry.utils.ByteBufferInputStream in project ambry by linkedin.
the class ValidatingKeyConvertingTransformer method testInValidDeletedAndExpiredBlobs.
private void testInValidDeletedAndExpiredBlobs(short blobVersion, BlobType blobType, short headerVersionToUse) throws Exception {
  MessageFormatRecord.headerVersionToUse = headerVersionToUse;
  // The MessageSievingInputStream contains put records for 2 valid blobs, 2 corrupt blobs, 1 deleted blob
  // and 1 expired blob: id1 (valid), id2 (corrupt with a valid header version), id3 (deleted),
  // id4 (expired), id5 (valid) and id6 (corrupt with an invalid header version).
  // create message stream for blob 1
  StoreKey key1 = new MockId("id1");
  short accountId1 = Utils.getRandomShort(RANDOM);
  short containerId1 = Utils.getRandomShort(RANDOM);
  BlobProperties prop1 = new BlobProperties(10, "servid1", accountId1, containerId1, false);
  byte[] encryptionKey1 = new byte[100];
  RANDOM.nextBytes(encryptionKey1);
  byte[] usermetadata1 = new byte[1000];
  RANDOM.nextBytes(usermetadata1);
  int blobContentSize = 2000;
  byte[] data1 = new byte[blobContentSize];
  RANDOM.nextBytes(data1);
  long blobSize = -1;
  if (blobType == BlobType.DataBlob) {
    blobSize = (int) Blob_Format_V2.getBlobRecordSize(blobContentSize);
  } else {
    ByteBuffer byteBufferBlob = MessageFormatTestUtils.getBlobContentForMetadataBlob(blobContentSize);
    data1 = byteBufferBlob.array();
    blobContentSize = data1.length;
    blobSize = (int) Blob_Format_V2.getBlobRecordSize(blobContentSize);
  }
  ByteBufferInputStream stream1 = new ByteBufferInputStream(ByteBuffer.wrap(data1));
  MessageFormatInputStream messageFormatStream1 = (blobVersion == Blob_Version_V2) ? new PutMessageFormatInputStream(key1, ByteBuffer.wrap(encryptionKey1), prop1, ByteBuffer.wrap(usermetadata1), stream1, blobContentSize, blobType) : new PutMessageFormatBlobV1InputStream(key1, prop1, ByteBuffer.wrap(usermetadata1), stream1, blobContentSize, blobType);
  MessageInfo msgInfo1 = new MessageInfo(key1, messageFormatStream1.getSize(), accountId1, containerId1, prop1.getCreationTimeInMs());
  // create message stream for blob 2
  StoreKey key2 = new MockId("id2");
  short accountId2 = Utils.getRandomShort(RANDOM);
  short containerId2 = Utils.getRandomShort(RANDOM);
  BlobProperties prop2 = new BlobProperties(10, "servid2", accountId2, containerId2, false);
  byte[] encryptionKey2 = new byte[100];
  RANDOM.nextBytes(encryptionKey2);
  byte[] usermetadata2 = new byte[1000];
  RANDOM.nextBytes(usermetadata2);
  blobContentSize = 2000;
  byte[] data2 = new byte[blobContentSize];
  RANDOM.nextBytes(data2);
  if (blobVersion == Blob_Version_V2 && blobType == BlobType.MetadataBlob) {
    ByteBuffer byteBufferBlob = MessageFormatTestUtils.getBlobContentForMetadataBlob(blobContentSize);
    data2 = byteBufferBlob.array();
    blobContentSize = data2.length;
  }
  ByteBufferInputStream stream2 = new ByteBufferInputStream(ByteBuffer.wrap(data2));
  MessageFormatInputStream messageFormatStream2 = (blobVersion == Blob_Version_V2) ? new PutMessageFormatInputStream(key2, ByteBuffer.wrap(encryptionKey2), prop2, ByteBuffer.wrap(usermetadata2), stream2, blobContentSize, blobType) : new PutMessageFormatBlobV1InputStream(key2, prop2, ByteBuffer.wrap(usermetadata2), stream2, blobContentSize, blobType);
  MessageInfo msgInfo2 = new MessageInfo(key2, messageFormatStream2.getSize(), accountId2, containerId2, prop2.getCreationTimeInMs());
  // corrupt the message stream but make sure this header version is still valid
  byte[] corruptMessageStream2 = new byte[(int) messageFormatStream2.getSize()];
  RANDOM.nextBytes(corruptMessageStream2);
  corruptMessageStream2[0] = (byte) 0;
  corruptMessageStream2[1] = (byte) headerVersionToUse;
  InputStream corruptStream2 = new ByteBufferInputStream(ByteBuffer.wrap(corruptMessageStream2));
  // create message stream for blob 3 that is deleted.
  StoreKey key3 = new MockId("id3");
  short accountId3 = Utils.getRandomShort(RANDOM);
  short containerId3 = Utils.getRandomShort(RANDOM);
  BlobProperties prop3 = new BlobProperties(10, "servid3", accountId3, containerId3, false);
  byte[] usermetadata3 = new byte[1000];
  RANDOM.nextBytes(usermetadata3);
  blobContentSize = 2000;
  byte[] data3 = new byte[blobContentSize];
  RANDOM.nextBytes(data3);
  if (blobVersion == Blob_Version_V2 && blobType == BlobType.MetadataBlob) {
    ByteBuffer byteBufferBlob = MessageFormatTestUtils.getBlobContentForMetadataBlob(blobContentSize);
    data3 = byteBufferBlob.array();
    blobContentSize = data3.length;
  }
  ByteBufferInputStream stream3 = new ByteBufferInputStream(ByteBuffer.wrap(data3));
  MessageFormatInputStream messageFormatStream3 = (blobVersion == Blob_Version_V2) ? new PutMessageFormatInputStream(key3, null, prop3, ByteBuffer.wrap(usermetadata3), stream3, blobContentSize, blobType) : new PutMessageFormatBlobV1InputStream(key3, prop3, ByteBuffer.wrap(usermetadata3), stream3, blobContentSize, blobType);
  // MessageInfo marks this blob as deleted.
  MessageInfo msgInfo3 = new MessageInfo(key3, messageFormatStream3.getSize(), true, false, Utils.Infinite_Time, accountId3, containerId3, prop3.getCreationTimeInMs());
  // create message stream for blob 4 that is expired.
  StoreKey key4 = new MockId("id4");
  short accountId4 = Utils.getRandomShort(RANDOM);
  short containerId4 = Utils.getRandomShort(RANDOM);
  BlobProperties prop4 = new BlobProperties(10, "servid4", accountId4, containerId4, false);
  byte[] usermetadata4 = new byte[1000];
  RANDOM.nextBytes(usermetadata4);
  blobContentSize = 2000;
  byte[] data4 = new byte[blobContentSize];
  RANDOM.nextBytes(data4);
  if (blobVersion == Blob_Version_V2 && blobType == BlobType.MetadataBlob) {
    ByteBuffer byteBufferBlob = MessageFormatTestUtils.getBlobContentForMetadataBlob(blobContentSize);
    data4 = byteBufferBlob.array();
    blobContentSize = data4.length;
  }
  ByteBufferInputStream stream4 = new ByteBufferInputStream(ByteBuffer.wrap(data4));
  MessageFormatInputStream messageFormatStream4 = (blobVersion == Blob_Version_V2) ? new PutMessageFormatInputStream(key4, null, prop4, ByteBuffer.wrap(usermetadata4), stream4, blobContentSize, blobType) : new PutMessageFormatBlobV1InputStream(key4, prop4, ByteBuffer.wrap(usermetadata4), stream4, blobContentSize, blobType);
  // MessageInfo marks this as already expired (the third field is an absolute expiresAt time).
  MessageInfo msgInfo4 = new MessageInfo(key4, messageFormatStream4.getSize(), 1, accountId4, containerId4, prop4.getCreationTimeInMs());
  // create message stream for blob 5
  StoreKey key5 = new MockId("id5");
  short accountId5 = Utils.getRandomShort(RANDOM);
  short containerId5 = Utils.getRandomShort(RANDOM);
  BlobProperties prop5 = new BlobProperties(10, "servid5", accountId5, containerId5, false);
  byte[] encryptionKey5 = new byte[100];
  RANDOM.nextBytes(encryptionKey5);
  byte[] usermetadata5 = new byte[1000];
  RANDOM.nextBytes(usermetadata5);
  blobContentSize = 2000;
  byte[] data5 = new byte[blobContentSize];
  RANDOM.nextBytes(data5);
  if (blobVersion == Blob_Version_V2 && blobType == BlobType.MetadataBlob) {
    ByteBuffer byteBufferBlob = MessageFormatTestUtils.getBlobContentForMetadataBlob(blobContentSize);
    data5 = byteBufferBlob.array();
    blobContentSize = data5.length;
  }
  ByteBufferInputStream stream5 = new ByteBufferInputStream(ByteBuffer.wrap(data5));
  MessageFormatInputStream messageFormatStream5 = (blobVersion == Blob_Version_V2) ? new PutMessageFormatInputStream(key5, ByteBuffer.wrap(encryptionKey5), prop5, ByteBuffer.wrap(usermetadata5), stream5, blobContentSize, blobType) : new PutMessageFormatBlobV1InputStream(key5, prop5, ByteBuffer.wrap(usermetadata5), stream5, blobContentSize, blobType);
  MessageInfo msgInfo5 = new MessageInfo(key5, messageFormatStream5.getSize(), accountId5, containerId5, prop5.getCreationTimeInMs());
  // create message stream for blob 6
  StoreKey key6 = new MockId("id6");
  short accountId6 = Utils.getRandomShort(RANDOM);
  short containerId6 = Utils.getRandomShort(RANDOM);
  BlobProperties prop6 = new BlobProperties(10, "servid6", accountId6, containerId6, false);
  byte[] encryptionKey6 = new byte[100];
  RANDOM.nextBytes(encryptionKey6);
  byte[] usermetadata6 = new byte[1000];
  RANDOM.nextBytes(usermetadata6);
  blobContentSize = 2000;
  byte[] data6 = new byte[blobContentSize];
  RANDOM.nextBytes(data6);
  if (blobVersion == Blob_Version_V2 && blobType == BlobType.MetadataBlob) {
    ByteBuffer byteBufferBlob = MessageFormatTestUtils.getBlobContentForMetadataBlob(blobContentSize);
    data6 = byteBufferBlob.array();
    blobContentSize = data6.length;
  }
  ByteBufferInputStream stream6 = new ByteBufferInputStream(ByteBuffer.wrap(data6));
  MessageFormatInputStream messageFormatStream6 = (blobVersion == Blob_Version_V2) ? new PutMessageFormatInputStream(key6, ByteBuffer.wrap(encryptionKey6), prop6, ByteBuffer.wrap(usermetadata6), stream6, blobContentSize, blobType) : new PutMessageFormatBlobV1InputStream(key6, prop6, ByteBuffer.wrap(usermetadata6), stream6, blobContentSize, blobType);
  MessageInfo msgInfo6 = new MessageInfo(key6, messageFormatStream6.getSize(), accountId6, containerId6, prop6.getCreationTimeInMs());
  // corrupt the message stream but make sure this header version is not valid
  byte[] corruptMessageStream6 = new byte[(int) messageFormatStream6.getSize()];
  RANDOM.nextBytes(corruptMessageStream6);
  corruptMessageStream6[1] = (byte) 100;
  InputStream corruptStream6 = new ByteBufferInputStream(ByteBuffer.wrap(corruptMessageStream6));
  // create input stream for all blob messages together
  byte[] totalMessageStreamContent = new byte[(int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize() + (int) messageFormatStream3.getSize() + (int) messageFormatStream4.getSize() + (int) messageFormatStream5.getSize() + (int) messageFormatStream6.getSize()];
  messageFormatStream1.read(totalMessageStreamContent, 0, (int) messageFormatStream1.getSize());
  corruptStream2.read(totalMessageStreamContent, (int) messageFormatStream1.getSize(), (int) messageFormatStream2.getSize());
  messageFormatStream3.read(totalMessageStreamContent, (int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize(), (int) messageFormatStream3.getSize());
  messageFormatStream4.read(totalMessageStreamContent, (int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize() + (int) messageFormatStream3.getSize(), (int) messageFormatStream4.getSize());
  messageFormatStream5.read(totalMessageStreamContent, (int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize() + (int) messageFormatStream3.getSize() + (int) messageFormatStream4.getSize(), (int) messageFormatStream5.getSize());
  corruptStream6.read(totalMessageStreamContent, (int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize() + (int) messageFormatStream3.getSize() + (int) messageFormatStream4.getSize() + (int) messageFormatStream5.getSize(), (int) messageFormatStream6.getSize());
  InputStream inputStream = new ByteBufferInputStream(ByteBuffer.wrap(totalMessageStreamContent));
  List<MessageInfo> msgInfoList = new ArrayList<>();
  msgInfoList.add(msgInfo1);
  msgInfoList.add(msgInfo2);
  msgInfoList.add(msgInfo3);
  msgInfoList.add(msgInfo4);
  msgInfoList.add(msgInfo5);
  msgInfoList.add(msgInfo6);
  MessageSievingInputStream sievedStream = new MessageSievingInputStream(inputStream, msgInfoList, transformers, new MetricRegistry());
  Map<StoreKey, StoreKey> convertedMap = randomKeyConverter.convert(Arrays.asList(key1, key2, key3, key4, key5, key6));
  int headerSize = getHeaderSizeForVersion(headerVersionToUse);
  int blobPropertiesRecordSize = BlobProperties_Format_V1.getBlobPropertiesRecordSize(prop1);
  int userMetadataSize = UserMetadata_Format_V1.getUserMetadataSize(ByteBuffer.wrap(usermetadata1));
  int totalHeadSize = 2 * headerSize;
  int totalBlobPropertiesSize = 2 * blobPropertiesRecordSize;
  int totalUserMetadataSize = 2 * userMetadataSize;
  int totalBlobSize = 2 * (int) blobSize;
  int totalKeySize = options.contains(TransformerOptions.KeyConvert) ? convertedMap.get(key1).sizeInBytes() + convertedMap.get(key5).sizeInBytes() : key1.sizeInBytes() + key5.sizeInBytes();
  int totalEncryptionRecordSize = blobVersion > Blob_Version_V1 ? BlobEncryptionKey_Format_V1.getBlobEncryptionKeyRecordSize(ByteBuffer.wrap(encryptionKey1)) + BlobEncryptionKey_Format_V1.getBlobEncryptionKeyRecordSize(ByteBuffer.wrap(encryptionKey5)) : 0;
  if (!options.isEmpty()) {
    Assert.assertTrue(sievedStream.hasInvalidMessages());
    Assert.assertEquals((int) sievedStream.getValidMessageInfoList().stream().mapToLong(MessageInfo::getSize).sum(), sievedStream.getSize());
    Assert.assertEquals(totalHeadSize + totalBlobPropertiesSize + totalUserMetadataSize + totalBlobSize + totalKeySize + totalEncryptionRecordSize, sievedStream.getSize());
    verifySievedTransformedMessage(sievedStream, options.contains(TransformerOptions.KeyConvert) ? convertedMap.get(key1) : key1, "servid1", accountId1, containerId1, blobVersion > Blob_Version_V1 ? encryptionKey1 : null, usermetadata1, data1, blobVersion, blobType);
    verifySievedTransformedMessage(sievedStream, options.contains(TransformerOptions.KeyConvert) ? convertedMap.get(key5) : key5, "servid5", accountId5, containerId5, blobVersion > Blob_Version_V1 ? encryptionKey5 : null, usermetadata5, data5, blobVersion, blobType);
  } else {
    // even if there are no transformers, deleted and expired messages should be dropped by the MessageSievingInputStream.
    byte[] expectedBytes = new byte[(int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize() + (int) messageFormatStream5.getSize() + (int) messageFormatStream6.getSize()];
    System.arraycopy(totalMessageStreamContent, 0, expectedBytes, 0, (int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize());
    System.arraycopy(totalMessageStreamContent, totalMessageStreamContent.length - (int) messageFormatStream5.getSize() - (int) messageFormatStream6.getSize(), expectedBytes, (int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize(), (int) messageFormatStream5.getSize() + (int) messageFormatStream6.getSize());
    Assert.assertEquals(expectedBytes.length, sievedStream.getSize());
    byte[] sievedBytes = Utils.readBytesFromStream(sievedStream, sievedStream.getSize());
    Assert.assertArrayEquals(expectedBytes, sievedBytes);
  }
  Assert.assertEquals(-1, sievedStream.read());
}
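The test assembles its input by copying each serialized message into one backing array at a running offset, then wraps the array in a single ByteBufferInputStream. The single read(buf, off, len) calls above fill the requested range in one call only because these streams are buffer-backed; for general InputStreams, readFully is the safe way to express the same copy. A minimal sketch of that concatenation pattern with JDK types:

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;

public class ConcatenationSketch {
  public static void main(String[] args) throws IOException {
    byte[] first = {1, 2, 3};
    byte[] second = {4, 5};
    InputStream stream1 = new ByteArrayInputStream(first);
    InputStream stream2 = new ByteArrayInputStream(second);
    // Copy each message stream into one backing array at its running offset,
    // as the test does before wrapping the result in a ByteBufferInputStream.
    // readFully guards against short reads from arbitrary streams.
    byte[] total = new byte[first.length + second.length];
    new DataInputStream(stream1).readFully(total, 0, first.length);
    new DataInputStream(stream2).readFully(total, first.length, second.length);
    System.out.println(Arrays.toString(total)); // [1, 2, 3, 4, 5]
  }
}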
use of com.github.ambry.utils.ByteBufferInputStream in project ambry by linkedin.
the class ValidatingKeyConvertingTransformer method testDeletedRecords.
private void testDeletedRecords(short blobVersion, BlobType blobType) throws Exception {
  // MessageSievingInputStream contains put records for 2 valid blobs and 1 delete record
  // id1(put record for valid blob), id2(delete record) and id3(put record for valid blob)
  ArrayList<Short> versions = new ArrayList<>();
  versions.add(Message_Header_Version_V1);
  if (blobVersion != Blob_Version_V1) {
    versions.add(Message_Header_Version_V2);
    versions.add(Message_Header_Version_V3);
  }
  try {
    for (short version : versions) {
      headerVersionToUse = version;
      // create message stream for blob 1
      StoreKey key1 = new MockId("id1");
      short accountId = Utils.getRandomShort(RANDOM);
      short containerId = Utils.getRandomShort(RANDOM);
      BlobProperties prop1 = new BlobProperties(10, "servid1", accountId, containerId, false);
      byte[] encryptionKey1 = new byte[100];
      RANDOM.nextBytes(encryptionKey1);
      byte[] usermetadata1 = new byte[1000];
      RANDOM.nextBytes(usermetadata1);
      int blobContentSize = 2000;
      byte[] data1 = new byte[blobContentSize];
      RANDOM.nextBytes(data1);
      if (blobVersion == Blob_Version_V2 && blobType == BlobType.MetadataBlob) {
        ByteBuffer byteBufferBlob = MessageFormatTestUtils.getBlobContentForMetadataBlob(blobContentSize);
        data1 = byteBufferBlob.array();
        blobContentSize = data1.length;
      }
      ByteBufferInputStream stream1 = new ByteBufferInputStream(ByteBuffer.wrap(data1));
      MessageFormatInputStream messageFormatStream1 = (blobVersion == Blob_Version_V2) ? new PutMessageFormatInputStream(key1, ByteBuffer.wrap(encryptionKey1), prop1, ByteBuffer.wrap(usermetadata1), stream1, blobContentSize, blobType) : new PutMessageFormatBlobV1InputStream(key1, prop1, ByteBuffer.wrap(usermetadata1), stream1, blobContentSize, blobType);
      MessageInfo msgInfo1 = new MessageInfo(key1, messageFormatStream1.getSize(), accountId, containerId, prop1.getCreationTimeInMs());
      // create message stream for blob 2 and mark it as deleted
      StoreKey key2 = new MockId("id2");
      accountId = Utils.getRandomShort(RANDOM);
      containerId = Utils.getRandomShort(RANDOM);
      long deletionTimeMs = SystemTime.getInstance().milliseconds() + RANDOM.nextInt();
      MessageFormatInputStream messageFormatStream2 = new DeleteMessageFormatInputStream(key2, accountId, containerId, deletionTimeMs);
      MessageInfo msgInfo2 = new MessageInfo(key2, messageFormatStream2.getSize(), accountId, containerId, deletionTimeMs);
      // create message stream for blob 3
      StoreKey key3 = new MockId("id3");
      accountId = Utils.getRandomShort(RANDOM);
      containerId = Utils.getRandomShort(RANDOM);
      BlobProperties prop3 = new BlobProperties(10, "servid3", accountId, containerId, false);
      byte[] encryptionKey3 = new byte[100];
      RANDOM.nextBytes(encryptionKey3);
      byte[] usermetadata3 = new byte[1000];
      RANDOM.nextBytes(usermetadata3);
      blobContentSize = 2000;
      byte[] data3 = new byte[blobContentSize];
      RANDOM.nextBytes(data3);
      if (blobVersion == Blob_Version_V2 && blobType == BlobType.MetadataBlob) {
        ByteBuffer byteBufferBlob = MessageFormatTestUtils.getBlobContentForMetadataBlob(blobContentSize);
        data3 = byteBufferBlob.array();
        blobContentSize = data3.length;
      }
      ByteBufferInputStream stream3 = new ByteBufferInputStream(ByteBuffer.wrap(data3));
      MessageFormatInputStream messageFormatStream3 = (blobVersion == Blob_Version_V2) ? new PutMessageFormatInputStream(key3, ByteBuffer.wrap(encryptionKey3), prop3, ByteBuffer.wrap(usermetadata3), stream3, blobContentSize, blobType) : new PutMessageFormatBlobV1InputStream(key3, prop3, ByteBuffer.wrap(usermetadata3), stream3, blobContentSize, blobType);
      MessageInfo msgInfo3 = new MessageInfo(key3, messageFormatStream3.getSize(), accountId, containerId, prop3.getCreationTimeInMs());
      // create input stream for all blob messages together
      byte[] totalMessageContent = new byte[(int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize() + (int) messageFormatStream3.getSize()];
      messageFormatStream1.read(totalMessageContent, 0, (int) messageFormatStream1.getSize());
      messageFormatStream2.read(totalMessageContent, (int) messageFormatStream1.getSize(), (int) messageFormatStream2.getSize());
      messageFormatStream3.read(totalMessageContent, (int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize(), (int) messageFormatStream3.getSize());
      InputStream inputStream = new ByteBufferInputStream(ByteBuffer.wrap(totalMessageContent));
      List<MessageInfo> msgInfoList = new ArrayList<MessageInfo>();
      msgInfoList.add(msgInfo1);
      msgInfoList.add(msgInfo2);
      msgInfoList.add(msgInfo3);
      new MessageSievingInputStream(inputStream, msgInfoList, transformers, new MetricRegistry());
      if (!options.isEmpty()) {
        Assert.fail("IOException should have been thrown due to delete record");
      }
    }
  } catch (IOException e) {
    if (options.isEmpty()) {
      Assert.fail("No exceptions should have occurred");
    }
  }
  headerVersionToUse = Message_Header_Version_V1;
}
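One structural point worth noting: because the try block wraps the whole version loop, the first expected IOException ends the iteration, so later header versions are not exercised in the options-present case. A hypothetical, self-contained sketch of scoping the expectation to a single iteration (sieve here is a stand-in for the MessageSievingInputStream constructor, not Ambry's API):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class PerIterationExpectationSketch {
  // Hypothetical sieving-style parser stand-in: rejects a "delete" marker byte
  // the way MessageSievingInputStream rejects delete records when transformers
  // are configured.
  static void sieve(InputStream in, boolean transformersConfigured) throws IOException {
    int marker = in.read();
    if (transformersConfigured && marker == 'D') {
      throw new IOException("delete record in stream");
    }
  }

  public static void main(String[] args) {
    for (int version = 1; version <= 3; version++) {
      // Scope the try/catch to one iteration so every version is exercised,
      // unlike a loop-wide try that stops at the first throw.
      try {
        sieve(new ByteArrayInputStream(new byte[]{'D'}), true);
        System.out.println("version " + version + ": unexpectedly accepted");
      } catch (IOException e) {
        System.out.println("version " + version + ": rejected as expected");
      }
    }
  }
}

Keeping the catch per iteration verifies each header version independently while preserving the same pass/fail semantics for any single version.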