use of com.github.ambry.utils.ByteBufferInputStream in project ambry by linkedin.
the class HardDeleteRecoveryMetadata method getBlobEncryptionKeyRecord.
/**
* Get the Blob Encryption Key Record from the given readSet
* @param readSet the {@link MessageReadSet} from which to read.
* @param readSetIndex the index of the message in the readSet.
* @param relativeOffset the relative offset in the message from which to read.
* @param blobEncryptionKeySize the size of the record to read (in this case the encryption key record).
* @return the read blob encryption key record.
* @throws MessageFormatException
* @throws IOException
*/
private ByteBuffer getBlobEncryptionKeyRecord(MessageReadSet readSet, int readSetIndex, long relativeOffset, long blobEncryptionKeySize) throws MessageFormatException, IOException {
  /* Read the field from the channel */
  ByteBuffer blobEncryptionKey = ByteBuffer.allocate((int) blobEncryptionKeySize);
  readSet.writeTo(readSetIndex, Channels.newChannel(new ByteBufferOutputStream(blobEncryptionKey)), relativeOffset, blobEncryptionKeySize);
  blobEncryptionKey.flip();
  return deserializeBlobEncryptionKey(new ByteBufferInputStream(blobEncryptionKey));
}
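The allocate-write-flip-read pattern above is easy to get wrong, so here is a minimal, self-contained sketch of the same round trip using only the two Ambry utility classes and the JDK. The class name BufferRoundTripSketch and the sample payload are illustrative and not part of the Ambry codebase.

import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.WritableByteChannel;
import java.nio.charset.StandardCharsets;
import com.github.ambry.utils.ByteBufferInputStream;
import com.github.ambry.utils.ByteBufferOutputStream;

public class BufferRoundTripSketch {
  public static void main(String[] args) throws Exception {
    byte[] payload = "record-bytes".getBytes(StandardCharsets.UTF_8);
    // Writer side: fill a pre-sized ByteBuffer through a channel, mirroring readSet.writeTo(...) above.
    ByteBuffer buffer = ByteBuffer.allocate(payload.length);
    WritableByteChannel channel = Channels.newChannel(new ByteBufferOutputStream(buffer));
    channel.write(ByteBuffer.wrap(payload));
    // flip() switches the buffer from write mode to read mode before handing it to a deserializer.
    buffer.flip();
    // Reader side: expose the filled buffer as an InputStream without copying the bytes.
    ByteBufferInputStream stream = new ByteBufferInputStream(buffer);
    byte[] readBack = new byte[payload.length];
    int bytesRead = stream.read(readBack);
    System.out.println(bytesRead + " bytes read back: " + new String(readBack, StandardCharsets.UTF_8));
  }
}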
use of com.github.ambry.utils.ByteBufferInputStream in project ambry by linkedin.
the class CloudBlobStoreTest method forceUploadExpiredBlob.
/**
* Force upload an expired blob to cloud destination by directly calling {@code CloudDestination}'s {@code uploadBlob} method to avoid expiry checks during upload.
* @return the {@link BlobId} of the uploaded blob.
* @throws CloudStorageException if upload fails
*/
private BlobId forceUploadExpiredBlob() throws CloudStorageException {
  BlobId expiredBlobId = getUniqueId(refAccountId, refContainerId, false, partitionId);
  long size = SMALL_BLOB_SIZE;
  long currentTime = System.currentTimeMillis();
  CloudBlobMetadata expiredBlobMetadata = new CloudBlobMetadata(expiredBlobId, currentTime, currentTime - 1, size, null);
  ByteBuffer buffer = ByteBuffer.wrap(TestUtils.getRandomBytes((int) size));
  InputStream inputStream = new ByteBufferInputStream(buffer);
  dest.uploadBlob(expiredBlobId, size, expiredBlobMetadata, inputStream);
  return expiredBlobId;
}
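As a hedged illustration of how a test might consume the returned id, the sketch below assumes the test class also holds a started CloudBlobStore field named store backed by the same dest. The expectation that a plain get rejects the expired blob with StoreErrorCodes.TTL_Expired while Store_Include_Expired returns it is an assumption about store behavior, not something shown in this snippet.

// Hypothetical test body; `store` (a started CloudBlobStore over `dest`) is an assumed fixture.
BlobId expiredId = forceUploadExpiredBlob();
try {
  store.get(Collections.singletonList(expiredId), EnumSet.noneOf(StoreGetOptions.class));
  fail("A plain get is expected to reject the expired blob");
} catch (StoreException e) {
  assertEquals(StoreErrorCodes.TTL_Expired, e.getErrorCode());
}
// Explicitly asking for expired blobs should return the uploaded record.
StoreInfo info = store.get(Collections.singletonList(expiredId), EnumSet.of(StoreGetOptions.Store_Include_Expired));
assertEquals(1, info.getMessageReadSetInfo().size());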
use of com.github.ambry.utils.ByteBufferInputStream in project ambry by linkedin.
the class HelixClusterManagerTest method metricsTest.
/**
* Test that the metrics in {@link HelixClusterManagerMetrics} are updated as expected. This also tests and ensures
* coverage of the methods in {@link HelixClusterManager} that are used only by {@link HelixClusterManagerMetrics}.
*/
@Test
public void metricsTest() throws Exception {
  assumeTrue(!overrideEnabled);
  counters = clusterManager.getMetricRegistry().getCounters();
  gauges = clusterManager.getMetricRegistry().getGauges();
  // live instance trigger happens once initially.
  long instanceTriggerCount = helixDcs.length;
  // Bring one instance (not current instance) down in each dc in order to test the metrics more generally.
  for (String zkAddr : helixCluster.getZkAddrs()) {
    String instance = helixCluster.getUpInstances(zkAddr).stream().filter(name -> !name.equals(selfInstanceName)).findFirst().get();
    helixCluster.bringInstanceDown(instance);
    instanceTriggerCount++;
  }
  // trigger for live instance change event should have come in twice per dc - the initial one, and the one due to
  // the node brought down in each DC.
  assertEquals(instanceTriggerCount, getCounterValue("liveInstanceChangeTriggerCount"));
  assertEquals(helixDcs.length, getCounterValue("instanceConfigChangeTriggerCount"));
  assertEquals(helixCluster.getDataCenterCount() + numCloudDcs, getGaugeValue("datacenterCount"));
  assertEquals(helixCluster.getDownInstances().size() + helixCluster.getUpInstances().size(), getGaugeValue("dataNodeCount"));
  assertEquals(helixCluster.getDownInstances().size(), getGaugeValue("dataNodeDownCount"));
  assertEquals(helixCluster.getDiskCount(), getGaugeValue("diskCount"));
  assertEquals(helixCluster.getDiskDownCount(), getGaugeValue("diskDownCount"));
  assertEquals(helixCluster.getAllPartitions().size(), getGaugeValue("partitionCount"));
  assertEquals(helixCluster.getAllWritablePartitions().size(), getGaugeValue("partitionReadWriteCount"));
  assertEquals(helixCluster.getAllPartitions().size() - helixCluster.getAllWritablePartitions().size(), getGaugeValue("partitionSealedCount"));
  assertEquals(helixCluster.getDiskCapacity(), getGaugeValue("rawTotalCapacityBytes"));
  assertEquals(0L, getGaugeValue("isMajorityReplicasDownForAnyPartition"));
  assertEquals(0L, getGaugeValue(helixCluster.getDownInstances().iterator().next().replace('_', '-') + "-DataNodeResourceState"));
  assertEquals(1L, getGaugeValue(helixCluster.getUpInstances().iterator().next().replace('_', '-') + "-DataNodeResourceState"));
  helixCluster.bringAllInstancesDown();
  assertEquals(1L, getGaugeValue("isMajorityReplicasDownForAnyPartition"));
  if (useComposite) {
    helixCluster.bringAllInstancesUp();
    PartitionId partition = clusterManager.getWritablePartitionIds(null).get(0);
    assertEquals(0L, getCounterValue("getPartitionIdFromStreamMismatchCount"));
    ReplicaId replicaId = partition.getReplicaIds().get(0);
    assertEquals(0L, getCounterValue("getReplicaIdsMismatchCount"));
    // bring the replica down.
    for (int i = 0; i < clusterMapConfig.clusterMapFixedTimeoutDiskErrorThreshold; i++) {
      clusterManager.onReplicaEvent(replicaId, ReplicaEventType.Disk_Error);
    }
    clusterManager.getWritablePartitionIds(null);
    assertEquals(0L, getCounterValue("getPartitionIdFromStreamMismatchCount"));
    InputStream partitionStream = new ByteBufferInputStream(ByteBuffer.wrap(partition.getBytes()));
    clusterManager.getPartitionIdFromStream(partitionStream);
    assertEquals(0L, getCounterValue("getWritablePartitionIdsMismatchCount"));
    clusterManager.hasDatacenter("invalid");
    clusterManager.hasDatacenter(helixDcs[0]);
    assertEquals(0L, getCounterValue("hasDatacenterMismatchCount"));
    DataNodeId dataNodeId = clusterManager.getDataNodeIds().get(0);
    assertEquals(0L, getCounterValue("getDataNodeIdsMismatchCount"));
    clusterManager.getDataNodeId(dataNodeId.getHostname(), dataNodeId.getPort());
    assertEquals(0L, getCounterValue("getDataNodeIdMismatchCount"));
  }
}
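The assertions above rely on test helpers getCounterValue and getGaugeValue that read from the counters and gauges maps captured at the top of the test. Their implementation is not shown in this listing; a plausible sketch, assuming the helpers match metric names by suffix, could look like:

// Assumed shape of the lookup helpers used above (not the test's actual code): find the metric whose
// registry name ends with the given suffix and return its current value.
private long getCounterValue(String suffix) {
  return counters.entrySet().stream()
      .filter(entry -> entry.getKey().endsWith(suffix))
      .findFirst()
      .map(entry -> entry.getValue().getCount())
      .orElseThrow(() -> new IllegalArgumentException("No counter ending with " + suffix));
}

private long getGaugeValue(String suffix) {
  return gauges.entrySet().stream()
      .filter(entry -> entry.getKey().endsWith(suffix))
      .findFirst()
      .map(entry -> ((Number) entry.getValue().getValue()).longValue())
      .orElseThrow(() -> new IllegalArgumentException("No gauge ending with " + suffix));
}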
use of com.github.ambry.utils.ByteBufferInputStream in project ambry by linkedin.
the class HelixClusterManagerTest method testPartitionReplicaConsistency.
/**
 * Tests that the replica count and the replica-to-partition-id mappings reported by the cluster manager are the same
 * as those in the cluster.
*/
private void testPartitionReplicaConsistency() throws Exception {
  for (PartitionId partition : clusterManager.getWritablePartitionIds(null)) {
    assertEquals(testPartitionLayout.getTotalReplicaCount() + numCloudDcs, partition.getReplicaIds().size());
    InputStream partitionStream = new ByteBufferInputStream(ByteBuffer.wrap(partition.getBytes()));
    PartitionId fetchedPartition = clusterManager.getPartitionIdFromStream(partitionStream);
    assertEquals(partition, fetchedPartition);
  }
}
use of com.github.ambry.utils.ByteBufferInputStream in project ambry by linkedin.
the class HaltingInputStream method commonCasesTest.
/**
* Tests different types of {@link InputStream} and different sizes of the stream and ensures that the data is read
* correctly.
* @throws Exception
*/
@Test
public void commonCasesTest() throws Exception {
  int bufSize = InputStreamReadableStreamChannel.BUFFER_SIZE;
  int randSizeLessThanBuffer = TestUtils.RANDOM.nextInt(bufSize - 2) + 2;
  int randMultiplier = TestUtils.RANDOM.nextInt(10);
  int[] testStreamSizes = {0, 1, randSizeLessThanBuffer, bufSize, bufSize + 1, bufSize * randMultiplier, bufSize * randMultiplier + 1};
  for (int size : testStreamSizes) {
    byte[] src = TestUtils.getRandomBytes(size);
    InputStream stream = new ByteBufferInputStream(ByteBuffer.wrap(src));
    doReadTest(stream, src, src.length);
    stream = new ByteBufferInputStream(ByteBuffer.wrap(src));
    doReadTest(stream, src, -1);
    stream = new HaltingInputStream(new ByteBufferInputStream(ByteBuffer.wrap(src)));
    doReadTest(stream, src, src.length);
    stream = new HaltingInputStream(new ByteBufferInputStream(ByteBuffer.wrap(src)));
    doReadTest(stream, src, -1);
  }
}
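HaltingInputStream is a test-only wrapper whose source is not shown in this listing; judging from how the test uses it, its purpose is to return data in smaller pieces than requested so that read loops are exercised under short reads. A minimal sketch of that idea (the class and constant names are illustrative, and Ambry's actual HaltingInputStream may behave differently) could look like:

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;

// Illustrative short-read wrapper: never returns more than maxChunk bytes per read() call, even when
// more data is available, forcing callers to loop until the stream is exhausted.
public class ShortReadInputStream extends FilterInputStream {
  private final int maxChunk;

  public ShortReadInputStream(InputStream in, int maxChunk) {
    super(in);
    this.maxChunk = maxChunk;
  }

  @Override
  public int read(byte[] b, int off, int len) throws IOException {
    return super.read(b, off, Math.min(len, maxChunk));
  }
}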