Use of com.github.ambry.commons.BlobIdFactory in project ambry by linkedin.
The class CloudOperationTest, method getBlobAndAssertSuccess.
/**
 * Construct GetBlob operations with appropriate callbacks, then poll those operations until they complete,
 * and ensure that the whole blob data is read out and the contents match.
 * @param blobId id of the blob to get
 * @param expectedLifeVersion the expected lifeVersion from the get operation
 * @param expectedBlobSize the expected blob size
 * @param expectedBlobProperties the expected {@link BlobProperties} for the blob
 * @param expectedUserMetadata the expected user metadata
 * @param expectPutContent the expected blob content
 * @param options options of the get blob operation
 * @throws Exception Any unexpected exception
 */
private void getBlobAndAssertSuccess(final BlobId blobId, final short expectedLifeVersion,
    final int expectedBlobSize, final BlobProperties expectedBlobProperties, final byte[] expectedUserMetadata,
    final byte[] expectPutContent, final GetBlobOptionsInternal options) throws Exception {
  final CountDownLatch readCompleteLatch = new CountDownLatch(1);
  final AtomicLong readCompleteResult = new AtomicLong(0);
  // callback to compare the data
  Callback<GetBlobResultInternal> callback = (result, exception) -> {
    Assert.assertNull("Shouldn't have exception", exception);
    try {
      BlobInfo blobInfo;
      switch (options.getBlobOptions.getOperationType()) {
        case All:
          Assert.assertFalse("not supposed to be raw mode", options.getBlobOptions.isRawMode());
          blobInfo = result.getBlobResult.getBlobInfo();
          Assert.assertTrue("Blob properties must be the same",
              RouterTestHelpers.arePersistedFieldsEquivalent(expectedBlobProperties, blobInfo.getBlobProperties()));
          Assert.assertEquals("Blob size in received blobProperties should be the same as actual", expectedBlobSize,
              blobInfo.getBlobProperties().getBlobSize());
          Assert.assertArrayEquals("User metadata must be the same", expectedUserMetadata, blobInfo.getUserMetadata());
          Assert.assertEquals("LifeVersion mismatch", expectedLifeVersion, blobInfo.getLifeVersion());
          break;
        case Data:
          Assert.assertNull("Unexpected blob info in operation result", result.getBlobResult.getBlobInfo());
          break;
        case BlobInfo:
          blobInfo = result.getBlobResult.getBlobInfo();
          Assert.assertTrue("Blob properties must be the same",
              RouterTestHelpers.arePersistedFieldsEquivalent(expectedBlobProperties, blobInfo.getBlobProperties()));
          Assert.assertEquals("Blob size in received blobProperties should be the same as actual", expectedBlobSize,
              blobInfo.getBlobProperties().getBlobSize());
          Assert.assertNull("Unexpected blob data in operation result", result.getBlobResult.getBlobDataChannel());
          Assert.assertEquals("LifeVersion mismatch", expectedLifeVersion, blobInfo.getLifeVersion());
      }
    } catch (Throwable e) {
      Assert.fail("Shouldn't receive exception here");
    }
    if (options.getBlobOptions.getOperationType() != GetBlobOptions.OperationType.BlobInfo) {
      final ByteBufferAsyncWritableChannel asyncWritableChannel = new ByteBufferAsyncWritableChannel();
      Utils.newThread(() -> {
        Future<Long> readIntoFuture = result.getBlobResult.getBlobDataChannel().readInto(asyncWritableChannel, null);
        assertBlobReadSuccess(options.getBlobOptions, readIntoFuture, asyncWritableChannel,
            result.getBlobResult.getBlobDataChannel(), readCompleteLatch, readCompleteResult, expectedBlobSize,
            expectPutContent);
      }, false).start();
    } else {
      readCompleteLatch.countDown();
    }
  };
  // create GetBlobOperation
  final Map<Integer, GetOperation> correlationIdToGetOperation = new HashMap<>();
  final RequestRegistrationCallback<GetOperation> requestRegistrationCallback =
      new RequestRegistrationCallback<>(correlationIdToGetOperation);
  NonBlockingRouter.currentOperationsCount.incrementAndGet();
  GetBlobOperation op = new GetBlobOperation(routerConfig, routerMetrics, mockClusterMap, responseHandler, blobId,
      options, callback, routerCallback, blobIdFactory, null, null, null, time, false, null);
  requestRegistrationCallback.setRequestsToSend(new ArrayList<>());
  // Wait for the operation to complete
  while (!op.isOperationComplete()) {
    op.poll(requestRegistrationCallback);
    List<ResponseInfo> responses = sendAndWaitForResponses(requestRegistrationCallback.getRequestsToSend());
    for (ResponseInfo responseInfo : responses) {
      GetResponse getResponse = RouterUtils.extractResponseAndNotifyResponseHandler(responseHandler, routerMetrics,
          responseInfo, stream -> GetResponse.readFrom(stream, mockClusterMap), response -> {
            ServerErrorCode serverError = response.getError();
            if (serverError == ServerErrorCode.No_Error) {
              serverError = response.getPartitionResponseInfoList().get(0).getErrorCode();
            }
            return serverError;
          });
      op.handleResponse(responseInfo, getResponse);
      responseInfo.release();
    }
  }
  readCompleteLatch.await();
  Assert.assertTrue("Operation should be complete at this time", op.isOperationComplete());
  // Ensure that a ChannelClosed exception is not set when the ReadableStreamChannel is closed correctly.
  Assert.assertNull("Callback operation exception should be null", op.getOperationException());
  if (options.getBlobOptions.getOperationType() != GetBlobOptions.OperationType.BlobInfo
      && !options.getBlobOptions.isRawMode() && !options.getChunkIdsOnly) {
    int sizeWritten = expectedBlobSize;
    if (options.getBlobOptions.getRange() != null) {
      ByteRange range = options.getBlobOptions.getRange()
          .toResolvedByteRange(expectedBlobSize, options.getBlobOptions.resolveRangeOnEmptyBlob());
      sizeWritten = (int) range.getRangeSize();
    }
    Assert.assertEquals("Size read must equal size written", sizeWritten, readCompleteResult.get());
  }
}
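For context, a caller builds a GetBlobOptionsInternal around a public GetBlobOptions and hands it to this helper. Below is a minimal sketch of such a call site; GetBlobOptionsBuilder is the public builder for GetBlobOptions, but the exact GetBlobOptionsInternal constructor arguments (in particular the histogram) are assumptions inferred from the fields the helper reads (getBlobOptions, getChunkIdsOnly), not confirmed by the snippet above.

  // Hedged sketch: drive getBlobAndAssertSuccess once per operation type.
  for (GetBlobOptions.OperationType type : GetBlobOptions.OperationType.values()) {
    GetBlobOptions getBlobOptions = new GetBlobOptionsBuilder().operationType(type).build();
    // Assumed constructor: (GetBlobOptions, boolean getChunkIdsOnly, Histogram ageAtAccess)
    GetBlobOptionsInternal options =
        new GetBlobOptionsInternal(getBlobOptions, false /* getChunkIdsOnly */, routerMetrics.ageAtGet);
    getBlobAndAssertSuccess(blobId, (short) 0, blobSize, blobProperties, userMetadata, putContent, options);
  }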
Use of com.github.ambry.commons.BlobIdFactory in project ambry by linkedin.
The class MockReadableStreamChannel, method verifyBlob.
/**
 * Verifies that the blob associated with the blob id returned by a successful put operation has exactly the same
 * data as the original object that was put.
 * @param requestAndResult the {@link RequestAndResult} to use for verification.
 * @param serializedRequests the mapping from blob ids to their corresponding serialized {@link PutRequest}.
 */
private void verifyBlob(RequestAndResult requestAndResult, HashMap<String, ByteBuffer> serializedRequests)
    throws Exception {
  String blobId = requestAndResult.result.result();
  ByteBuffer serializedRequest = serializedRequests.get(blobId);
  PutRequest request = deserializePutRequest(serializedRequest);
  NotificationBlobType notificationBlobType;
  BlobId origBlobId = new BlobId(blobId, mockClusterMap);
  boolean stitchOperation = requestAndResult.chunksToStitch != null;
  if (stitchOperation) {
    assertEquals("Stitch operations should always produce metadata blobs", BlobType.MetadataBlob,
        request.getBlobType());
  }
  if (request.getBlobType() == BlobType.MetadataBlob) {
    notificationBlobType = NotificationBlobType.Composite;
    assertEquals("Expected metadata", BlobDataType.METADATA, origBlobId.getBlobDataType());
    byte[] data = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
    CompositeBlobInfo compositeBlobInfo = MetadataContentSerDe.deserializeMetadataContentRecord(
        ByteBuffer.wrap(data), new BlobIdFactory(mockClusterMap));
    List<StoreKey> dataBlobIds = compositeBlobInfo.getKeys();
    long expectedMaxChunkSize;
    long expectedTotalSize;
    int expectedNumChunks;
    if (stitchOperation) {
      expectedMaxChunkSize =
          requestAndResult.chunksToStitch.stream().mapToLong(ChunkInfo::getChunkSizeInBytes).max().orElse(0);
      expectedTotalSize = requestAndResult.chunksToStitch.stream().mapToLong(ChunkInfo::getChunkSizeInBytes).sum();
      expectedNumChunks = requestAndResult.chunksToStitch.size();
    } else {
      expectedMaxChunkSize = chunkSize;
      expectedTotalSize = requestAndResult.putContent.length;
      expectedNumChunks = RouterUtils.getNumChunksForBlobAndChunkSize(requestAndResult.putContent.length, chunkSize);
    }
    if (metadataContentVersion <= MessageFormatRecord.Metadata_Content_Version_V2) {
      assertEquals("Wrong max chunk size in metadata", expectedMaxChunkSize, compositeBlobInfo.getChunkSize());
    }
    assertEquals("Wrong total size in metadata", expectedTotalSize, compositeBlobInfo.getTotalSize());
    assertEquals("Number of chunks is not as expected", expectedNumChunks, dataBlobIds.size());
    // Verify that every dataBlobId is a data chunk
    for (StoreKey key : dataBlobIds) {
      BlobId origDataBlobId = (BlobId) key;
      assertEquals("Expected datachunk", BlobDataType.DATACHUNK, origDataBlobId.getBlobDataType());
    }
    // verify user metadata
    if (requestAndResult.putBlobProperties.isEncrypted()) {
      ByteBuffer userMetadata = request.getUsermetadata();
      // run() is called directly instead of starting a thread via start() because exceptions or
      // assertion failures in a non-main thread would not fail the test.
      new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), null, userMetadata, cryptoService, kms,
          null, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
        assertNull("Exception should not be thrown", exception);
        assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
        assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata,
            result.getDecryptedUserMetadata().array());
      }).run();
    } else {
      assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata, request.getUsermetadata().array());
    }
    if (!stitchOperation) {
      verifyCompositeBlob(requestAndResult.putBlobProperties, requestAndResult.putContent,
          requestAndResult.putUserMetadata, dataBlobIds, request, serializedRequests);
    }
  } else {
    notificationBlobType =
        requestAndResult.options.isChunkUpload() ? NotificationBlobType.DataChunk : NotificationBlobType.Simple;
    // TODO: Currently, we don't have the logic to distinguish Simple vs DataChunk for the first chunk.
    // Once the logic is fixed we should assert Simple.
    BlobDataType dataType = origBlobId.getBlobDataType();
    assertTrue("Invalid blob data type", dataType == BlobDataType.DATACHUNK || dataType == BlobDataType.SIMPLE);
    byte[] content = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
    if (!requestAndResult.putBlobProperties.isEncrypted()) {
      assertArrayEquals("Input blob and written blob should be the same", requestAndResult.putContent, content);
      assertArrayEquals("UserMetadata mismatch for simple blob", requestAndResult.putUserMetadata,
          request.getUsermetadata().array());
    } else {
      ByteBuffer userMetadata = request.getUsermetadata();
      // run() is called directly instead of starting a thread via start() because exceptions or
      // assertion failures in a non-main thread would not fail the test.
      new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), Unpooled.wrappedBuffer(content),
          userMetadata, cryptoService, kms, null, new CryptoJobMetricsTracker(metrics.decryptJobMetrics),
          (result, exception) -> {
        assertNull("Exception should not be thrown", exception);
        assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
        ByteBuf decryptedBlobContent = result.getDecryptedBlobContent();
        byte[] blobContent = new byte[decryptedBlobContent.readableBytes()];
        decryptedBlobContent.readBytes(blobContent);
        assertArrayEquals("Content mismatch", requestAndResult.putContent, blobContent);
        assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata,
            result.getDecryptedUserMetadata().array());
        decryptedBlobContent.release();
      }).run();
    }
  }
  notificationSystem.verifyNotification(blobId, notificationBlobType, request.getBlobProperties());
}
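The BlobIdFactory's role in this test is concentrated in the metadata deserialization step. Here is a standalone restatement of that step, using only calls that appear in verifyBlob above; metadataBlobContent and clusterMap are illustrative names, not variables from the test.

  // A BlobIdFactory lets MetadataContentSerDe turn the serialized chunk list of a
  // composite (metadata) blob back into StoreKeys, which are in fact BlobIds.
  CompositeBlobInfo compositeBlobInfo = MetadataContentSerDe.deserializeMetadataContentRecord(
      ByteBuffer.wrap(metadataBlobContent), new BlobIdFactory(clusterMap));
  for (StoreKey chunkKey : compositeBlobInfo.getKeys()) {
    BlobId chunkId = (BlobId) chunkKey;
    System.out.println(chunkId.getID() + " -> " + chunkId.getBlobDataType());
  }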
Use of com.github.ambry.commons.BlobIdFactory in project ambry by linkedin.
The class Http2NetworkClientTest, method putGetTest.
@Test
public void putGetTest() throws Exception {
  MockClusterMap clusterMap = http2Cluster.getClusterMap();
  DataNodeId dataNodeId = http2Cluster.getGeneralDataNode();
  BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
  SSLFactory sslFactory = new NettySslHttp2Factory(clientSSLConfig);
  Http2NetworkClient networkClient = new Http2NetworkClient(new Http2ClientMetrics(new MetricRegistry()),
      new Http2ClientConfig(new VerifiableProperties(new Properties())), sslFactory, eventLoopGroup);
  // Put a blob
  int blobSize = 1024 * 1024;
  byte[] usermetadata = new byte[1000];
  byte[] data = new byte[blobSize];
  short accountId = Utils.getRandomShort(TestUtils.RANDOM);
  short containerId = Utils.getRandomShort(TestUtils.RANDOM);
  BlobProperties properties = new BlobProperties(blobSize, "serviceid1", accountId, containerId, false);
  TestUtils.RANDOM.nextBytes(usermetadata);
  TestUtils.RANDOM.nextBytes(data);
  List<? extends PartitionId> partitionIds =
      clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
  short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
  BlobId blobId1 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(),
      properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false,
      BlobId.BlobDataType.DATACHUNK);
  // put blob 1
  PutRequest putRequest = new PutRequest(1, "client1", blobId1, properties, ByteBuffer.wrap(usermetadata),
      Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, null);
  RequestInfo request = new RequestInfo(dataNodeId.getHostname(), new Port(dataNodeId.getHttp2Port(), PortType.HTTP2),
      putRequest, clusterMap.getReplicaIds(dataNodeId).get(0), null);
  List<ResponseInfo> responseInfos =
      networkClient.sendAndPoll(Collections.singletonList(request), new HashSet<>(), 300);
  long startTime = SystemTime.getInstance().milliseconds();
  while (responseInfos.size() == 0) {
    responseInfos = networkClient.sendAndPoll(Collections.EMPTY_LIST, new HashSet<>(), 300);
    if (SystemTime.getInstance().milliseconds() - startTime >= 6000) {
      fail("Network client timed out without a response.");
    }
    Thread.sleep(30);
  }
  assertEquals("Should be only one response", 1, responseInfos.size());
  DataInputStream dis = new NettyByteBufDataInputStream(responseInfos.get(0).content());
  PutResponse putResponse = PutResponse.readFrom(dis);
  assertEquals("No error expected.", ServerErrorCode.No_Error, putResponse.getError());
  // Get the blob, including its blob properties
  ArrayList<BlobId> ids = new ArrayList<BlobId>();
  MockPartitionId partition =
      (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
  ids.add(blobId1);
  ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
  PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(partition, ids);
  partitionRequestInfoList.add(partitionRequestInfo);
  GetRequest getRequest =
      new GetRequest(1, "http2-clientid", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
  request = new RequestInfo(dataNodeId.getHostname(), new Port(dataNodeId.getHttp2Port(), PortType.HTTP2), getRequest,
      clusterMap.getReplicaIds(dataNodeId).get(0), null);
  responseInfos = networkClient.sendAndPoll(Collections.singletonList(request), new HashSet<>(), 300);
  startTime = SystemTime.getInstance().milliseconds();
  while (responseInfos.size() == 0) {
    responseInfos = networkClient.sendAndPoll(Collections.EMPTY_LIST, new HashSet<>(), 300);
    if (SystemTime.getInstance().milliseconds() - startTime >= 3000) {
      fail("Network client timed out without a response.");
    }
    Thread.sleep(30);
  }
  assertEquals("Should be only one response", 1, responseInfos.size());
  dis = new NettyByteBufDataInputStream(responseInfos.get(0).content());
  GetResponse resp = GetResponse.readFrom(dis, clusterMap);
  BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
  // verify BlobProperties
  BlobProperties propertyOutput = blobAll.getBlobInfo().getBlobProperties();
  assertEquals(blobSize, propertyOutput.getBlobSize());
  assertEquals("serviceid1", propertyOutput.getServiceId());
  assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
  assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
  // verify UserMetadata
  byte[] userMetadataOutput = blobAll.getBlobInfo().getUserMetadata();
  assertArrayEquals(usermetadata, userMetadataOutput);
  // verify content
  byte[] actualBlobData = getBlobDataAndRelease(blobAll.getBlobData());
  assertArrayEquals("Content mismatch.", data, actualBlobData);
}
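The get-side decode path is compact enough to isolate: the BlobIdFactory acts as the StoreKeyFactory that deserializeBlobAll uses to reconstruct the blob id stored with the record. This sketch simply restates the calls from putGetTest, with dis and clusterMap as above.

  GetResponse resp = GetResponse.readFrom(dis, clusterMap);
  BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), new BlobIdFactory(clusterMap));
  BlobProperties propertyOutput = blobAll.getBlobInfo().getBlobProperties();
  byte[] userMetadataOutput = blobAll.getBlobInfo().getUserMetadata();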
Use of com.github.ambry.commons.BlobIdFactory in project ambry by linkedin.
The class ServerTestUtil, method endToEndReplicationWithMultiNodeMultiPartitionMultiDCTest.
static void endToEndReplicationWithMultiNodeMultiPartitionMultiDCTest(String sourceDatacenter,
    String sslEnabledDatacenters, PortType portType, MockCluster cluster, MockNotificationSystem notificationSystem,
    Properties routerProps) throws Exception {
  Properties props = new Properties();
  props.setProperty("router.hostname", "localhost");
  props.setProperty("router.datacenter.name", sourceDatacenter);
  props.setProperty("router.put.request.parallelism", "1");
  props.setProperty("router.put.success.target", "1");
  props.setProperty("clustermap.cluster.name", "test");
  props.setProperty("clustermap.datacenter.name", sourceDatacenter);
  props.setProperty("clustermap.host.name", "localhost");
  props.setProperty("kms.default.container.key", TestUtils.getRandomKey(32));
  props.putAll(routerProps);
  VerifiableProperties verifiableProperties = new VerifiableProperties(props);
  AccountService accountService = new InMemAccountService(false, true);
  Router router = new NonBlockingRouterFactory(verifiableProperties, cluster.getClusterMap(), notificationSystem,
      getSSLFactoryIfRequired(verifiableProperties), accountService).getRouter();
  int numberOfRequestsToSend = 15;
  int numberOfVerifierThreads = 3;
  final LinkedBlockingQueue<Payload> payloadQueue = new LinkedBlockingQueue<Payload>();
  final AtomicReference<Exception> exceptionRef = new AtomicReference<>(null);
  final CountDownLatch callbackLatch = new CountDownLatch(numberOfRequestsToSend);
  List<Future<String>> putFutures = new ArrayList<>(numberOfRequestsToSend);
  short accountId = Utils.getRandomShort(TestUtils.RANDOM);
  short containerId = Utils.getRandomShort(TestUtils.RANDOM);
  for (int i = 0; i < numberOfRequestsToSend; i++) {
    int size = new Random().nextInt(5000);
    final BlobProperties properties = new BlobProperties(size, "service1", "owner id check", "image/jpeg", false,
        TestUtils.TTL_SECS, cluster.time.milliseconds(), accountId, containerId, false, null, null, null);
    final byte[] metadata = new byte[new Random().nextInt(1000)];
    final byte[] blob = new byte[size];
    TestUtils.RANDOM.nextBytes(metadata);
    TestUtils.RANDOM.nextBytes(blob);
    Future<String> future = router.putBlob(properties, metadata,
        new ByteBufferReadableStreamChannel(ByteBuffer.wrap(blob)), new PutBlobOptionsBuilder().build(),
        new Callback<String>() {
          @Override
          public void onCompletion(String result, Exception exception) {
            if (exception == null) {
              payloadQueue.add(new Payload(properties, metadata, blob, result));
            } else {
              exceptionRef.set(exception);
            }
            callbackLatch.countDown();
          }
        }, QUOTA_CHARGE_EVENT_LISTENER);
    putFutures.add(future);
  }
  for (Future<String> future : putFutures) {
    future.get(20, TimeUnit.SECONDS);
  }
  assertTrue("Did not receive all callbacks in time", callbackLatch.await(1, TimeUnit.SECONDS));
  if (exceptionRef.get() != null) {
    throw exceptionRef.get();
  }
  // hold on to one payload for later use
  Payload payload1 = payloadQueue.peek();
  MockClusterMap clusterMap = cluster.getClusterMap();
  BlobId blobId1 = new BlobId(payload1.blobId, clusterMap);
  assertEquals("Did not put expected number of blobs", numberOfRequestsToSend, payloadQueue.size());
  Properties sslProps = new Properties();
  sslProps.putAll(routerProps);
  sslProps.setProperty("clustermap.ssl.enabled.datacenters", sslEnabledDatacenters);
  sslProps.setProperty("clustermap.cluster.name", "test");
  sslProps.setProperty("clustermap.datacenter.name", sourceDatacenter);
  sslProps.setProperty("clustermap.host.name", "localhost");
  sslProps.setProperty("connectionpool.read.timeout.ms", "15000");
  VerifiableProperties vProps = new VerifiableProperties(sslProps);
  ConnectionPool connectionPool = new BlockingChannelConnectionPool(new ConnectionPoolConfig(vProps),
      new SSLConfig(vProps), new ClusterMapConfig(vProps), new MetricRegistry());
  CountDownLatch verifierLatch = new CountDownLatch(numberOfVerifierThreads);
  AtomicInteger totalRequests = new AtomicInteger(numberOfRequestsToSend);
  AtomicInteger verifiedRequests = new AtomicInteger(0);
  AtomicBoolean cancelTest = new AtomicBoolean(false);
  for (int i = 0; i < numberOfVerifierThreads; i++) {
    Thread thread = new Thread(new Verifier(payloadQueue, verifierLatch, totalRequests, verifiedRequests,
        cluster.getClusterMap(), cancelTest, portType, connectionPool, notificationSystem, cluster.time));
    thread.start();
  }
  assertTrue("Did not verify in 2 minutes", verifierLatch.await(2, TimeUnit.MINUTES));
  assertEquals(totalRequests.get(), verifiedRequests.get());
  BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
  MockDataNodeId dataNodeId = clusterMap.getDataNodes().get(0);
  Port port = new Port(portType == PortType.PLAINTEXT ? dataNodeId.getPort() : dataNodeId.getSSLPort(), portType);
  ConnectedChannel channel = connectionPool.checkOutConnection("localhost", port, 10000);
  PartitionId partitionId = blobId1.getPartition();
  // stop the store via AdminRequest
  System.out.println("Begin to stop a BlobStore");
  AdminRequest adminRequest =
      new AdminRequest(AdminRequestOrResponseType.BlobStoreControl, partitionId, 1, "clientid2");
  BlobStoreControlAdminRequest controlRequest =
      new BlobStoreControlAdminRequest((short) 0, BlobStoreControlAction.StopStore, adminRequest);
  DataInputStream stream = channel.sendAndReceive(controlRequest).getInputStream();
  AdminResponse adminResponse = AdminResponse.readFrom(stream);
  releaseNettyBufUnderneathStream(stream);
  assertEquals("Stop store admin request should succeed", ServerErrorCode.No_Error, adminResponse.getError());
  // put a blob on the stopped store, which should fail
  byte[] userMetadata = new byte[1000];
  byte[] data = new byte[3187];
  BlobProperties properties =
      new BlobProperties(3187, "serviceid1", accountId, containerId, false, cluster.time.milliseconds());
  BlobId blobId2 = new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE,
      clusterMap.getLocalDatacenterId(), accountId, containerId, partitionId, false, BlobId.BlobDataType.DATACHUNK);
  PutRequest putRequest2 = new PutRequest(1, "clientId2", blobId2, properties, ByteBuffer.wrap(userMetadata),
      Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, null);
  DataInputStream putResponseStream = channel.sendAndReceive(putRequest2).getInputStream();
  PutResponse response2 = PutResponse.readFrom(putResponseStream);
  releaseNettyBufUnderneathStream(putResponseStream);
  assertEquals("Put blob on stopped store should fail", ServerErrorCode.Replica_Unavailable, response2.getError());
  // get blob properties on the stopped store, which should fail
  ArrayList<BlobId> ids = new ArrayList<>();
  ids.add(blobId1);
  ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<>();
  PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(partitionId, ids);
  partitionRequestInfoList.add(partitionRequestInfo);
  GetRequest getRequest1 =
      new GetRequest(1, "clientId1", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
  stream = channel.sendAndReceive(getRequest1).getInputStream();
  GetResponse resp1 = GetResponse.readFrom(stream, clusterMap);
  assertEquals("Get blob properties on stopped store should fail", ServerErrorCode.Replica_Unavailable,
      resp1.getPartitionResponseInfoList().get(0).getErrorCode());
  releaseNettyBufUnderneathStream(stream);
  // delete a blob on the stopped store, which should fail
  DeleteRequest deleteRequest = new DeleteRequest(1, "clientId1", blobId1, System.currentTimeMillis());
  stream = channel.sendAndReceive(deleteRequest).getInputStream();
  DeleteResponse deleteResponse = DeleteResponse.readFrom(stream);
  releaseNettyBufUnderneathStream(stream);
  assertEquals("Delete blob on stopped store should fail", ServerErrorCode.Replica_Unavailable,
      deleteResponse.getError());
  // start the store via AdminRequest
  System.out.println("Begin to restart the BlobStore");
  adminRequest = new AdminRequest(AdminRequestOrResponseType.BlobStoreControl, partitionId, 1, "clientId");
  controlRequest = new BlobStoreControlAdminRequest((short) 0, BlobStoreControlAction.StartStore, adminRequest);
  stream = channel.sendAndReceive(controlRequest).getInputStream();
  adminResponse = AdminResponse.readFrom(stream);
  releaseNettyBufUnderneathStream(stream);
  assertEquals("Start store admin request should succeed", ServerErrorCode.No_Error, adminResponse.getError());
  List<? extends ReplicaId> replicaIds = partitionId.getReplicaIds();
  for (ReplicaId replicaId : replicaIds) {
    // forcibly mark replicas and disks as up
    MockReplicaId mockReplicaId = (MockReplicaId) replicaId;
    mockReplicaId.markReplicaDownStatus(false);
    ((MockDiskId) mockReplicaId.getDiskId()).setDiskState(HardwareState.AVAILABLE, false);
  }
  // put a blob on the restarted store, which should succeed
  putRequest2 = new PutRequest(1, "clientId2", blobId2, properties, ByteBuffer.wrap(userMetadata),
      Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, null);
  putResponseStream = channel.sendAndReceive(putRequest2).getInputStream();
  response2 = PutResponse.readFrom(putResponseStream);
  releaseNettyBufUnderneathStream(putResponseStream);
  assertEquals("Put blob on restarted store should succeed", ServerErrorCode.No_Error, response2.getError());
  // verify that the put blob has been replicated successfully
  notificationSystem.awaitBlobCreations(blobId2.getID());
  // get a blob on the restarted store, which should succeed
  ids = new ArrayList<BlobId>();
  ids.add(blobId2);
  partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
  partitionRequestInfo = new PartitionRequestInfo(partitionId, ids);
  partitionRequestInfoList.add(partitionRequestInfo);
  GetRequest getRequest2 =
      new GetRequest(1, "clientId2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
  stream = channel.sendAndReceive(getRequest2).getInputStream();
  GetResponse resp2 = GetResponse.readFrom(stream, clusterMap);
  InputStream responseStream = resp2.getInputStream();
  BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(responseStream, blobIdFactory);
  byte[] actualBlobData = getBlobDataAndRelease(blobAll.getBlobData());
  assertArrayEquals("Content mismatch.", data, actualBlobData);
  releaseNettyBufUnderneathStream(stream);
  // delete a blob on the restarted store, which should succeed
  deleteRequest = new DeleteRequest(1, "clientId2", blobId2, System.currentTimeMillis());
  stream = channel.sendAndReceive(deleteRequest).getInputStream();
  deleteResponse = DeleteResponse.readFrom(stream);
  releaseNettyBufUnderneathStream(stream);
  assertEquals("Delete blob on restarted store should succeed", ServerErrorCode.No_Error, deleteResponse.getError());
  router.close();
  connectionPool.shutdown();
}
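One detail that is easy to miss in the long test above: the router returns blob ids as strings, and the test rebuilds typed BlobId instances from them to target admin requests. A minimal sketch of that round trip, using only calls that appear above:

  // Round-trip a blob id through its string form; getID() is the inverse of the
  // BlobId(String, ClusterMap) constructor used on payload1.blobId above.
  BlobId parsed = new BlobId(payload1.blobId, clusterMap);
  PartitionId partition = parsed.getPartition();  // the partition the admin requests are sent to
  assert parsed.getID().equals(payload1.blobId);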
Use of com.github.ambry.commons.BlobIdFactory in project ambry by linkedin.
The class DumpIndexTool, method main.
public static void main(String[] args) throws Exception {
  final AtomicInteger exitCode = new AtomicInteger(0);
  VerifiableProperties verifiableProperties = ToolUtils.getVerifiableProperties(args);
  DumpIndexToolConfig config = new DumpIndexToolConfig(verifiableProperties);
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
  try (ClusterMap clusterMap = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory,
      clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath)).getClusterMap()) {
    StoreConfig storeConfig = new StoreConfig(verifiableProperties);
    // this tool supports only blob IDs. It can become generic if StoreKeyFactory provides a deserFromString method.
    BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
    StoreToolsMetrics metrics = new StoreToolsMetrics(clusterMap.getMetricRegistry());
    StoreMetrics storeMetrics = new StoreMetrics("DumpIndexTool", clusterMap.getMetricRegistry());
    ServerConfig serverConfig = new ServerConfig(verifiableProperties);
    Time time = SystemTime.getInstance();
    Throttler throttler = new Throttler(config.indexEntriesToProcessPerSec, 1000, true, time);
    StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory,
        verifiableProperties, clusterMap.getMetricRegistry());
    DumpIndexTool dumpIndexTool = new DumpIndexTool(blobIdFactory, storeConfig, time, metrics, storeMetrics,
        throttler, storeKeyConverterFactory.getStoreKeyConverter());
    Set<StoreKey> filterKeySet = new HashSet<>();
    for (String key : config.filterSet) {
      filterKeySet.add(new BlobId(key, clusterMap));
    }
    switch (config.typeOfOperation) {
      case DumpIndex:
        dumpIndex(dumpIndexTool, config.pathOfInput, filterKeySet);
        break;
      case DumpIndexSegment:
        dumpIndexSegment(dumpIndexTool, config.pathOfInput, filterKeySet);
        break;
      case VerifyIndex:
        IndexProcessingResults results = dumpIndexTool.processIndex(config.pathOfInput, filterKeySet,
            time.milliseconds(), config.detectDuplicatesAcrossKeys);
        exitCode.set(reportVerificationResults(config.pathOfInput, results, config.failIfCraftedIdsPresent));
        break;
      case VerifyDataNode:
        DataNodeId dataNodeId = clusterMap.getDataNodeId(config.hostname, config.port);
        if (dataNodeId == null) {
          logger.error("No data node corresponding to {}:{}", config.hostname, config.port);
        } else {
          Set<File> replicaDirs = clusterMap.getReplicaIds(dataNodeId).stream()
              .map(replicaId -> new File(replicaId.getMountPath())).collect(Collectors.toSet());
          Map<File, IndexProcessingResults> resultsByReplica = dumpIndexTool.processIndex(replicaDirs, filterKeySet,
              config.parallelism, config.detectDuplicatesAcrossKeys);
          replicaDirs.removeAll(resultsByReplica.keySet());
          if (replicaDirs.size() != 0) {
            logger.error("Results obtained missing {}", replicaDirs);
            exitCode.set(5);
          } else {
            resultsByReplica.forEach((replicaDir, result) -> exitCode.set(Math.max(exitCode.get(),
                reportVerificationResults(replicaDir, result, config.failIfCraftedIdsPresent))));
          }
        }
        break;
      default:
        throw new IllegalArgumentException("Unrecognized operation: " + config.typeOfOperation);
    }
  }
  System.exit(exitCode.get());
}
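As the inline comment notes, this tool is blob-id specific because the filter keys arrive as strings and StoreKeyFactory (at this point in the codebase) offers no from-string method; the parse therefore goes through the BlobId(String, ClusterMap) constructor directly. A minimal sketch of that pattern in isolation; the key string here is hypothetical.

  // Parse blob id strings into StoreKeys without help from StoreKeyFactory.
  Set<StoreKey> filterKeySet = new HashSet<>();
  for (String key : Collections.singletonList("AAEAAQAAAAA...")) {  // hypothetical blob id string
    filterKeySet.add(new BlobId(key, clusterMap));
  }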