Use of com.github.ambry.protocol.GetRequest in project ambry by LinkedIn.
The class ServerTestUtil, method undeleteRecoveryTest.
static void undeleteRecoveryTest(Port targetPort, MockCluster cluster, SSLConfig clientSSLConfig, SSLSocketFactory clientSSLSocketFactory) {
try {
MockClusterMap clusterMap = cluster.getClusterMap();
byte[] userMetadata = new byte[1000];
byte[] data = new byte[31870];
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
BlobProperties properties = new BlobProperties(31870, "serviceid1", accountId, containerId, false, cluster.time.milliseconds());
TestUtils.RANDOM.nextBytes(userMetadata);
TestUtils.RANDOM.nextBytes(data);
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
BlobId blobId1 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
// put blob 1
PutRequest putRequest = new PutRequest(1, "client1", blobId1, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, null);
ConnectedChannel channel = getBlockingChannelBasedOnPortType(targetPort, "localhost", clientSSLSocketFactory, clientSSLConfig);
channel.connect();
DataInputStream stream = channel.sendAndReceive(putRequest).getInputStream();
PutResponse response = PutResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals(ServerErrorCode.No_Error, response.getError());
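// Delete and undelete the blob twice; each cycle advances the blob's life version, and the (short) (i + 1)
// argument is the life version the undelete is expected to produce.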
for (int i = 0; i < 2; i++) {
// delete blob 1
deleteBlob(channel, blobId1, cluster.time.milliseconds());
// undelete blob 1
undeleteBlob(channel, blobId1, cluster.time.milliseconds(), (short) (i + 1));
}
// Put blob 2, which will expire. Add an extra 5 secs to avoid a Blob_Update_Not_Allowed failure, since the
// TtlUpdate op time is also cluster.time.milliseconds(). Theoretically the update should succeed, as
// op time = expiry time - buffer time. However, the index truncates ms to sec when putting a blob, so the
// milliseconds part of the initial put time is wiped out, which pushes op time past expiry time - buffer time.
// Adding some time avoids this failure.
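// For example, a put at t = 86_400_567 ms is indexed at 86_400 s, so the stored expiry (put time + ttl) ends up
// 567 ms earlier than expected, enough to tip op time past expiry time - buffer time at the boundary.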
long ttl = 24 * 60 * 60 + 5;
BlobProperties propertiesExpired = new BlobProperties(31870, "serviceid1", "ownerid", "jpeg", false, ttl, cluster.time.milliseconds(), accountId, containerId, false, null, null, null);
BlobId blobId2 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), propertiesExpired.getAccountId(), propertiesExpired.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
PutRequest putRequest2 = new PutRequest(1, "client1", blobId2, propertiesExpired, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, null);
stream = channel.sendAndReceive(putRequest2).getInputStream();
PutResponse response2 = PutResponse.readFrom(stream);
releaseNettyBufUnderneathStream(stream);
assertEquals(ServerErrorCode.No_Error, response2.getError());
for (int i = 0; i < 2; i++) {
// delete blob 2
deleteBlob(channel, blobId2, cluster.time.milliseconds());
// undelete blob 2
undeleteBlob(channel, blobId2, cluster.time.milliseconds(), (short) (i + 1));
}
// ttl update blob 2
updateBlobTtl(channel, blobId2, cluster.time.milliseconds());
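// Advance the mock time past blob 2's original expiry; the ttl update above should keep the blob alive through recovery.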
cluster.time.sleep(ttl + 10000);
// Now stop the server, remove all the index files for this partition, and test its recovery.
channel.disconnect();
AmbryServer server = cluster.getServers().get(0);
server.shutdown();
server.awaitShutdown();
MockDataNodeId dataNode = (MockDataNodeId) clusterMap.getDataNodeId("localhost", channel.getRemotePort());
for (ReplicaId replica : partitionIds.get(0).getReplicaIds()) {
if (replica.getDataNodeId().equals(dataNode)) {
for (File file : Objects.requireNonNull(new File(replica.getReplicaPath()).listFiles((file, filename) -> filename.endsWith("index")))) {
file.delete();
}
}
}
cluster.reinitServer(0);
channel = getBlockingChannelBasedOnPortType(targetPort, "localhost", clientSSLSocketFactory, clientSSLConfig);
channel.connect();
// Now verify that we can fetch blob1 and blob2.
for (BlobId blobId : new BlobId[] { blobId1, blobId2 }) {
long deadline = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10);
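// Recovery rebuilds the index from the log, so poll until the blob becomes readable or the deadline passes.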
while (true) {
// get blob properties
ArrayList<BlobId> ids = new ArrayList<>();
ids.add(blobId);
ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<>();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(partitionIds.get(0), ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
stream = channel.sendAndReceive(getRequest).getInputStream();
GetResponse getResponse = GetResponse.readFrom(stream, clusterMap);
if (getResponse.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.No_Error) {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(getResponse.getInputStream());
assertEquals(31870, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
releaseNettyBufUnderneathStream(stream);
break;
} else {
Thread.sleep(1000);
if (System.currentTimeMillis() > deadline) {
throw new TimeoutException("Fail to get blob " + blobId + " at " + channel.getRemoteHost());
}
}
}
}
channel.disconnect();
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
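The five usages on this page all follow the same GetRequest round trip: wrap the blob ids for one partition in a PartitionRequestInfo, pick the MessageFormatFlags for the record wanted (BlobProperties, BlobUserMetadata, Blob, or All), send the request over a ConnectedChannel, read the GetResponse back with the cluster map, and deserialize the record from the response stream. The sketch below distills that pattern; it is a minimal illustration, not ambry API documentation: the class and helper names are invented, and the import paths are assumed from a typical ambry layout and may differ across versions.

import java.io.DataInputStream;
import java.util.Collections;
import java.util.List;
import com.github.ambry.clustermap.ClusterMap;
import com.github.ambry.commons.BlobId;
import com.github.ambry.messageformat.BlobProperties;
import com.github.ambry.messageformat.MessageFormatFlags;
import com.github.ambry.messageformat.MessageFormatRecord;
import com.github.ambry.network.ConnectedChannel;
import com.github.ambry.protocol.GetOption;
import com.github.ambry.protocol.GetRequest;
import com.github.ambry.protocol.GetResponse;
import com.github.ambry.protocol.PartitionRequestInfo;
import com.github.ambry.server.ServerErrorCode; // assumed path; some versions keep this in com.github.ambry.commons

final class GetRequestRoundTrip {
  // Illustrative helper: fetch and deserialize the BlobProperties record of a single blob
  // over an already-connected channel.
  static BlobProperties fetchBlobProperties(ConnectedChannel channel, ClusterMap clusterMap, BlobId blobId) throws Exception {
    // One PartitionRequestInfo per partition; here a single blob in its own partition.
    List<PartitionRequestInfo> partitionRequestInfoList = Collections.singletonList(new PartitionRequestInfo(blobId.getPartition(), Collections.singletonList(blobId)));
    GetRequest getRequest = new GetRequest(1, "clientid", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
    DataInputStream stream = channel.sendAndReceive(getRequest).getInputStream();
    GetResponse getResponse = GetResponse.readFrom(stream, clusterMap);
    // Check both the response-level and the per-partition error codes.
    if (getResponse.getError() != ServerErrorCode.No_Error || getResponse.getPartitionResponseInfoList().get(0).getErrorCode() != ServerErrorCode.No_Error) {
      throw new IllegalStateException("get failed: " + getResponse.getError());
    }
    // The requested record follows the response header on the same stream. The tests on this page also call
    // releaseNettyBufUnderneathStream(stream) afterwards to free the buffer backing the stream.
    return MessageFormatRecord.deserializeBlobProperties(getResponse.getInputStream());
  }
}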
Use of com.github.ambry.protocol.GetRequest in project ambry by LinkedIn.
The class VcrBackupTest, method basicTest.
/**
* Basic test to make sure the VCR can back up with HelixVcrCluster.
*/
@Test
public void basicTest() throws Exception {
List<BlobId> blobIds = sendBlobToDataNode(dataNode, 10);
// Start the VCR and CloudBackupManager
Properties props = VcrTestUtil.createVcrProperties(dataNode.getDatacenterName(), vcrClusterName, zkConnectString, clusterMapPort, 12410, 12510, serverSSLProps, vcrHelixStateModelFactoryClass, true);
LatchBasedInMemoryCloudDestination latchBasedInMemoryCloudDestination = new LatchBasedInMemoryCloudDestination(blobIds, mockCluster.getClusterMap());
CloudDestinationFactory cloudDestinationFactory = new LatchBasedInMemoryCloudDestinationFactory(latchBasedInMemoryCloudDestination);
VcrServer vcrServer = VcrTestUtil.createVcrServer(new VerifiableProperties(props), mockCluster.getClusterAgentsFactory(), notificationSystem, cloudDestinationFactory);
vcrServer.startup();
// Wait for the backup to complete
assertTrue("Did not back up all blobs in 2 minutes", latchBasedInMemoryCloudDestination.awaitUpload(2, TimeUnit.MINUTES));
// Verify a blob by making an HTTP2 request.
MockClusterMap clusterMap = mockCluster.getClusterMap();
SSLConfig clientSSLConfig = new SSLConfig(new VerifiableProperties(clientSSLProps));
ConnectedChannel channel = ServerTestUtil.getBlockingChannelBasedOnPortType(new Port(clusterMap.getDataNodes().get(0).getHttp2Port(), PortType.HTTP2), "localhost", null, clientSSLConfig);
BlobId blobToVerify = blobIds.get(0);
ArrayList<BlobId> idList = new ArrayList<>(Arrays.asList(blobToVerify));
ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobToVerify.getPartition(), idList);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest1 = new GetRequest(1, "clientid1", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
DataInputStream stream = channel.sendAndReceive(getRequest1).getInputStream();
GetResponse resp1 = GetResponse.readFrom(stream, clusterMap);
try {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp1.getInputStream());
// Do a simple check
assertEquals(blobSize, propertyOutput.getBlobSize());
releaseNettyBufUnderneathStream(stream);
} catch (MessageFormatException e) {
fail();
}
vcrServer.shutdown();
assertTrue("VCR server shutdown timeout.", vcrServer.awaitShutdown(5000));
}
Use of com.github.ambry.protocol.GetRequest in project ambry by LinkedIn.
The class Verifier, method run.
@Override
public void run() {
try {
List<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
while (requestsVerified.get() != totalRequests.get() && !cancelTest.get()) {
Payload payload = payloadQueue.poll(1000, TimeUnit.MILLISECONDS);
if (payload != null) {
notificationSystem.awaitBlobCreations(payload.blobId);
for (MockDataNodeId dataNodeId : clusterMap.getDataNodes()) {
ConnectedChannel channel1 = null;
try {
BlobId blobId = new BlobId(payload.blobId, clusterMap);
Port port = new Port(portType == PortType.PLAINTEXT ? dataNodeId.getPort() : dataNodeId.getSSLPort(), portType);
channel1 = connectionPool.checkOutConnection("localhost", port, 10000);
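// get blob properties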
ArrayList<BlobId> ids = new ArrayList<BlobId>();
ids.add(blobId);
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(ids.get(0).getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
channel1.send(getRequest);
DataInputStream stream = channel1.receive().getInputStream();
GetResponse resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getError() != ServerErrorCode.No_Error) {
System.out.println(dataNodeId.getHostname() + " " + dataNodeId.getPort() + " " + resp.getError());
throw new IllegalStateException();
} else {
try {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
if (propertyOutput.getBlobSize() != payload.blobProperties.getBlobSize()) {
String exceptionMsg = "blob size not matching " + " expected " + payload.blobProperties.getBlobSize() + " actual " + propertyOutput.getBlobSize();
System.out.println(exceptionMsg);
throw new IllegalStateException(exceptionMsg);
}
if (!propertyOutput.getServiceId().equals(payload.blobProperties.getServiceId())) {
String exceptionMsg = "service id not matching " + " expected " + payload.blobProperties.getServiceId() + " actual " + propertyOutput.getBlobSize();
System.out.println(exceptionMsg);
throw new IllegalStateException(exceptionMsg);
}
if (propertyOutput.getAccountId() != payload.blobProperties.getAccountId()) {
String exceptionMsg = "accountid not matching " + " expected " + payload.blobProperties.getAccountId() + " actual " + propertyOutput.getAccountId();
System.out.println(exceptionMsg);
throw new IllegalStateException(exceptionMsg);
}
if (propertyOutput.getContainerId() != payload.blobProperties.getContainerId()) {
String exceptionMsg = "containerId not matching " + " expected " + payload.blobProperties.getContainerId() + " actual " + propertyOutput.getContainerId();
System.out.println(exceptionMsg);
throw new IllegalStateException(exceptionMsg);
}
if (propertyOutput.isEncrypted() != payload.blobProperties.isEncrypted()) {
String exceptionMsg = "IsEncrypted not matching " + " expected " + payload.blobProperties.isEncrypted() + " actual " + propertyOutput.isEncrypted();
System.out.println(exceptionMsg);
throw new IllegalStateException(exceptionMsg);
}
long actualExpiryTimeMs = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs();
checkExpiryTimeMatch(payload, actualExpiryTimeMs, "messageinfo in blobproperty");
} catch (MessageFormatException e) {
e.printStackTrace();
throw new IllegalStateException(e);
}
}
// get user metadata
ids.clear();
ids.add(blobId);
partitionRequestInfoList.clear();
partitionRequestInfo = new PartitionRequestInfo(ids.get(0).getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
channel1.send(getRequest);
stream = channel1.receive().getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getError() != ServerErrorCode.No_Error) {
System.out.println("Error after get user metadata " + resp.getError());
throw new IllegalStateException();
} else {
try {
ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp.getInputStream());
if (userMetadataOutput.compareTo(ByteBuffer.wrap(payload.metadata)) != 0) {
throw new IllegalStateException();
}
long actualExpiryTimeMs = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs();
checkExpiryTimeMatch(payload, actualExpiryTimeMs, "messageinfo in usermetadatga");
} catch (MessageFormatException e) {
e.printStackTrace();
throw new IllegalStateException();
}
}
// get blob
ids.clear();
ids.add(blobId);
partitionRequestInfoList.clear();
partitionRequestInfo = new PartitionRequestInfo(ids.get(0).getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
channel1.send(getRequest);
stream = channel1.receive().getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
// System.out.println("response from get " + resp.getError());
if (resp.getError() != ServerErrorCode.No_Error) {
System.out.println("Error after get blob " + resp.getError());
throw new IllegalStateException();
} else {
try {
BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
byte[] blobout = new byte[(int) blobData.getSize()];
ByteBuf buffer = blobData.content();
try {
buffer.readBytes(blobout);
} finally {
buffer.release();
}
if (ByteBuffer.wrap(blobout).compareTo(ByteBuffer.wrap(payload.blob)) != 0) {
throw new IllegalStateException();
}
long actualExpiryTimeMs = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs();
checkExpiryTimeMatch(payload, actualExpiryTimeMs, "messageinfo in blobdata");
} catch (MessageFormatException e) {
e.printStackTrace();
throw new IllegalStateException();
}
}
// get blob all
getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
channel1.send(getRequest);
stream = channel1.receive().getInputStream();
resp = GetResponse.readFrom(stream, clusterMap);
if (resp.getError() != ServerErrorCode.No_Error) {
System.out.println("Error after get blob " + resp.getError());
throw new IllegalStateException();
} else {
try {
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), new BlobIdFactory(clusterMap));
byte[] blobout = new byte[(int) blobAll.getBlobData().getSize()];
ByteBuf buffer = blobAll.getBlobData().content();
try {
buffer.readBytes(blobout);
} finally {
buffer.release();
}
if (ByteBuffer.wrap(blobout).compareTo(ByteBuffer.wrap(payload.blob)) != 0) {
throw new IllegalStateException();
}
long actualExpiryTimeMs = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs();
checkExpiryTimeMatch(payload, actualExpiryTimeMs, "messageinfo in bloball");
} catch (MessageFormatException e) {
e.printStackTrace();
throw new IllegalStateException();
}
}
if (payload.blobProperties.getTimeToLiveInSeconds() != Utils.Infinite_Time) {
// ttl update, check and wait for replication
ServerTestUtil.updateBlobTtl(channel1, new BlobId(payload.blobId, clusterMap), time.milliseconds());
ServerTestUtil.checkTtlUpdateStatus(channel1, clusterMap, new BlobIdFactory(clusterMap), blobId, payload.blob, true, Utils.Infinite_Time);
notificationSystem.awaitBlobUpdates(payload.blobId, UpdateType.TTL_UPDATE);
BlobProperties old = payload.blobProperties;
payload.blobProperties = new BlobProperties(old.getBlobSize(), old.getServiceId(), old.getOwnerId(), old.getContentType(), old.isEncrypted(), Utils.Infinite_Time, old.getCreationTimeInMs(), old.getAccountId(), old.getContainerId(), old.isEncrypted(), old.getExternalAssetTag(), old.getContentEncoding(), old.getFilename());
}
} catch (Exception e) {
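// On any failure, destroy the connection so a possibly-bad channel is not returned to the pool.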
if (channel1 != null) {
connectionPool.destroyConnection(channel1);
channel1 = null;
}
} finally {
if (channel1 != null) {
connectionPool.checkInConnection(channel1);
channel1 = null;
}
}
}
requestsVerified.incrementAndGet();
}
}
} catch (Exception e) {
e.printStackTrace();
cancelTest.set(true);
} finally {
completedLatch.countDown();
}
}
Use of com.github.ambry.protocol.GetRequest in project ambry by LinkedIn.
The class VcrRecoveryTest, method testGetOnRecoveryNode.
/**
* Do a get on the recovery node to test that all the blob ids that were uploaded to the vcr node have been recovered on the recovery node.
* @param blobIdToSizeMap {@link Map} of blob id to the size of the blob uploaded to the vcr node.
* @throws IOException if the request fails.
*/
private void testGetOnRecoveryNode(Map<BlobId, Integer> blobIdToSizeMap) throws IOException {
ConnectedChannel channel = ServerTestUtil.getBlockingChannelBasedOnPortType(recoveryNodePort, "localhost", null, null);
channel.connect();
AtomicInteger correlationIdGenerator = new AtomicInteger(0);
List<PartitionRequestInfo> partitionRequestInfoList = Collections.singletonList(new PartitionRequestInfo(partitionId, blobIds));
GetRequest getRequest = new GetRequest(correlationIdGenerator.incrementAndGet(), GetRequest.Replication_Client_Id_Prefix + recoveryNode.getHostname(), MessageFormatFlags.All, partitionRequestInfoList, new ReplicationConfig(new VerifiableProperties(recoveryProperties)).replicationIncludeAll ? GetOption.Include_All : GetOption.None);
channel.send(getRequest);
GetResponse getResponse = GetResponse.readFrom(channel.receive().getInputStream(), recoveryCluster.getClusterMap());
for (PartitionResponseInfo partitionResponseInfo : getResponse.getPartitionResponseInfoList()) {
assertEquals("Error in getting the recovered blobs", ServerErrorCode.No_Error, partitionResponseInfo.getErrorCode());
// The old per-message size was 272 bytes; two newly added 4-byte blob property fields add 8 bytes, for 280 in total.
for (MessageInfo messageInfo : partitionResponseInfo.getMessageInfoList()) {
assertEquals(blobIdToSizeMap.get(messageInfo.getStoreKey()) + 280, messageInfo.getSize());
}
}
}
Use of com.github.ambry.protocol.GetRequest in project ambry by LinkedIn.
The class AmbryServerRequestsTest, method sendAndVerifyOperationRequest.
/**
* Sends and verifies that an operation-specific request works correctly.
* @param requestType the type of the request to send.
* @param ids the partitionIds to send requests for.
* @param expectedErrorCode the {@link ServerErrorCode} expected in the response. For some requests this is the
* response in the constituents rather than the actual response ({@link GetResponse} and
* {@link ReplicaMetadataResponse}).
* @param forceCheckOpReceived if {@code true}, checks the operation received at the {@link Store} even if
* there is an error expected. Always checks op received if {@code expectedErrorCode} is
* {@link ServerErrorCode#No_Error}. Skips the check otherwise.
* @param clientIdStr the clientId string used to construct the request; if {@code null}, a random string is generated as the clientId.
* @return a list of {@link Response}(s) associated with the given partition ids.
* @throws InterruptedException
* @throws IOException
*/
private List<Response> sendAndVerifyOperationRequest(RequestOrResponseType requestType, List<? extends PartitionId> ids, ServerErrorCode expectedErrorCode, Boolean forceCheckOpReceived, String clientIdStr) throws InterruptedException, IOException {
List<Response> responses = new ArrayList<>();
for (PartitionId id : ids) {
int correlationId = TestUtils.RANDOM.nextInt();
String clientId = clientIdStr == null ? TestUtils.getRandomString(10) : clientIdStr;
BlobId originalBlobId = new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, ClusterMap.UNKNOWN_DATACENTER_ID, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), id, false, BlobId.BlobDataType.DATACHUNK);
BlobId convertedBlobId = new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.CRAFTED, ClusterMap.UNKNOWN_DATACENTER_ID, originalBlobId.getAccountId(), originalBlobId.getContainerId(), id, false, BlobId.BlobDataType.DATACHUNK);
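// Register the crafted id as the converted form of the native id and mark it valid in the store, so the
// server's id conversion path is exercised.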
conversionMap.put(originalBlobId, convertedBlobId);
validKeysInStore.add(convertedBlobId);
RequestOrResponse request;
switch(requestType) {
case PutRequest:
BlobProperties properties = new BlobProperties(0, "serviceId", originalBlobId.getAccountId(), originalBlobId.getContainerId(), false);
request = new PutRequest(correlationId, clientId, originalBlobId, properties, ByteBuffer.allocate(0), Unpooled.wrappedBuffer(ByteBuffer.allocate(0)), 0, BlobType.DataBlob, null);
break;
case DeleteRequest:
request = new DeleteRequest(correlationId, clientId, originalBlobId, SystemTime.getInstance().milliseconds());
break;
case UndeleteRequest:
request = new UndeleteRequest(correlationId, clientId, originalBlobId, SystemTime.getInstance().milliseconds());
break;
case GetRequest:
PartitionRequestInfo pRequestInfo = new PartitionRequestInfo(id, Collections.singletonList(originalBlobId));
request = new GetRequest(correlationId, clientId, MessageFormatFlags.All, Collections.singletonList(pRequestInfo), GetOption.Include_All);
break;
case ReplicaMetadataRequest:
ReplicaMetadataRequestInfo rRequestInfo = new ReplicaMetadataRequestInfo(id, findTokenHelper.getFindTokenFactoryFromReplicaType(ReplicaType.DISK_BACKED).getNewFindToken(), "localhost", "/tmp", ReplicaType.DISK_BACKED, replicationConfig.replicaMetadataRequestVersion);
request = new ReplicaMetadataRequest(correlationId, clientId, Collections.singletonList(rRequestInfo), Long.MAX_VALUE, replicationConfig.replicaMetadataRequestVersion);
break;
case TtlUpdateRequest:
request = new TtlUpdateRequest(correlationId, clientId, originalBlobId, Utils.Infinite_Time, SystemTime.getInstance().milliseconds());
break;
default:
throw new IllegalArgumentException(requestType + " not supported by this function");
}
responses.add(sendAndVerifyOperationRequest(request, expectedErrorCode, forceCheckOpReceived));
}
return responses;
}