Example usage of com.github.ambry.clustermap.PartitionId in the ambry project by LinkedIn, taken from the class AmbryServerRequestsTest, method sendAndVerifyOperationRequest.
/**
 * Sends and verifies that an operation specific request works correctly.
 * <p>
 * For each partition id, a fresh (original, converted) {@link BlobId} pair is generated, registered in
 * {@code conversionMap}, and the converted id is added to {@code validKeysInStore} so that id conversion
 * succeeds inside the server request handler.
 * @param requestType the type of the request to send.
 * @param ids the partitionIds to send requests for.
 * @param expectedErrorCode the {@link ServerErrorCode} expected in the response. For some requests this is the
 *                          response in the constituents rather than the actual response ({@link GetResponse} and
 *                          {@link ReplicaMetadataResponse}).
 * @param forceCheckOpReceived if {@code true}, checks the operation received at the {@link Store} even if
 *                             there is an error expected. Always checks op received if {@code expectedErrorCode} is
 *                             {@link ServerErrorCode#No_Error}. Skips the check otherwise.
 * @param clientIdStr the clientId string to construct request. if null, generate a random string as clientId.
 * @throws InterruptedException
 * @throws IOException
 * @return a list of {@link Response}(s) associated with given partition ids.
 */
private List<Response> sendAndVerifyOperationRequest(RequestOrResponseType requestType,
    List<? extends PartitionId> ids, ServerErrorCode expectedErrorCode, Boolean forceCheckOpReceived,
    String clientIdStr) throws InterruptedException, IOException {
  List<Response> responses = new ArrayList<>();
  for (PartitionId id : ids) {
    int correlationId = TestUtils.RANDOM.nextInt();
    String clientId = clientIdStr == null ? TestUtils.getRandomString(10) : clientIdStr;
    BlobId originalBlobId =
        new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE,
            ClusterMap.UNKNOWN_DATACENTER_ID, Utils.getRandomShort(TestUtils.RANDOM),
            Utils.getRandomShort(TestUtils.RANDOM), id, false, BlobId.BlobDataType.DATACHUNK);
    // The converted (CRAFTED) id keeps the original's account and container so conversion is consistent.
    BlobId convertedBlobId =
        new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.CRAFTED,
            ClusterMap.UNKNOWN_DATACENTER_ID, originalBlobId.getAccountId(), originalBlobId.getContainerId(), id,
            false, BlobId.BlobDataType.DATACHUNK);
    conversionMap.put(originalBlobId, convertedBlobId);
    validKeysInStore.add(convertedBlobId);
    RequestOrResponse request;
    switch (requestType) {
      case PutRequest:
        // FIX: the container-id argument previously re-used getAccountId(); the blob's container id must be
        // passed so the properties agree with the ids generated above.
        BlobProperties properties =
            new BlobProperties(0, "serviceId", originalBlobId.getAccountId(), originalBlobId.getContainerId(),
                false);
        request =
            new PutRequest(correlationId, clientId, originalBlobId, properties, ByteBuffer.allocate(0),
                Unpooled.wrappedBuffer(ByteBuffer.allocate(0)), 0, BlobType.DataBlob, null);
        break;
      case DeleteRequest:
        request = new DeleteRequest(correlationId, clientId, originalBlobId, SystemTime.getInstance().milliseconds());
        break;
      case UndeleteRequest:
        request =
            new UndeleteRequest(correlationId, clientId, originalBlobId, SystemTime.getInstance().milliseconds());
        break;
      case GetRequest:
        PartitionRequestInfo pRequestInfo = new PartitionRequestInfo(id, Collections.singletonList(originalBlobId));
        request =
            new GetRequest(correlationId, clientId, MessageFormatFlags.All, Collections.singletonList(pRequestInfo),
                GetOption.Include_All);
        break;
      case ReplicaMetadataRequest:
        ReplicaMetadataRequestInfo rRequestInfo =
            new ReplicaMetadataRequestInfo(id,
                findTokenHelper.getFindTokenFactoryFromReplicaType(ReplicaType.DISK_BACKED).getNewFindToken(),
                "localhost", "/tmp", ReplicaType.DISK_BACKED, replicationConfig.replicaMetadataRequestVersion);
        request =
            new ReplicaMetadataRequest(correlationId, clientId, Collections.singletonList(rRequestInfo),
                Long.MAX_VALUE, replicationConfig.replicaMetadataRequestVersion);
        break;
      case TtlUpdateRequest:
        request =
            new TtlUpdateRequest(correlationId, clientId, originalBlobId, Utils.Infinite_Time,
                SystemTime.getInstance().milliseconds());
        break;
      default:
        throw new IllegalArgumentException(requestType + " not supported by this function");
    }
    responses.add(sendAndVerifyOperationRequest(request, expectedErrorCode, forceCheckOpReceived));
  }
  return responses;
}
Example usage of com.github.ambry.clustermap.PartitionId in the ambry project by LinkedIn, taken from the class AmbryServerRequestsTest, method miscUndeleteFailuresTest.
/**
 * Exercises various failure paths for UNDELETEs.
 * <p>
 * Each failure is injected through static/shared test fixtures; the injected state is now reset in
 * {@code finally} blocks so a failing verification cannot leak poisoned state into later tests.
 * @throws Exception
 */
private void miscUndeleteFailuresTest() throws Exception {
  PartitionId id = clusterMap.getWritablePartitionIds(DEFAULT_PARTITION_CLASS).get(0);
  // store exceptions: every StoreErrorCodes value must map to its corresponding ServerErrorCode
  for (StoreErrorCodes code : StoreErrorCodes.values()) {
    // ID_Undeleted needs the specialized exception type so the handler can extract the life version
    MockStorageManager.storeException =
        code == StoreErrorCodes.ID_Undeleted ? new IdUndeletedStoreException("expected", (short) 1)
            : new StoreException("expected", code);
    try {
      ServerErrorCode expectedErrorCode = ErrorMapping.getStoreErrorMapping(code);
      sendAndVerifyOperationRequest(RequestOrResponseType.UndeleteRequest, Collections.singletonList(id),
          expectedErrorCode, true, null);
    } finally {
      MockStorageManager.storeException = null;
    }
  }
  // runtime exception surfaces as Unknown_Error
  MockStorageManager.runtimeException = new RuntimeException("expected");
  try {
    sendAndVerifyOperationRequest(RequestOrResponseType.UndeleteRequest, Collections.singletonList(id),
        ServerErrorCode.Unknown_Error, true, null);
  } finally {
    MockStorageManager.runtimeException = null;
  }
  // store is not started/is stopped/otherwise unavailable - Replica_Unavailable
  storageManager.returnNullStore = true;
  try {
    sendAndVerifyOperationRequest(RequestOrResponseType.UndeleteRequest, Collections.singletonList(id),
        ServerErrorCode.Replica_Unavailable, false, null);
  } finally {
    storageManager.returnNullStore = false;
  }
  // PartitionUnknown is hard to simulate without betraying knowledge of the internals of MockClusterMap.
  // disk down
  ReplicaId replicaId = findReplica(id);
  clusterMap.onReplicaEvent(replicaId, ReplicaEventType.Disk_Error);
  try {
    sendAndVerifyOperationRequest(RequestOrResponseType.UndeleteRequest, Collections.singletonList(id),
        ServerErrorCode.Disk_Unavailable, false, null);
  } finally {
    clusterMap.onReplicaEvent(replicaId, ReplicaEventType.Disk_Ok);
  }
  // request disabled is checked in request control tests
}
Example usage of com.github.ambry.clustermap.PartitionId in the ambry project by LinkedIn, taken from the class AmbryServerRequestsTest, method miscTtlUpdateFailuresTest.
/**
 * Exercises various failure paths for TTL updates.
 * <p>
 * Each failure is injected through static/shared test fixtures; the injected state is now reset in
 * {@code finally} blocks so a failing verification cannot leak poisoned state into later tests
 * (mirrors {@code miscUndeleteFailuresTest}).
 * @throws InterruptedException
 * @throws IOException
 */
private void miscTtlUpdateFailuresTest() throws InterruptedException, IOException {
  PartitionId id = clusterMap.getWritablePartitionIds(DEFAULT_PARTITION_CLASS).get(0);
  // store exceptions: every StoreErrorCodes value must map to its corresponding ServerErrorCode
  for (StoreErrorCodes code : StoreErrorCodes.values()) {
    MockStorageManager.storeException = new StoreException("expected", code);
    try {
      ServerErrorCode expectedErrorCode = ErrorMapping.getStoreErrorMapping(code);
      sendAndVerifyOperationRequest(RequestOrResponseType.TtlUpdateRequest, Collections.singletonList(id),
          expectedErrorCode, true, null);
    } finally {
      MockStorageManager.storeException = null;
    }
  }
  // runtime exception surfaces as Unknown_Error
  MockStorageManager.runtimeException = new RuntimeException("expected");
  try {
    sendAndVerifyOperationRequest(RequestOrResponseType.TtlUpdateRequest, Collections.singletonList(id),
        ServerErrorCode.Unknown_Error, true, null);
  } finally {
    MockStorageManager.runtimeException = null;
  }
  // store is not started/is stopped/otherwise unavailable - Replica_Unavailable
  storageManager.returnNullStore = true;
  try {
    sendAndVerifyOperationRequest(RequestOrResponseType.TtlUpdateRequest, Collections.singletonList(id),
        ServerErrorCode.Replica_Unavailable, false, null);
  } finally {
    storageManager.returnNullStore = false;
  }
  // PartitionUnknown is hard to simulate without betraying knowledge of the internals of MockClusterMap.
  // disk down
  ReplicaId replicaId = findReplica(id);
  clusterMap.onReplicaEvent(replicaId, ReplicaEventType.Disk_Error);
  try {
    sendAndVerifyOperationRequest(RequestOrResponseType.TtlUpdateRequest, Collections.singletonList(id),
        ServerErrorCode.Disk_Unavailable, false, null);
  } finally {
    clusterMap.onReplicaEvent(replicaId, ReplicaEventType.Disk_Ok);
  }
  // request disabled is checked in request control tests
}
Example usage of com.github.ambry.clustermap.PartitionId in the ambry project by LinkedIn, taken from the class AmbryServerRequestsTest, method controlReplicationSuccessTest.
/**
 * Tests that {@link AdminRequestOrResponseType#ReplicationControl} works correctly.
 * @throws InterruptedException
 * @throws IOException
 */
@Test
public void controlReplicationSuccessTest() throws InterruptedException, IOException {
  // Replication control should succeed for every individual writable partition...
  for (PartitionId partitionId : clusterMap.getWritablePartitionIds(DEFAULT_PARTITION_CLASS)) {
    doControlReplicationTest(partitionId, ServerErrorCode.No_Error);
  }
  // ...and for the "all partitions" case, signalled by a null partition id.
  doControlReplicationTest(null, ServerErrorCode.No_Error);
}
use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
the class AmbryServerRequestsTest method scheduleCompactionSuccessTest.
/**
 * Tests that compactions are scheduled correctly.
 * @throws InterruptedException
 * @throws IOException
 */
@Test
public void scheduleCompactionSuccessTest() throws InterruptedException, IOException {
  // Scheduling compaction must succeed for each writable partition, and the storage manager
  // must record exactly that partition as the one scheduled.
  for (PartitionId partitionId : clusterMap.getWritablePartitionIds(DEFAULT_PARTITION_CLASS)) {
    doScheduleCompactionTest(partitionId, ServerErrorCode.No_Error);
    assertEquals("Partition scheduled for compaction not as expected", partitionId,
        storageManager.compactionScheduledPartitionId);
  }
}
Aggregations