use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
the class AmbryRequestsTest method sendAndVerifyReplicationControlRequest.
/**
 * Sends a {@link AdminRequestOrResponseType#ReplicationControl} request and verifies that the expected error code is
 * received and that {@link AmbryRequests} sent the right details to {@link ReplicationManager}.
 * @param origins the list of datacenters from which replication should be enabled/disabled.
 * @param enable {@code true} if replication needs to be enabled. {@code false} otherwise.
 * @param id the {@link PartitionId} to send the request for. Can be {@code null}.
 * @param expectedServerErrorCode the {@link ServerErrorCode} expected in the response.
 * @throws InterruptedException
 * @throws IOException
 */
private void sendAndVerifyReplicationControlRequest(List<String> origins, boolean enable, PartitionId id,
    ServerErrorCode expectedServerErrorCode) throws InterruptedException, IOException {
  int correlationId = TestUtils.RANDOM.nextInt();
  String clientId = UtilsTest.getRandomString(10);
  AdminRequest adminRequest =
      new AdminRequest(AdminRequestOrResponseType.ReplicationControl, id, correlationId, clientId);
  ReplicationControlAdminRequest controlRequest = new ReplicationControlAdminRequest(origins, enable, adminRequest);
  Response response = sendRequestGetResponse(controlRequest, expectedServerErrorCode);
  assertTrue("Response not of type AdminResponse", response instanceof AdminResponse);
  List<PartitionId> idsVal;
  if (id == null) {
    idsVal = clusterMap.getAllPartitionIds();
  } else {
    idsVal = Collections.singletonList(id);
  }
  if (!expectedServerErrorCode.equals(ServerErrorCode.Unknown_Error)) {
    assertEquals("Origins not as provided in request", origins, replicationManager.originsVal);
    assertEquals("Enable not as provided in request", enable, replicationManager.enableVal);
    assertEquals("Ids not as provided in request", idsVal.size(), replicationManager.idsVal.size());
    assertTrue("Ids not as provided in request", replicationManager.idsVal.containsAll(idsVal));
  }
}
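A minimal usage sketch for this helper; the datacenter name "DC1" and the choice of partition are illustrative assumptions, not taken from the test above:

// disable replication from a hypothetical datacenter "DC1" for one partition
PartitionId partition = clusterMap.getAllPartitionIds().get(0);
sendAndVerifyReplicationControlRequest(Collections.singletonList("DC1"), false, partition, ServerErrorCode.No_Error);
// passing null as the id applies the request to all partitions on the node
sendAndVerifyReplicationControlRequest(Collections.singletonList("DC1"), true, null, ServerErrorCode.No_Error);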
use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
the class AmbryRequestsTest method sendAndVerifyOperationRequest.
/**
 * Sends an operation-specific request and verifies that it works correctly.
 * @param requestType the type of the request to send.
 * @param ids the {@link PartitionId}s to send requests for.
 * @param expectedErrorCode the {@link ServerErrorCode} expected in the response. For some requests this is the
 *                          error code in the constituent responses rather than in the top-level response
 *                          ({@link GetResponse} and {@link ReplicaMetadataResponse}).
 * @throws InterruptedException
 * @throws IOException
 */
private void sendAndVerifyOperationRequest(RequestOrResponseType requestType, List<? extends PartitionId> ids,
    ServerErrorCode expectedErrorCode) throws InterruptedException, IOException {
  for (PartitionId id : ids) {
    int correlationId = TestUtils.RANDOM.nextInt();
    String clientId = UtilsTest.getRandomString(10);
    BlobId blobId = new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE,
        ClusterMapUtils.UNKNOWN_DATACENTER_ID, Utils.getRandomShort(TestUtils.RANDOM),
        Utils.getRandomShort(TestUtils.RANDOM), id, false);
    RequestOrResponse request;
    switch (requestType) {
      case PutRequest:
        BlobProperties properties =
            new BlobProperties(0, "serviceId", blobId.getAccountId(), blobId.getAccountId(), false);
        request = new PutRequest(correlationId, clientId, blobId, properties, ByteBuffer.allocate(0),
            ByteBuffer.allocate(0), 0, BlobType.DataBlob, null);
        break;
      case DeleteRequest:
        request = new DeleteRequest(correlationId, clientId, blobId, SystemTime.getInstance().milliseconds());
        break;
      case GetRequest:
        PartitionRequestInfo pRequestInfo = new PartitionRequestInfo(id, Collections.singletonList(blobId));
        request = new GetRequest(correlationId, clientId, MessageFormatFlags.All,
            Collections.singletonList(pRequestInfo), GetOption.Include_All);
        break;
      case ReplicaMetadataRequest:
        ReplicaMetadataRequestInfo rRequestInfo =
            new ReplicaMetadataRequestInfo(id, FIND_TOKEN_FACTORY.getNewFindToken(), "localhost", "/tmp");
        request = new ReplicaMetadataRequest(correlationId, clientId, Collections.singletonList(rRequestInfo),
            Long.MAX_VALUE);
        break;
      default:
        throw new IllegalArgumentException(requestType + " not supported by this function");
    }
    storageManager.resetStore();
    Response response = sendRequestGetResponse(request,
        requestType == RequestOrResponseType.GetRequest || requestType == RequestOrResponseType.ReplicaMetadataRequest
            ? ServerErrorCode.No_Error : expectedErrorCode);
    if (expectedErrorCode.equals(ServerErrorCode.No_Error)) {
      assertEquals("Operation received at the store not as expected", requestType,
          MockStorageManager.operationReceived);
    }
    if (requestType == RequestOrResponseType.GetRequest) {
      GetResponse getResponse = (GetResponse) response;
      for (PartitionResponseInfo info : getResponse.getPartitionResponseInfoList()) {
        assertEquals("Error code does not match expected", expectedErrorCode, info.getErrorCode());
      }
    } else if (requestType == RequestOrResponseType.ReplicaMetadataRequest) {
      ReplicaMetadataResponse replicaMetadataResponse = (ReplicaMetadataResponse) response;
      for (ReplicaMetadataResponseInfo info : replicaMetadataResponse.getReplicaMetadataResponseInfoList()) {
        assertEquals("Error code does not match expected", expectedErrorCode, info.getError());
      }
    }
  }
}
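A hedged sketch of how a test might drive this helper across all supported request types (assuming the enclosing test class's clusterMap is in scope and using java.util.EnumSet; the expected error code is illustrative):

List<? extends PartitionId> partitions = clusterMap.getAllPartitionIds();
for (RequestOrResponseType type : EnumSet.of(RequestOrResponseType.PutRequest, RequestOrResponseType.DeleteRequest,
    RequestOrResponseType.GetRequest, RequestOrResponseType.ReplicaMetadataRequest)) {
  // each supported type is exercised against every partition with a success expectation
  sendAndVerifyOperationRequest(type, partitions, ServerErrorCode.No_Error);
}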
use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
the class StorageManagerTest method scheduleAndDisableCompactionTest.
/**
 * Tests scheduling and disabling compaction in {@link StorageManager}.
 * @throws Exception
 */
@Test
public void scheduleAndDisableCompactionTest() throws Exception {
  MockDataNodeId dataNode = clusterMap.getDataNodes().get(0);
  List<ReplicaId> replicas = clusterMap.getReplicaIds(dataNode);
  List<MockDataNodeId> dataNodes = new ArrayList<>();
  dataNodes.add(dataNode);
  MockPartitionId invalidPartition = new MockPartitionId(Long.MAX_VALUE, dataNodes, 0);
  List<? extends ReplicaId> invalidPartitionReplicas = invalidPartition.getReplicaIds();
  StorageManager storageManager = createStorageManager(replicas, metricRegistry);
  storageManager.start();
  // add invalid replica id
  replicas.add(invalidPartitionReplicas.get(0));
  for (int i = 0; i < replicas.size(); i++) {
    ReplicaId replica = replicas.get(i);
    PartitionId id = replica.getPartitionId();
    if (i == replicas.size() - 1) {
      assertFalse("Schedule compaction should fail", storageManager.scheduleNextForCompaction(id));
      assertFalse("Disable compaction should fail", storageManager.disableCompactionForBlobStore(id));
    } else {
      assertTrue("Schedule compaction should succeed", storageManager.scheduleNextForCompaction(id));
    }
  }
  ReplicaId replica = replicas.get(0);
  PartitionId id = replica.getPartitionId();
  assertTrue("Disable compaction should succeed", storageManager.disableCompactionForBlobStore(id));
  assertFalse("Schedule compaction should fail", storageManager.scheduleNextForCompaction(id));
  replica = replicas.get(1);
  id = replica.getPartitionId();
  assertTrue("Schedule compaction should succeed", storageManager.scheduleNextForCompaction(id));
  replicas.remove(replicas.size() - 1);
  shutdownAndAssertStoresInaccessible(storageManager, replicas);
}
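In condensed form, a sketch of the contract this test exercises (assuming a started StorageManager named storageManager and a PartitionId id whose store it manages):

// a store the manager owns can be scheduled for compaction
assertTrue(storageManager.scheduleNextForCompaction(id));
// once compaction is disabled for that store, further scheduling is rejected
assertTrue(storageManager.disableCompactionForBlobStore(id));
assertFalse(storageManager.scheduleNextForCompaction(id));
// both calls return false for a partition whose store the manager does not own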
use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
the class ServerAdminTool method getGetResponse.
/**
 * Sends a {@link GetRequest} based on the provided parameters and returns the response stream if the request was
 * successful. {@code null} otherwise.
 * @param dataNodeId the {@link DataNodeId} to contact.
 * @param blobId the {@link BlobId} to operate on.
 * @param flags the {@link MessageFormatFlags} associated with the {@link GetRequest}.
 * @param getOption the {@link GetOption} to send with the {@link GetRequest}.
 * @param clusterMap the {@link ClusterMap} to use.
 * @return the {@link ServerErrorCode} and response stream if the request was successful. {@code null} for the
 *         response stream otherwise.
 * @throws Exception
 */
private Pair<ServerErrorCode, InputStream> getGetResponse(DataNodeId dataNodeId, BlobId blobId,
    MessageFormatFlags flags, GetOption getOption, ClusterMap clusterMap) throws Exception {
  PartitionId partitionId = blobId.getPartition();
  PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(partitionId, Collections.singletonList(blobId));
  List<PartitionRequestInfo> partitionRequestInfos = new ArrayList<>();
  partitionRequestInfos.add(partitionRequestInfo);
  GetRequest getRequest =
      new GetRequest(correlationId.incrementAndGet(), CLIENT_ID, flags, partitionRequestInfos, getOption);
  ResponseInfo response = sendRequestGetResponse(dataNodeId, partitionId, getRequest);
  InputStream serverResponseStream = new NettyByteBufDataInputStream(response.content());
  response.release();
  GetResponse getResponse = GetResponse.readFrom(new DataInputStream(serverResponseStream), clusterMap);
  ServerErrorCode partitionErrorCode = getResponse.getPartitionResponseInfoList().get(0).getErrorCode();
  ServerErrorCode errorCode =
      partitionErrorCode == ServerErrorCode.No_Error ? getResponse.getError() : partitionErrorCode;
  InputStream stream = errorCode == ServerErrorCode.No_Error ? getResponse.getInputStream() : null;
  return new Pair<>(errorCode, stream);
}
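A hedged sketch of how a caller might consume the returned pair (dataNodeId, blobId, getOption and clusterMap are assumed to be in scope; the MessageFormatFlags value is illustrative):

Pair<ServerErrorCode, InputStream> result =
    getGetResponse(dataNodeId, blobId, MessageFormatFlags.BlobProperties, getOption, clusterMap);
if (result.getFirst() == ServerErrorCode.No_Error) {
  InputStream stream = result.getSecond();
  // the stream holds the serialized record selected by the flags (blob properties here),
  // ready for the caller to deserialize
} else {
  System.out.println("GET failed for " + blobId + ": " + result.getFirst());
}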
use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
the class StorageManagerTest method setBlobStoreStoppedStateWithMultiDelegatesTest.
/**
 * Tests setting the blob store stopped state in two clusters (when the server participates in two Helix clusters).
 * @throws Exception
 */
@Test
public void setBlobStoreStoppedStateWithMultiDelegatesTest() throws Exception {
  MockDataNodeId dataNode = clusterMap.getDataNodes().get(0);
  List<ReplicaId> replicas = clusterMap.getReplicaIds(dataNode);
  MockClusterParticipant mockClusterParticipant1 = new MockClusterParticipant();
  MockClusterParticipant mockClusterParticipant2 = new MockClusterParticipant(null, false);
  List<ClusterParticipant> participants = Arrays.asList(mockClusterParticipant1, mockClusterParticipant2);
  StorageManager storageManager = createStorageManager(dataNode, metricRegistry, participants);
  storageManager.start();
  PartitionId id = replicas.get(0).getPartitionId();
  // test that if any delegate fails to update the stop state, the whole operation fails
  List<PartitionId> failToUpdateList = storageManager.setBlobStoreStoppedState(Collections.singletonList(id), true);
  assertEquals("Set store stopped state should fail because one of delegates returns false", id,
      failToUpdateList.get(0));
  // test the success case: both delegates succeed in updating the stop state of the replica
  mockClusterParticipant2.setStopStateReturnVal = null;
  failToUpdateList = storageManager.setBlobStoreStoppedState(Collections.singletonList(id), true);
  assertTrue("Set store stopped state should succeed", failToUpdateList.isEmpty());
  // verify both delegates have the correct stopped replica list
  List<String> expectedStoppedReplicas = Collections.singletonList(id.toPathString());
  assertEquals("Stopped replica list from participant 1 is not expected", expectedStoppedReplicas,
      mockClusterParticipant1.getStoppedReplicas());
  assertEquals("Stopped replica list from participant 2 is not expected", expectedStoppedReplicas,
      mockClusterParticipant2.getStoppedReplicas());
  shutdownAndAssertStoresInaccessible(storageManager, replicas);
}
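A minimal sketch of the setBlobStoreStoppedState contract exercised above (assuming a started StorageManager and a PartitionId id whose store it manages):

// mark the store as stopped on every registered participant
List<PartitionId> failed = storageManager.setBlobStoreStoppedState(Collections.singletonList(id), true);
if (!failed.isEmpty()) {
  // at least one Helix delegate rejected the update for these partitions;
  // the caller should retry or surface the failure
}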