Use of com.github.ambry.clustermap.DataNodeId in project ambry by LinkedIn: class BlobValidator, method validateBlobOnDatacenter.
/**
 * Validates {@code blobId} on all of its replicas in {@code datacenter}.
 * @param datacenter the datacenter in which {@code blobId} has to be validated.
 * @param blobId the {@link BlobId} to operate on.
 * @param getOption the {@link GetOption} to use with the {@link com.github.ambry.protocol.GetRequest}.
 * @param clusterMap the {@link ClusterMap} instance to use.
 * @param storeKeyFactory the {@link StoreKeyFactory} to use.
 * @return a list of details if there are mismatches. An empty list if there are no mismatches.
 * @throws InterruptedException
 */
private List<String> validateBlobOnDatacenter(String datacenter, BlobId blobId, GetOption getOption,
    ClusterMap clusterMap, StoreKeyFactory storeKeyFactory) throws InterruptedException {
  Map<DataNodeId, ServerResponse> dataNodeIdBlobContentMap = new HashMap<>();
  for (ReplicaId replicaId : blobId.getPartition().getReplicaIds()) {
    if (replicaId.getDataNodeId().getDatacenterName().equalsIgnoreCase(datacenter)) {
      ServerResponse response =
          getRecordFromNode(replicaId.getDataNodeId(), blobId, getOption, clusterMap, storeKeyFactory);
      dataNodeIdBlobContentMap.put(replicaId.getDataNodeId(), response);
    }
  }
  return getMismatchDetails(blobId.getID(), dataNodeIdBlobContentMap);
}
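The per-node responses collected above are then reduced to human-readable mismatch details. The actual getMismatchDetails implementation is not shown on this page; the sketch below (with the hypothetical name getMismatchDetailsSketch) only illustrates the idea, assuming ServerResponse has a value-based equals covering error code and blob content, and comparing every node's response against the first one seen:

// Illustrative sketch only; not the BlobValidator implementation.
// Assumes ServerResponse.equals() performs a value comparison (error code, properties, content)
// and that java.util.ArrayList is imported.
private List<String> getMismatchDetailsSketch(String blobId, Map<DataNodeId, ServerResponse> responses) {
  List<String> mismatchDetails = new ArrayList<>();
  DataNodeId referenceNode = null;
  ServerResponse referenceResponse = null;
  for (Map.Entry<DataNodeId, ServerResponse> entry : responses.entrySet()) {
    if (referenceResponse == null) {
      // first response acts as the reference for all subsequent comparisons
      referenceNode = entry.getKey();
      referenceResponse = entry.getValue();
    } else if (!referenceResponse.equals(entry.getValue())) {
      mismatchDetails.add("Mismatch for [" + blobId + "] between " + referenceNode + " and " + entry.getKey()
          + ": " + referenceResponse + " vs " + entry.getValue());
    }
  }
  return mismatchDetails;
}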
Use of com.github.ambry.clustermap.DataNodeId in project ambry by LinkedIn: class BlobValidator, method main.
/**
* Runs the BlobValidator
* @param args associated arguments.
* @throws Exception
*/
public static void main(String[] args) throws Exception {
  VerifiableProperties verifiableProperties = ToolUtils.getVerifiableProperties(args);
  BlobValidatorConfig config = new BlobValidatorConfig(verifiableProperties);
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
  ClusterMap clusterMap =
      ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig,
          config.hardwareLayoutFilePath, config.partitionLayoutFilePath)).getClusterMap();
  List<BlobId> blobIds = getBlobIds(config, clusterMap);
  SSLFactory sslFactory = !clusterMapConfig.clusterMapSslEnabledDatacenters.isEmpty()
      ? SSLFactory.getNewInstance(new SSLConfig(verifiableProperties)) : null;
  StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
  BlobValidator validator =
      new BlobValidator(clusterMap, config.replicasToContactPerSec, sslFactory, verifiableProperties);
  LOGGER.info("Validation starting");
  switch (config.typeOfOperation) {
    case ValidateBlobOnAllReplicas:
      Map<BlobId, List<String>> mismatchDetailsMap =
          validator.validateBlobsOnAllReplicas(blobIds, config.getOption, clusterMap, storeKeyFactory);
      logMismatches(mismatchDetailsMap);
      break;
    case ValidateBlobOnDatacenter:
      if (config.datacenter.isEmpty() || !clusterMap.hasDatacenter(config.datacenter)) {
        throw new IllegalArgumentException("Please provide a valid datacenter");
      }
      mismatchDetailsMap = validator.validateBlobsOnDatacenter(config.datacenter, blobIds, config.getOption,
          clusterMap, storeKeyFactory);
      logMismatches(mismatchDetailsMap);
      break;
    case ValidateBlobOnReplica:
      DataNodeId dataNodeId = clusterMap.getDataNodeId(config.hostname, config.port);
      if (dataNodeId == null) {
        throw new IllegalArgumentException(
            "Could not find a data node corresponding to " + config.hostname + ":" + config.port);
      }
      List<ServerErrorCode> validErrorCodes =
          Arrays.asList(ServerErrorCode.No_Error, ServerErrorCode.Blob_Deleted, ServerErrorCode.Blob_Expired);
      Map<BlobId, ServerErrorCode> blobIdToErrorCode =
          validator.validateBlobsOnReplica(dataNodeId, blobIds, config.getOption, clusterMap, storeKeyFactory);
      for (Map.Entry<BlobId, ServerErrorCode> entry : blobIdToErrorCode.entrySet()) {
        ServerErrorCode errorCode = entry.getValue();
        if (!validErrorCodes.contains(errorCode)) {
          LOGGER.error("[{}] received error code: {}", entry.getKey(), errorCode);
        }
      }
      break;
    default:
      throw new IllegalStateException("Recognized but unsupported operation: " + config.typeOfOperation);
  }
  LOGGER.info("Validation complete");
  validator.close();
  clusterMap.close();
}
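The same flow can also be driven directly from code once a ClusterMap and VerifiableProperties have been built as in main() above. The snippet below is a minimal sketch, not part of the tool: the datacenter name "DC1", the rate limit of 2 replicas per second, and the blob ID placeholder are illustrative values, and it assumes the BlobId(String, ClusterMap) constructor for parsing a blob ID string and an enclosing method that declares throws Exception.

// Illustrative sketch: drive datacenter validation directly instead of via main().
// Assumes 'clusterMap' and 'verifiableProperties' are already constructed as shown in main() above.
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
// Parse the blob ID string into a BlobId (constructor assumed; replace the placeholder with a real ID).
List<BlobId> blobIds = Collections.singletonList(new BlobId("<blob id string>", clusterMap));
// Rate limit of 2 replicas/sec and no SSL are illustrative choices; the tool reads these from BlobValidatorConfig.
BlobValidator validator = new BlobValidator(clusterMap, 2, null, verifiableProperties);
Map<BlobId, List<String>> mismatchDetailsMap =
    validator.validateBlobsOnDatacenter("DC1", blobIds, GetOption.Include_All, clusterMap, storeKeyFactory);
for (Map.Entry<BlobId, List<String>> entry : mismatchDetailsMap.entrySet()) {
  if (!entry.getValue().isEmpty()) {
    LOGGER.error("Mismatches for [{}]: {}", entry.getKey().getID(), entry.getValue());
  }
}
validator.close();
clusterMap.close();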