Use of org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse in the Apache Ozone project.
From the class TestNSSummaryEndpoint, method testFileSizeDist.
/**
 * Verifies the aggregate file size distribution reported for a volume.
 * Expected bin counts:
 * Bin 0: 2 -> file1 and file5.
 * Bin 1: 1 -> file2.
 * Bin 2: 2 -> file4 and file6.
 * Bin 3: 1 -> file3.
 * @throws Exception
 */
@Test
public void testFileSizeDist() throws Exception {
  Response volRes = nsSummaryEndpoint.getFileSizeDistribution(VOL_PATH);
  FileSizeDistributionResponse volFileSizeDistResObj = (FileSizeDistributionResponse) volRes.getEntity();
  // If the volume has the correct file size distribution,
  // other lower level should be correct as well, given all
  // other previous tests have passed.
  int[] volFileSizeDist = volFileSizeDistResObj.getFileSizeDist();
  // Table of expected counts per bin; bins beyond index 3 must be empty.
  int[] expectedDist = new int[ReconConstants.NUM_OF_BINS];
  expectedDist[0] = 2;
  expectedDist[1] = 1;
  expectedDist[2] = 2;
  expectedDist[3] = 1;
  for (int bin = 0; bin < ReconConstants.NUM_OF_BINS; ++bin) {
    Assert.assertEquals(expectedDist[bin], volFileSizeDist[bin]);
  }
}
Use of org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse in the Apache Ozone project.
From the class NSSummaryEndpoint, method getFileSizeDistribution.
/**
 * Endpoint that returns aggregate file size distribution under a path.
 * Responds with BAD_REQUEST for a null/empty path, INITIALIZING while
 * metadata is still loading, TYPE_NOT_APPLICABLE for keys, and
 * PATH_NOT_FOUND for unknown paths.
 * @param path request path
 * @return File size distribution response
 * @throws IOException if reading the namespace metadata fails
 */
@GET
@Path("/dist")
public Response getFileSizeDistribution(@QueryParam("path") String path) throws IOException {
  if (path == null || path.isEmpty()) {
    return Response.status(Response.Status.BAD_REQUEST).build();
  }
  FileSizeDistributionResponse distResponse = new FileSizeDistributionResponse();
  if (!isInitializationComplete()) {
    distResponse.setStatus(ResponseStatus.INITIALIZING);
    return Response.ok(distResponse).build();
  }
  String normalizedPath = normalizePath(path);
  String[] names = parseRequestPath(normalizedPath);
  EntityType type = getEntityType(normalizedPath, names);
  switch (type) {
  case ROOT:
    // Root aggregates over every bucket in the cluster.
    distResponse.setFileSizeDist(sumBucketFileSizeDists(listBucketsUnderVolume(null)));
    break;
  case VOLUME:
    // Volume aggregates over all of its buckets.
    distResponse.setFileSizeDist(sumBucketFileSizeDists(listBucketsUnderVolume(names[0])));
    break;
  case BUCKET:
    distResponse.setFileSizeDist(getTotalFileSizeDist(getBucketObjectId(names)));
    break;
  case DIRECTORY:
    distResponse.setFileSizeDist(getTotalFileSizeDist(getDirObjectId(names)));
    break;
  case KEY:
    // key itself doesn't have file size distribution
    distResponse.setStatus(ResponseStatus.TYPE_NOT_APPLICABLE);
    break;
  case UNKNOWN:
    distResponse.setStatus(ResponseStatus.PATH_NOT_FOUND);
    break;
  default:
    break;
  }
  return Response.ok(distResponse).build();
}

/**
 * Accumulates the per-bucket file size distribution arrays bin by bin.
 * Shared by the ROOT and VOLUME cases, which previously duplicated this loop.
 * @param buckets buckets whose distributions are summed
 * @return array of length ReconConstants.NUM_OF_BINS with summed counts
 * @throws IOException if reading a bucket's distribution fails
 */
private int[] sumBucketFileSizeDists(List<OmBucketInfo> buckets) throws IOException {
  int[] fileSizeDist = new int[ReconConstants.NUM_OF_BINS];
  for (OmBucketInfo bucket : buckets) {
    int[] bucketFileSizeDist = getTotalFileSizeDist(bucket.getObjectID());
    // add on each bin
    for (int i = 0; i < ReconConstants.NUM_OF_BINS; ++i) {
      fileSizeDist[i] += bucketFileSizeDist[i];
    }
  }
  return fileSizeDist;
}
Aggregations.