use of com.linkedin.pinot.common.restlet.resources.SegmentSizeInfo in project pinot by linkedin.
the class TableSizeReader method populateErroredServerSizes.
// For servers that reported an error, populate the segment sizes with -1 as a placeholder
private void populateErroredServerSizes(Map<String, List<SegmentSizeInfo>> serverSizeInfo,
    Map<String, List<String>> serverSegmentsMap) {
  // Errored servers are those that host segments of the table but returned no size info
  ImmutableSet<String> erroredServers =
      Sets.difference(serverSegmentsMap.keySet(), serverSizeInfo.keySet()).immutableCopy();
  for (String server : erroredServers) {
    List<String> serverSegments = serverSegmentsMap.get(server);
    Preconditions.checkNotNull(serverSegments);
    List<SegmentSizeInfo> serverSegmentSizes = new ArrayList<>(serverSegments.size());
    serverSizeInfo.put(server, serverSegmentSizes);
    for (String segment : serverSegments) {
      // size -1 marks a segment whose size could not be obtained from this server
      serverSegmentSizes.add(new SegmentSizeInfo(segment, -1));
    }
  }
}
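A small usage sketch may make the effect concrete. The server names, segment names, and sizes below are made up for illustration, and the java.util imports are assumed: if the segment assignment knows two servers but only one returned sizes, the call back-fills the other with -1 placeholders.

// Illustrative inputs, not taken from the project
Map<String, List<String>> serverSegmentsMap = new HashMap<>();
serverSegmentsMap.put("server1", Arrays.asList("seg0", "seg1"));
serverSegmentsMap.put("server2", Arrays.asList("seg0", "seg1"));

Map<String, List<SegmentSizeInfo>> serverSizeInfo = new HashMap<>();
serverSizeInfo.put("server1",
    Arrays.asList(new SegmentSizeInfo("seg0", 1024L), new SegmentSizeInfo("seg1", 2048L)));

populateErroredServerSizes(serverSizeInfo, serverSegmentsMap);
// serverSizeInfo now also maps "server2" -> [("seg0", -1), ("seg1", -1)]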
use of com.linkedin.pinot.common.restlet.resources.SegmentSizeInfo in project pinot by linkedin.
the class TableSizeReader method getTableSubtypeSize.
public TableSubTypeSizeDetails getTableSubtypeSize(String tableNameWithType, int timeoutMsec) {
  // for convenient usage within this function
  final String table = tableNameWithType;
  // get list of servers
  Map<String, List<String>> serverSegmentsMap =
      helixResourceManager.getInstanceToSegmentsInATableMap(table);
  ServerTableSizeReader serverTableSizeReader = new ServerTableSizeReader(executor, connectionManager);
  BiMap<String, String> endpoints =
      helixResourceManager.getDataInstanceAdminEndpoints(serverSegmentsMap.keySet());
  Map<String, List<SegmentSizeInfo>> serverSizeInfo =
      serverTableSizeReader.getSizeDetailsFromServers(endpoints, table, timeoutMsec);
  populateErroredServerSizes(serverSizeInfo, serverSegmentsMap);
  TableSubTypeSizeDetails subTypeSizeDetails = new TableSubTypeSizeDetails();
  Map<String, SegmentSizeDetails> segmentMap = subTypeSizeDetails.segments;
  // convert from server -> SegmentSizes to segment -> (SegmentSizeDetails: server -> segmentSizes)
  for (Map.Entry<String, List<SegmentSizeInfo>> serverSegments : serverSizeInfo.entrySet()) {
    String server = serverSegments.getKey();
    List<SegmentSizeInfo> segments = serverSegments.getValue();
    for (SegmentSizeInfo segment : segments) {
      SegmentSizeDetails sizeDetails = segmentMap.get(segment.segmentName);
      if (sizeDetails == null) {
        sizeDetails = new SegmentSizeDetails();
        segmentMap.put(segment.segmentName, sizeDetails);
      }
      sizeDetails.serverInfo.put(server, segment);
    }
  }
  // Iterate through the map of segments and calculate the reported and estimated sizes for each
  // segment. For servers that reported an error, we use the max size of the same segment reported
  // by another server. If no server reported a size for a segment, we use the size of the largest
  // segment reported by any server for the table.
  // At all times, reportedSize is the actual size reported by servers; errored segments are not
  // reflected in that count. estimatedSize is what we estimate in case of errors, as described
  // above, so estimatedSize >= reportedSize. If no server reported an error, estimatedSize == reportedSize.
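  // Worked example (numbers are illustrative, not from the project): a segment hosted on three
  // servers, two reporting 100 and 120 bytes and one erroring, gives reportedSize = 220 and
  // estimatedSize = 220 + 1 * max(100, 120) = 340; with no errors both values would be 220.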
  long tableLevelMax = -1;
  for (Map.Entry<String, SegmentSizeDetails> segmentEntry : segmentMap.entrySet()) {
    SegmentSizeDetails segmentSizes = segmentEntry.getValue();
    // track segment-level max size
    long segmentLevelMax = -1;
    int errors = 0;
    // iterate over all servers that reported a size for this segment
    for (Map.Entry<String, SegmentSizeInfo> serverInfo : segmentSizes.serverInfo.entrySet()) {
      SegmentSizeInfo ss = serverInfo.getValue();
      if (ss.diskSizeInBytes != -1) {
        segmentSizes.reportedSizeInBytes += ss.diskSizeInBytes;
        segmentLevelMax = Math.max(segmentLevelMax, ss.diskSizeInBytes);
      } else {
        ++errors;
      }
    }
    // after iterating over all servers, update the reported and estimated size of the segment
    if (errors != segmentSizes.serverInfo.size()) {
      // at least one server reported a size for this segment
      segmentSizes.estimatedSizeInBytes = segmentSizes.reportedSizeInBytes + errors * segmentLevelMax;
      tableLevelMax = Math.max(tableLevelMax, segmentLevelMax);
      subTypeSizeDetails.reportedSizeInBytes += segmentSizes.reportedSizeInBytes;
      subTypeSizeDetails.estimatedSizeInBytes += segmentSizes.estimatedSizeInBytes;
    } else {
      // no server reported a size for this segment
      segmentSizes.reportedSizeInBytes = -1;
      segmentSizes.estimatedSizeInBytes = -1;
    }
  }
  if (tableLevelMax == -1) {
    // no server reported a size for any segment of the table
    subTypeSizeDetails.reportedSizeInBytes = -1;
    subTypeSizeDetails.estimatedSizeInBytes = -1;
  } else {
    // For segments with no reported sizes, use the max table-level segment size as the estimate
    for (Map.Entry<String, SegmentSizeDetails> segmentSizeDetailsEntry : segmentMap.entrySet()) {
      SegmentSizeDetails sizeDetails = segmentSizeDetailsEntry.getValue();
      if (sizeDetails.reportedSizeInBytes != -1) {
        continue;
      }
      // estimatedSizeInBytes is still -1 here, so assign (rather than add to) the estimate
      sizeDetails.estimatedSizeInBytes = sizeDetails.serverInfo.size() * tableLevelMax;
      subTypeSizeDetails.estimatedSizeInBytes += sizeDetails.estimatedSizeInBytes;
    }
  }
  return subTypeSizeDetails;
}
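The result holders TableSubTypeSizeDetails and SegmentSizeDetails are not reproduced in this snippet; judging only from the field accesses above, they are plain value holders roughly along these lines (a sketch inferred from usage, not the exact source; java.util imports assumed).

// Sketch of the holders as implied by the field accesses in getTableSubtypeSize
public static class TableSubTypeSizeDetails {
  public long reportedSizeInBytes;
  public long estimatedSizeInBytes;
  // segment name -> per-segment size details
  public Map<String, SegmentSizeDetails> segments = new HashMap<>();
}

public static class SegmentSizeDetails {
  public long reportedSizeInBytes;
  public long estimatedSizeInBytes;
  // server name -> size info reported by that server for this segment
  public Map<String, SegmentSizeInfo> serverInfo = new HashMap<>();
}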
use of com.linkedin.pinot.common.restlet.resources.SegmentSizeInfo in project pinot by linkedin.
the class ServerTableSizeReaderTest method createTableSizeInfo.
private TableSizeInfo createTableSizeInfo(String tableName, List<Integer> segmentIndexes) {
  TableSizeInfo tableSizeInfo = new TableSizeInfo();
  tableSizeInfo.tableName = tableName;
  tableSizeInfo.diskSizeInBytes = 0;
  for (int segmentIndex : segmentIndexes) {
    long size = segmentIndexToSize(segmentIndex);
    tableSizeInfo.diskSizeInBytes += size;
    SegmentSizeInfo s = new SegmentSizeInfo("seg" + segmentIndex, size);
    tableSizeInfo.segments.add(s);
  }
  return tableSizeInfo;
}
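The segmentIndexToSize helper is referenced but not shown here; for the test, any deterministic mapping from index to a distinct size would do, for example (the formula below is an assumption, not the project's actual helper):

// Hypothetical helper: maps a segment index to a deterministic, distinct size in bytes
private long segmentIndexToSize(int segmentIndex) {
  return 1000L + segmentIndex * 100L;
}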
use of com.linkedin.pinot.common.restlet.resources.SegmentSizeInfo in project pinot by linkedin.
the class TableSizeResource method getTableSize.
@GET
@Produces(MediaType.APPLICATION_JSON)
@Path("/tables/{tableName}/size")
@ApiOperation(value = "Show table storage size", notes = "Lists size of all the segments of the table")
@ApiResponses(value = {
    @ApiResponse(code = 200, message = "Success"),
    @ApiResponse(code = 500, message = "Internal server error"),
    @ApiResponse(code = 404, message = "Table not found")
})
public TableSizeInfo getTableSize(
    @ApiParam(value = "Table Name with type", required = true) @PathParam("tableName") String tableName,
    @ApiParam(value = "Provide detailed information", required = false) @DefaultValue("true") @QueryParam("detailed") boolean detailed)
    throws WebApplicationException {
  InstanceDataManager dataManager = (InstanceDataManager) serverInstance.getInstanceDataManager();
  if (dataManager == null) {
    throw new WebApplicationException("Invalid server initialization", Response.Status.INTERNAL_SERVER_ERROR);
  }
  TableDataManager tableDataManager = dataManager.getTableDataManager(tableName);
  if (tableDataManager == null) {
    throw new WebApplicationException("Table: " + tableName + " is not found", Response.Status.NOT_FOUND);
  }
  TableSizeInfo tableSizeInfo = new TableSizeInfo();
  tableSizeInfo.tableName = tableDataManager.getTableName();
  tableSizeInfo.diskSizeInBytes = 0L;
  ImmutableList<SegmentDataManager> segmentDataManagers = tableDataManager.acquireAllSegments();
  try {
    for (SegmentDataManager segmentDataManager : segmentDataManagers) {
      IndexSegment segment = segmentDataManager.getSegment();
      long segmentSizeBytes = segment.getDiskSizeBytes();
      if (detailed) {
        SegmentSizeInfo segmentSizeInfo = new SegmentSizeInfo(segment.getSegmentName(), segmentSizeBytes);
        tableSizeInfo.segments.add(segmentSizeInfo);
      }
      tableSizeInfo.diskSizeInBytes += segmentSizeBytes;
    }
  } finally {
    // executes fast, so the duration of holding segments is not a concern
    for (SegmentDataManager segmentDataManager : segmentDataManagers) {
      tableDataManager.releaseSegment(segmentDataManager);
    }
  }
  // invalid to use the segmentDataManagers below this point
  return tableSizeInfo;
}
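A quick way to exercise this resource from the outside; the host, port, and table name below are placeholders, and the JDK's java.net.http client is just one convenient choice rather than anything the project uses here:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class TableSizeClient {
  public static void main(String[] args) throws Exception {
    // URL is illustrative; point it at a running server's admin port
    HttpClient client = HttpClient.newHttpClient();
    HttpRequest request = HttpRequest.newBuilder(
        URI.create("http://localhost:8097/tables/myTable_OFFLINE/size?detailed=true")).GET().build();
    HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
    // The body is the JSON-serialized TableSizeInfo produced by getTableSize above
    System.out.println(response.body());
  }
}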
use of com.linkedin.pinot.common.restlet.resources.SegmentSizeInfo in project pinot by linkedin.
the class TableSizeReaderTest method createHandler.
private HttpHandler createHandler(final int status, final List<SegmentSizeInfo> segmentSizes, final int sleepTimeMs) {
  return new HttpHandler() {
    @Override
    public void handle(HttpExchange httpExchange) throws IOException {
      if (sleepTimeMs > 0) {
        try {
          Thread.sleep(sleepTimeMs);
        } catch (InterruptedException e) {
          LOGGER.info("Handler interrupted during sleep");
        }
      }
      TableSizeInfo tableInfo = new TableSizeInfo("myTable", 0);
      tableInfo.segments = segmentSizes;
      for (SegmentSizeInfo segmentSize : segmentSizes) {
        tableInfo.diskSizeInBytes += segmentSize.diskSizeInBytes;
      }
      String json = new ObjectMapper().writeValueAsString(tableInfo);
      // use the byte length (not the string length) for the Content-Length header
      byte[] payload = json.getBytes();
      httpExchange.sendResponseHeaders(status, payload.length);
      OutputStream responseBody = httpExchange.getResponseBody();
      responseBody.write(payload);
      responseBody.close();
    }
  };
}
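In the test, a handler like this would typically be mounted on a lightweight JDK HttpServer; a minimal wiring sketch follows. The context path is illustrative and must match whatever URL ServerTableSizeReader requests, and imports from com.sun.net.httpserver and java.net are assumed:

// Start a throwaway HTTP server that serves the canned table-size payload
HttpServer server = HttpServer.create(new InetSocketAddress(0), 0); // port 0 = pick any free port
server.createContext("/tables/myTable/size", createHandler(200, segmentSizes, 0));
server.start();
int port = server.getAddress().getPort(); // hand this to the endpoint map used by ServerTableSizeReader
// ... run the test against the server, then shut it down ...
server.stop(0);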