use of org.apache.druid.server.coordination.DataSegmentChangeRequest in project druid by druid-io.
the class SegmentListerResource method getSegments.
/**
 * This endpoint is used by HttpServerInventoryView to keep an up-to-date list of segments served by
 * historical/realtime nodes.
 *
 * This endpoint lists segments served by this server and can also incrementally provide the segments added/dropped
 * since the last response.
 *
 * Here is how this is used:
 *
 * (1) Client sends the first request /druid/internal/v1/segments?counter=-1&timeout=<timeout>
 * Server responds with the list of segments currently served and a <counter,hash> pair.
 *
 * (2) Client sends subsequent requests /druid/internal/v1/segments?counter=<counter>&hash=<hash>&timeout=<timeout>
 * where the <counter,hash> values are taken from the last response. Server responds with the list of segment
 * updates since the given counter.
 *
 * This endpoint makes the client wait until either there is some segment update or the given timeout elapses.
 *
 * So, clients keep sending the next request immediately after receiving a response in order to keep the list
 * of segments served by this server up-to-date.
 *
 * @param counter counter received in the last response.
 * @param hash hash received in the last response.
 * @param timeout timeout after which a response is sent even if there are no new segment updates.
 * @param req the servlet request, used to start async processing.
 * @return null to avoid a "MUST return a non-void type" warning.
 * @throws IOException
 */
@GET
@Produces({MediaType.APPLICATION_JSON, SmileMediaTypes.APPLICATION_JACKSON_SMILE})
@Consumes({MediaType.APPLICATION_JSON, SmileMediaTypes.APPLICATION_JACKSON_SMILE})
public Void getSegments(
    @QueryParam("counter") long counter,
    @QueryParam("hash") long hash,
    @QueryParam("timeout") long timeout,
    @Context final HttpServletRequest req
) throws IOException
{
  if (announcer == null) {
    sendErrorResponse(req, HttpServletResponse.SC_NOT_FOUND, "announcer is not available.");
    return null;
  }

  if (timeout <= 0) {
    sendErrorResponse(req, HttpServletResponse.SC_BAD_REQUEST, "timeout must be positive.");
    return null;
  }

  final ResponseContext context = createContext(req.getHeader("Accept"));
  final ListenableFuture<ChangeRequestsSnapshot<DataSegmentChangeRequest>> future =
      announcer.getSegmentChangesSince(new ChangeRequestHistory.Counter(counter, hash));

  final AsyncContext asyncContext = req.startAsync();

  asyncContext.addListener(new AsyncListener() {
    @Override
    public void onComplete(AsyncEvent event)
    {
    }

    @Override
    public void onTimeout(AsyncEvent event)
    {
      // HTTP 204 NO_CONTENT is sent to the client.
      future.cancel(true);
      event.getAsyncContext().complete();
    }

    @Override
    public void onError(AsyncEvent event)
    {
    }

    @Override
    public void onStartAsync(AsyncEvent event)
    {
    }
  });

  Futures.addCallback(future, new FutureCallback<ChangeRequestsSnapshot<DataSegmentChangeRequest>>() {
    @Override
    public void onSuccess(ChangeRequestsSnapshot<DataSegmentChangeRequest> result)
    {
      try {
        HttpServletResponse response = (HttpServletResponse) asyncContext.getResponse();
        response.setStatus(HttpServletResponse.SC_OK);
        context.inputMapper
               .writerWithType(HttpServerInventoryView.SEGMENT_LIST_RESP_TYPE_REF)
               .writeValue(asyncContext.getResponse().getOutputStream(), result);
        asyncContext.complete();
      }
      catch (Exception ex) {
        log.debug(ex, "Request timed out or closed already.");
      }
    }

    @Override
    public void onFailure(Throwable th)
    {
      try {
        HttpServletResponse response = (HttpServletResponse) asyncContext.getResponse();
        if (th instanceof IllegalArgumentException) {
          response.sendError(HttpServletResponse.SC_BAD_REQUEST, th.getMessage());
        } else {
          response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, th.getMessage());
        }
        asyncContext.complete();
      }
      catch (Exception ex) {
        log.debug(ex, "Request timed out or closed already.");
      }
    }
  });

  asyncContext.setTimeout(timeout);
  return null;
}
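
For illustration, here is a minimal client-side polling loop against this endpoint. The URL path, query parameters, and 204 handling follow the javadoc above; the host and port, the class name, and the JSON field names read out of the ChangeRequestsSnapshot response are assumptions made for this sketch, not Druid code.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class SegmentListPoller
{
  public static void main(String[] args) throws Exception
  {
    ObjectMapper mapper = new ObjectMapper();
    HttpClient client = HttpClient.newHttpClient();
    // Hypothetical historical endpoint; substitute a real host:port.
    String base = "http://historical.example.com:8083/druid/internal/v1/segments";
    long counter = -1;
    // Assumed to be ignored by the server while counter is -1.
    long hash = 0;

    while (true) {
      HttpRequest request = HttpRequest.newBuilder()
          .uri(URI.create(base + "?counter=" + counter + "&hash=" + hash + "&timeout=60000"))
          .header("Accept", "application/json")
          .GET()
          .build();
      HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());

      if (response.statusCode() == 204) {
        // The server's async timeout fired with no updates; re-poll with the same <counter,hash>.
        continue;
      }

      JsonNode snapshot = mapper.readTree(response.body());
      // Assumed serialized shape of ChangeRequestsSnapshot: a "counter" object plus a "requests" array.
      counter = snapshot.get("counter").get("counter").asLong();
      hash = snapshot.get("counter").get("hash").asLong();
      for (JsonNode changeRequest : snapshot.get("requests")) {
        System.out.println("segment change: " + changeRequest);
      }
    }
  }
}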
use of org.apache.druid.server.coordination.DataSegmentChangeRequest in project druid by druid-io.
the class LoadQueuePeonTest method testMultipleLoadDropSegments.
@Test
public void testMultipleLoadDropSegments() throws Exception {
  loadQueuePeon = new CuratorLoadQueuePeon(
      curator,
      LOAD_QUEUE_PATH,
      jsonMapper,
      Execs.scheduledSingleThreaded("test_load_queue_peon_scheduled-%d"),
      Execs.singleThreaded("test_load_queue_peon-%d"),
      new TestDruidCoordinatorConfig(
          null, null, null, null, null, null, null, null,
          null, null, null, null, null, null, null, null,
          10,
          Duration.millis(0)
      )
  );
  loadQueuePeon.start();

  ConcurrentMap<SegmentId, CountDownLatch> loadRequestSignals = new ConcurrentHashMap<>(5);
  ConcurrentMap<SegmentId, CountDownLatch> dropRequestSignals = new ConcurrentHashMap<>(5);
  ConcurrentMap<SegmentId, CountDownLatch> segmentLoadedSignals = new ConcurrentHashMap<>(5);
  ConcurrentMap<SegmentId, CountDownLatch> segmentDroppedSignals = new ConcurrentHashMap<>(5);

  final List<DataSegment> segmentToDrop = Lists.transform(
      ImmutableList.of(
          "2014-10-26T00:00:00Z/P1D",
          "2014-10-25T00:00:00Z/P1D",
          "2014-10-24T00:00:00Z/P1D",
          "2014-10-23T00:00:00Z/P1D",
          "2014-10-22T00:00:00Z/P1D"
      ),
      new Function<String, DataSegment>()
      {
        @Override
        public DataSegment apply(String intervalStr)
        {
          return dataSegmentWithInterval(intervalStr);
        }
      }
  );

  final CountDownLatch[] dropRequestLatches = new CountDownLatch[5];
  final CountDownLatch[] dropSegmentLatches = new CountDownLatch[5];
  for (int i = 0; i < 5; i++) {
    dropRequestLatches[i] = new CountDownLatch(1);
    dropSegmentLatches[i] = new CountDownLatch(1);
  }
  int i = 0;
  for (DataSegment s : segmentToDrop) {
    dropRequestSignals.put(s.getId(), dropRequestLatches[i]);
    segmentDroppedSignals.put(s.getId(), dropSegmentLatches[i++]);
  }

  final List<DataSegment> segmentToLoad = Lists.transform(
      ImmutableList.of(
          "2014-10-27T00:00:00Z/P1D",
          "2014-10-29T00:00:00Z/P1M",
          "2014-10-31T00:00:00Z/P1D",
          "2014-10-30T00:00:00Z/P1D",
          "2014-10-28T00:00:00Z/P1D"
      ),
      new Function<String, DataSegment>()
      {
        @Override
        public DataSegment apply(String intervalStr)
        {
          DataSegment dataSegment = dataSegmentWithInterval(intervalStr);
          loadRequestSignals.put(dataSegment.getId(), new CountDownLatch(1));
          segmentLoadedSignals.put(dataSegment.getId(), new CountDownLatch(1));
          return dataSegment;
        }
      }
  );

  final CountDownLatch[] loadRequestLatches = new CountDownLatch[5];
  final CountDownLatch[] segmentLoadedLatches = new CountDownLatch[5];
  for (i = 0; i < 5; i++) {
    loadRequestLatches[i] = new CountDownLatch(1);
    segmentLoadedLatches[i] = new CountDownLatch(1);
  }
  i = 0;
  for (DataSegment s : segmentToLoad) {
    loadRequestSignals.put(s.getId(), loadRequestLatches[i]);
    segmentLoadedSignals.put(s.getId(), segmentLoadedLatches[i++]);
  }
  // segment with latest interval should be loaded first
  final List<DataSegment> expectedLoadOrder = Lists.transform(
      ImmutableList.of(
          "2014-10-29T00:00:00Z/P1M",
          "2014-10-31T00:00:00Z/P1D",
          "2014-10-30T00:00:00Z/P1D",
          "2014-10-28T00:00:00Z/P1D",
          "2014-10-27T00:00:00Z/P1D"
      ),
      intervalStr -> dataSegmentWithInterval(intervalStr)
  );

  final DataSegmentChangeHandler handler = new DataSegmentChangeHandler()
  {
    @Override
    public void addSegment(DataSegment segment, DataSegmentChangeCallback callback)
    {
      loadRequestSignals.get(segment.getId()).countDown();
    }

    @Override
    public void removeSegment(DataSegment segment, DataSegmentChangeCallback callback)
    {
      dropRequestSignals.get(segment.getId()).countDown();
    }
  };

  loadQueueCache.getListenable().addListener((client, event) -> {
    if (event.getType() == PathChildrenCacheEvent.Type.CHILD_ADDED) {
      DataSegmentChangeRequest request = jsonMapper.readValue(event.getData().getData(), DataSegmentChangeRequest.class);
      request.go(handler, null);
    }
  });
  loadQueueCache.start();

  for (final DataSegment segment : segmentToDrop) {
    loadQueuePeon.dropSegment(segment, () -> segmentDroppedSignals.get(segment.getId()).countDown());
  }
  for (final DataSegment segment : segmentToLoad) {
    loadQueuePeon.loadSegment(segment, () -> segmentLoadedSignals.get(segment.getId()).countDown());
  }

  Assert.assertEquals(6000, loadQueuePeon.getLoadQueueSize());
  Assert.assertEquals(5, loadQueuePeon.getSegmentsToLoad().size());
  Assert.assertEquals(5, loadQueuePeon.getSegmentsToDrop().size());
  Assert.assertEquals(0, loadQueuePeon.getTimedOutSegments().size());

  for (DataSegment segment : segmentToDrop) {
    String dropRequestPath = ZKPaths.makePath(LOAD_QUEUE_PATH, segment.getId().toString());
    Assert.assertTrue(
        "Latch not counted down for " + dropRequestSignals.get(segment.getId()),
        dropRequestSignals.get(segment.getId()).await(10, TimeUnit.SECONDS)
    );
    Assert.assertNotNull(
        "Path " + dropRequestPath + " doesn't exist",
        curator.checkExists().forPath(dropRequestPath)
    );
    Assert.assertEquals(
        segment,
        ((SegmentChangeRequestDrop) jsonMapper.readValue(
            curator.getData().decompressed().forPath(dropRequestPath),
            DataSegmentChangeRequest.class
        )).getSegment()
    );

    // simulate completion of drop request by historical
    curator.delete().guaranteed().forPath(dropRequestPath);
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentDroppedSignals.get(segment.getId())));
  }

  for (DataSegment segment : expectedLoadOrder) {
    String loadRequestPath = ZKPaths.makePath(LOAD_QUEUE_PATH, segment.getId().toString());
    Assert.assertTrue(timing.forWaiting().awaitLatch(loadRequestSignals.get(segment.getId())));
    Assert.assertNotNull(curator.checkExists().forPath(loadRequestPath));
    Assert.assertEquals(
        segment,
        ((SegmentChangeRequestLoad) jsonMapper.readValue(
            curator.getData().decompressed().forPath(loadRequestPath),
            DataSegmentChangeRequest.class
        )).getSegment()
    );

    // simulate completion of load request by historical
    curator.delete().guaranteed().forPath(loadRequestPath);
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentLoadedSignals.get(segment.getId())));
  }
}
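
The PathChildrenCache listener in this test works because DataSegmentChangeRequest is a Jackson-polymorphic type: an "action" discriminator in the znode payload selects SegmentChangeRequestLoad or SegmentChangeRequestDrop, and go() double-dispatches to the matching handler method. Below is a standalone sketch of that mechanism; the class name, the use of DefaultObjectMapper, and the pared-down segment JSON are illustrative assumptions, not the test's actual fixtures.

import org.apache.druid.jackson.DefaultObjectMapper;
import org.apache.druid.server.coordination.DataSegmentChangeCallback;
import org.apache.druid.server.coordination.DataSegmentChangeHandler;
import org.apache.druid.server.coordination.DataSegmentChangeRequest;
import org.apache.druid.timeline.DataSegment;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ChangeRequestDispatchSketch
{
  public static void main(String[] args) throws Exception
  {
    ObjectMapper mapper = new DefaultObjectMapper();

    // "action" is the Jackson type discriminator on DataSegmentChangeRequest;
    // the segment payload here is a minimal descriptor, assumed sufficient for deserialization.
    String json = "{\"action\":\"drop\",\"segment\":{"
                  + "\"dataSource\":\"wiki\","
                  + "\"interval\":\"2014-10-22T00:00:00Z/2014-10-23T00:00:00Z\","
                  + "\"version\":\"v1\",\"size\":1200}}";

    DataSegmentChangeRequest request = mapper.readValue(json, DataSegmentChangeRequest.class);

    // go() routes loads to addSegment() and drops to removeSegment(),
    // just like the listener wired up in the test above.
    request.go(
        new DataSegmentChangeHandler()
        {
          @Override
          public void addSegment(DataSegment segment, DataSegmentChangeCallback callback)
          {
            System.out.println("load " + segment.getId());
          }

          @Override
          public void removeSegment(DataSegment segment, DataSegmentChangeCallback callback)
          {
            System.out.println("drop " + segment.getId());
          }
        },
        null
    );
  }
}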
use of org.apache.druid.server.coordination.DataSegmentChangeRequest in project druid by druid-io.
the class BatchDataSegmentAnnouncerTest method testSingleAnnounce.
@Test
public void testSingleAnnounce() throws Exception {
  Iterator<DataSegment> segIter = testSegments.iterator();
  DataSegment firstSegment = segIter.next();
  DataSegment secondSegment = segIter.next();

  segmentAnnouncer.announceSegment(firstSegment);

  List<String> zNodes = cf.getChildren().forPath(TEST_SEGMENTS_PATH);
  for (String zNode : zNodes) {
    Set<DataSegment> segments = segmentReader.read(JOINER.join(TEST_SEGMENTS_PATH, zNode));
    Assert.assertEquals(segments.iterator().next(), firstSegment);
  }

  segmentAnnouncer.announceSegment(secondSegment);

  for (String zNode : zNodes) {
    Set<DataSegment> segments = segmentReader.read(JOINER.join(TEST_SEGMENTS_PATH, zNode));
    Assert.assertEquals(Sets.newHashSet(firstSegment, secondSegment), segments);
  }

  ChangeRequestsSnapshot<DataSegmentChangeRequest> snapshot =
      segmentAnnouncer.getSegmentChangesSince(new ChangeRequestHistory.Counter(-1, -1)).get();
  Assert.assertEquals(2, snapshot.getRequests().size());
  Assert.assertEquals(2, snapshot.getCounter().getCounter());

  segmentAnnouncer.unannounceSegment(firstSegment);

  for (String zNode : zNodes) {
    Set<DataSegment> segments = segmentReader.read(JOINER.join(TEST_SEGMENTS_PATH, zNode));
    Assert.assertEquals(segments.iterator().next(), secondSegment);
  }

  segmentAnnouncer.unannounceSegment(secondSegment);
  Assert.assertTrue(cf.getChildren().forPath(TEST_SEGMENTS_PATH).isEmpty());

  snapshot = segmentAnnouncer.getSegmentChangesSince(snapshot.getCounter()).get();
  Assert.assertEquals(2, snapshot.getRequests().size());
  Assert.assertEquals(4, snapshot.getCounter().getCounter());

  snapshot = segmentAnnouncer.getSegmentChangesSince(new ChangeRequestHistory.Counter(-1, -1)).get();
  Assert.assertEquals(0, snapshot.getRequests().size());
  Assert.assertEquals(4, snapshot.getCounter().getCounter());
}
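
The counter advancing from 2 to 4 above is what lets a caller consume the announcer's change history incrementally. Below is a sketch of the consuming side, roughly what HttpServerInventoryView does with these snapshots; it assumes announcer is a started BatchDataSegmentAnnouncer and that ChangeRequestsSnapshot exposes an isResetCounter() accessor for the case where the bounded history has been truncated past the caller's counter.

// A minimal sketch of an incremental consumer loop, under the assumptions stated above.
ChangeRequestHistory.Counter counter = new ChangeRequestHistory.Counter(-1, -1);
while (!Thread.currentThread().isInterrupted()) {
  ChangeRequestsSnapshot<DataSegmentChangeRequest> snapshot =
      announcer.getSegmentChangesSince(counter).get();

  if (snapshot.isResetCounter()) {
    // Our counter fell out of the bounded history; discard local state and
    // request a full snapshot (counter = -1) on the next iteration.
    counter = new ChangeRequestHistory.Counter(-1, -1);
    continue;
  }

  for (DataSegmentChangeRequest request : snapshot.getRequests()) {
    // apply the load/drop delta to the local view of the server's segments
  }
  counter = snapshot.getCounter();
}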
use of org.apache.druid.server.coordination.DataSegmentChangeRequest in project druid by druid-io.
the class HttpLoadQueuePeon method doSegmentManagement.
private void doSegmentManagement() {
  if (stopped || !mainLoopInProgress.compareAndSet(false, true)) {
    log.trace("[%s]Ignoring tick. Either in-progress already or stopped.", serverId);
    return;
  }

  final int batchSize = config.getHttpLoadQueuePeonBatchSize();
  final List<DataSegmentChangeRequest> newRequests = new ArrayList<>(batchSize);

  synchronized (lock) {
    Iterator<Map.Entry<DataSegment, SegmentHolder>> iter = Iterators.concat(
        segmentsToDrop.entrySet().iterator(),
        segmentsToLoad.entrySet().iterator()
    );

    while (newRequests.size() < batchSize && iter.hasNext()) {
      Map.Entry<DataSegment, SegmentHolder> entry = iter.next();
      if (entry.getValue().hasTimedOut()) {
        entry.getValue().requestFailed("timed out");
        iter.remove();
      } else {
        newRequests.add(entry.getValue().getChangeRequest());
      }
    }
  }

  if (newRequests.size() == 0) {
    log.trace(
        "[%s]Found no load/drop requests. SegmentsToLoad[%d], SegmentsToDrop[%d], batchSize[%d].",
        serverId,
        segmentsToLoad.size(),
        segmentsToDrop.size(),
        config.getHttpLoadQueuePeonBatchSize()
    );
    mainLoopInProgress.set(false);
    return;
  }

  try {
    log.trace("Sending [%d] load/drop requests to Server[%s].", newRequests.size(), serverId);
    BytesAccumulatingResponseHandler responseHandler = new BytesAccumulatingResponseHandler();
    ListenableFuture<InputStream> future = httpClient.go(
        new Request(HttpMethod.POST, changeRequestURL)
            .addHeader(HttpHeaders.Names.ACCEPT, MediaType.APPLICATION_JSON)
            .addHeader(HttpHeaders.Names.CONTENT_TYPE, MediaType.APPLICATION_JSON)
            .setContent(requestBodyWriter.writeValueAsBytes(newRequests)),
        responseHandler,
        new Duration(config.getHttpLoadQueuePeonHostTimeout().getMillis() + 5000)
    );

    Futures.addCallback(future, new FutureCallback<InputStream>() {
      @Override
      public void onSuccess(InputStream result)
      {
        boolean scheduleNextRunImmediately = true;
        try {
          if (responseHandler.getStatus() == HttpServletResponse.SC_NO_CONTENT) {
log.trace("Received NO CONTENT reseponse from [%s]", serverId);
          } else if (HttpServletResponse.SC_OK == responseHandler.getStatus()) {
            try {
              List<SegmentLoadDropHandler.DataSegmentChangeRequestAndStatus> statuses =
                  jsonMapper.readValue(result, RESPONSE_ENTITY_TYPE_REF);
              log.trace("Server[%s] returned status response [%s].", serverId, statuses);
              synchronized (lock) {
                if (stopped) {
                  log.trace("Ignoring response from Server[%s]. We are already stopped.", serverId);
                  scheduleNextRunImmediately = false;
                  return;
                }

                for (SegmentLoadDropHandler.DataSegmentChangeRequestAndStatus e : statuses) {
                  switch (e.getStatus().getState()) {
                    case SUCCESS:
                    case FAILED:
                      handleResponseStatus(e.getRequest(), e.getStatus());
                      break;
                    case PENDING:
                      log.trace("Request[%s] is still pending on server[%s].", e.getRequest(), serverId);
                      break;
                    default:
                      scheduleNextRunImmediately = false;
                      log.error("Server[%s] returned unknown state in status[%s].", serverId, e.getStatus());
                  }
                }
              }
            }
            catch (Exception ex) {
              scheduleNextRunImmediately = false;
              logRequestFailure(ex);
            }
          } else {
            scheduleNextRunImmediately = false;
            logRequestFailure(new RE("Unexpected Response Status."));
          }
        }
        finally {
          mainLoopInProgress.set(false);
          if (scheduleNextRunImmediately) {
            processingExecutor.execute(HttpLoadQueuePeon.this::doSegmentManagement);
          }
        }
      }

      @Override
      public void onFailure(Throwable t)
      {
        try {
          logRequestFailure(t);
        }
        finally {
          mainLoopInProgress.set(false);
        }
      }

      private void logRequestFailure(Throwable t)
      {
        log.error(
            t,
            "Request[%s] Failed with status[%s]. Reason[%s].",
            changeRequestURL,
            responseHandler.getStatus(),
            responseHandler.getDescription()
        );
      }
    }, processingExecutor);
  }
  catch (Throwable th) {
    log.error(th, "Error sending load/drop request to [%s].", serverId);
    mainLoopInProgress.set(false);
  }
}
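
The request body built by requestBodyWriter.writeValueAsBytes(newRequests) is a JSON array of polymorphic DataSegmentChangeRequest objects, capped at getHttpLoadQueuePeonBatchSize() entries per POST; anything beyond the cap waits for the next tick. Below is a sketch of that wire format round-tripped through Jackson; the class name, the DefaultObjectMapper usage, and the trimmed segment payloads are assumptions for illustration, not the peon's actual code.

import java.util.List;
import org.apache.druid.jackson.DefaultObjectMapper;
import org.apache.druid.server.coordination.DataSegmentChangeRequest;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

public class BatchWireFormatSketch
{
  public static void main(String[] args) throws Exception
  {
    ObjectMapper mapper = new DefaultObjectMapper();

    // One batch: a load and a drop, shaped like the array HttpLoadQueuePeon POSTs.
    String batch = "["
                   + "{\"action\":\"load\",\"segment\":{\"dataSource\":\"wiki\","
                   + "\"interval\":\"2014-10-27T00:00:00Z/2014-10-28T00:00:00Z\","
                   + "\"version\":\"v1\",\"size\":1200}},"
                   + "{\"action\":\"drop\",\"segment\":{\"dataSource\":\"wiki\","
                   + "\"interval\":\"2014-10-22T00:00:00Z/2014-10-23T00:00:00Z\","
                   + "\"version\":\"v1\",\"size\":1200}}"
                   + "]";

    List<DataSegmentChangeRequest> requests =
        mapper.readValue(batch, new TypeReference<List<DataSegmentChangeRequest>>() {});

    // Serializing the list back yields the same array-of-requests body.
    System.out.println(mapper.writeValueAsString(requests));
  }
}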