Use of java.util.concurrent.atomic.AtomicReference in project weave by continuuity.
The class ZKServiceDecoratorTest, method testStateTransition.
@Test
public void testStateTransition() throws InterruptedException, ExecutionException, TimeoutException {
  InMemoryZKServer zkServer = InMemoryZKServer.builder().build();
  zkServer.startAndWait();
  try {
    final String namespace = Joiner.on('/').join("/weave", RunIds.generate(), "runnables", "Runner1");
    final ZKClientService zkClient = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
    zkClient.startAndWait();
    zkClient.create(namespace, null, CreateMode.PERSISTENT).get();
    try {
      JsonObject content = new JsonObject();
      content.addProperty("containerId", "container-123");
      content.addProperty("host", "localhost");
      RunId runId = RunIds.generate();
      final Semaphore semaphore = new Semaphore(0);
      ZKServiceDecorator service = new ZKServiceDecorator(
          ZKClients.namespace(zkClient, namespace), runId, Suppliers.ofInstance(content),
          new AbstractIdleService() {
            @Override
            protected void startUp() throws Exception {
              Preconditions.checkArgument(semaphore.tryAcquire(5, TimeUnit.SECONDS), "Fail to start");
            }
            @Override
            protected void shutDown() throws Exception {
              Preconditions.checkArgument(semaphore.tryAcquire(5, TimeUnit.SECONDS), "Fail to stop");
            }
          });
      final String runnablePath = namespace + "/" + runId.getId();
      final AtomicReference<String> stateMatch = new AtomicReference<String>("STARTING");
      watchDataChange(zkClient, runnablePath + "/state", semaphore, stateMatch);
      Assert.assertEquals(Service.State.RUNNING, service.start().get(5, TimeUnit.SECONDS));
      stateMatch.set("STOPPING");
      Assert.assertEquals(Service.State.TERMINATED, service.stop().get(5, TimeUnit.SECONDS));
    } finally {
      zkClient.stopAndWait();
    }
  } finally {
    zkServer.stopAndWait();
  }
}
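The AtomicReference here acts as a mutable "expected state" shared between the test thread and the ZooKeeper watcher installed by watchDataChange, a helper whose body is not shown in this snippet. Below is a minimal sketch of how such a watcher callback might use the reference, assuming it releases the semaphore once the watched node's data matches the currently expected state; the class and method names are illustrative, not part of the weave API.

import java.nio.charset.StandardCharsets;
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicReference;

// Hypothetical data-change callback: releases the semaphore whenever the bytes
// stored under the watched znode contain the state the test currently expects.
final class ExpectedStateWatcher {
  private final Semaphore semaphore;
  private final AtomicReference<String> expectedState;

  ExpectedStateWatcher(Semaphore semaphore, AtomicReference<String> expectedState) {
    this.semaphore = semaphore;
    this.expectedState = expectedState;
  }

  // Invoked with the new znode content on every data-change event.
  void onDataChanged(byte[] data) {
    String state = new String(data, StandardCharsets.UTF_8);
    if (state.contains(expectedState.get())) {
      semaphore.release(); // unblocks startUp()/shutDown() in the decorated service
    }
  }
}

Because the test thread swaps the expected value with stateMatch.set("STOPPING") before stopping the service, the same watcher instance can gate both transitions without being re-registered.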
Use of java.util.concurrent.atomic.AtomicReference in project druid by druid-io.
The class CompressedIntsIndexedSupplierTest, method testConcurrentThreadReads.
// This test attempts to cause a race condition with the DirectByteBuffers, it's non-deterministic in causing it,
// which sucks but I can't think of a way to deterministically cause it...
@Test
public void testConcurrentThreadReads() throws Exception {
  setupSimple(5);
  final AtomicReference<String> reason = new AtomicReference<String>("none");
  final int numRuns = 1000;
  final CountDownLatch startLatch = new CountDownLatch(1);
  final CountDownLatch stopLatch = new CountDownLatch(2);
  final AtomicBoolean failureHappened = new AtomicBoolean(false);
  new Thread(new Runnable() {
    @Override
    public void run() {
      try {
        startLatch.await();
      } catch (InterruptedException e) {
        failureHappened.set(true);
        reason.set("interrupt.");
        stopLatch.countDown();
        return;
      }
      try {
        for (int i = 0; i < numRuns; ++i) {
          for (int j = 0; j < indexed.size(); ++j) {
            final long val = vals[j];
            final long indexedVal = indexed.get(j);
            if (Longs.compare(val, indexedVal) != 0) {
              failureHappened.set(true);
              reason.set(String.format("Thread1[%d]: %d != %d", j, val, indexedVal));
              stopLatch.countDown();
              return;
            }
          }
        }
      } catch (Exception e) {
        e.printStackTrace();
        failureHappened.set(true);
        reason.set(e.getMessage());
      }
      stopLatch.countDown();
    }
  }).start();
  final IndexedInts indexed2 = supplier.get();
  try {
    new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          startLatch.await();
        } catch (InterruptedException e) {
          stopLatch.countDown();
          return;
        }
        try {
          for (int i = 0; i < numRuns; ++i) {
            for (int j = indexed2.size() - 1; j >= 0; --j) {
              final long val = vals[j];
              final long indexedVal = indexed2.get(j);
              if (Longs.compare(val, indexedVal) != 0) {
                failureHappened.set(true);
                reason.set(String.format("Thread2[%d]: %d != %d", j, val, indexedVal));
                stopLatch.countDown();
                return;
              }
            }
          }
        } catch (Exception e) {
          e.printStackTrace();
          reason.set(e.getMessage());
          failureHappened.set(true);
        }
        stopLatch.countDown();
      }
    }).start();
    startLatch.countDown();
    stopLatch.await();
  } finally {
    CloseQuietly.close(indexed2);
  }
  if (failureHappened.get()) {
    Assert.fail("Failure happened. Reason: " + reason.get());
  }
}
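Stripped of the Druid-specific buffer reads, the coordination idiom above is: a start latch to line the worker threads up, a stop latch to join them, an AtomicBoolean to flag failure, and an AtomicReference<String> to carry the first failure message back to the asserting thread. A self-contained sketch of that idiom follows; all names and the check(...) placeholder are illustrative, not Druid code.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

// Distilled version of the coordination pattern used by the test above.
public final class ConcurrentCheckSketch {
  public static void main(String[] args) throws InterruptedException {
    final int workers = 2;
    final CountDownLatch startLatch = new CountDownLatch(1);
    final CountDownLatch stopLatch = new CountDownLatch(workers);
    final AtomicBoolean failureHappened = new AtomicBoolean(false);
    final AtomicReference<String> reason = new AtomicReference<>("none");

    for (int t = 0; t < workers; t++) {
      final int id = t;
      new Thread(() -> {
        try {
          startLatch.await();        // all workers begin at the same instant
          for (int i = 0; i < 1_000; i++) {
            if (!check(id, i)) {     // stand-in for the indexed-vs-expected comparison
              failureHappened.set(true);
              reason.set("Thread" + id + " failed at iteration " + i);
              return;
            }
          }
        } catch (InterruptedException e) {
          failureHappened.set(true);
          reason.set("interrupt");
        } finally {
          stopLatch.countDown();     // always signal completion, even on early return
        }
      }).start();
    }

    startLatch.countDown();          // release the workers
    stopLatch.await();               // wait for all of them to finish
    if (failureHappened.get()) {
      throw new AssertionError("Failure happened. Reason: " + reason.get());
    }
  }

  private static boolean check(int threadId, int iteration) {
    return true;                     // placeholder for the real read/compare work
  }
}

Putting stopLatch.countDown() in a finally block is a small tightening over the original, which has to repeat the call on every early-return path.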
Use of java.util.concurrent.atomic.AtomicReference in project druid by druid-io.
The class LookupCoordinatorManager, method updateAllOnHost.
void updateAllOnHost(final URL url, Map<String, Map<String, Object>> knownLookups)
    throws IOException, InterruptedException, ExecutionException {
  final AtomicInteger returnCode = new AtomicInteger(0);
  final AtomicReference<String> reasonString = new AtomicReference<>(null);
  final byte[] bytes;
  try {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Loading up %d lookups to %s", knownLookups.size(), url);
    }
    bytes = smileMapper.writeValueAsBytes(knownLookups);
  } catch (JsonProcessingException e) {
    throw Throwables.propagate(e);
  }
  try (final InputStream result = httpClient.go(
      new Request(HttpMethod.POST, url)
          .addHeader(HttpHeaders.Names.ACCEPT, SmileMediaTypes.APPLICATION_JACKSON_SMILE)
          .addHeader(HttpHeaders.Names.CONTENT_TYPE, SmileMediaTypes.APPLICATION_JACKSON_SMILE)
          .setContent(bytes),
      makeResponseHandler(returnCode, reasonString),
      lookupCoordinatorManagerConfig.getHostUpdateTimeout()
  ).get()) {
    if (!httpStatusIsSuccess(returnCode.get())) {
      final ByteArrayOutputStream baos = new ByteArrayOutputStream();
      try {
        StreamUtils.copyAndClose(result, baos);
      } catch (IOException e2) {
        LOG.warn(e2, "Error reading response");
      }
      throw new IOException(String.format(
          "Bad update request to [%s] : [%d] : [%s] Response: [%s]",
          url, returnCode.get(), reasonString.get(), StringUtils.fromUtf8(baos.toByteArray())));
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Update on [%s], Status: %s reason: [%s]", url, returnCode.get(), reasonString.get());
      }
      final Map<String, Object> resultMap = smileMapper.readValue(result, MAP_STRING_OBJ_TYPE);
      final Object missingValuesObject = resultMap.get(LookupModule.FAILED_UPDATES_KEY);
      if (null == missingValuesObject) {
        throw new IAE("Update result did not have field for [%s]", LookupModule.FAILED_UPDATES_KEY);
      }
      final Map<String, Object> missingValues = smileMapper.convertValue(missingValuesObject, MAP_STRING_OBJ_TYPE);
      if (!missingValues.isEmpty()) {
        throw new IAE("Lookups failed to update: %s", smileMapper.writeValueAsString(missingValues.keySet()));
      } else {
        LOG.debug("Updated all lookups on [%s]", url);
      }
    }
  }
}
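Here makeResponseHandler(returnCode, reasonString) uses the two atomics as out-parameters: the HTTP response handler fills them in while processing the response, and updateAllOnHost reads them once the future returned by httpClient.go(...) has resolved. A minimal sketch of that holder pattern, assuming a callback that receives the parsed status line; the StatusCapture class and its onStatus method are hypothetical, not Druid API.

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

// Out-parameter holders filled by a callback; the caller reads them after the
// asynchronous request has completed.
final class StatusCapture {
  final AtomicInteger returnCode = new AtomicInteger(0);
  final AtomicReference<String> reasonString = new AtomicReference<>(null);

  // Hypothetical hook the HTTP layer would invoke with the response status.
  void onStatus(int code, String reason) {
    returnCode.set(code);
    reasonString.set(reason);
  }
}

The atomics make the handler's writes visible to the calling thread without any extra locking, which is why plain set()/get() is enough on both sides.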
Use of java.util.concurrent.atomic.AtomicReference in project druid by druid-io.
The class DruidCoordinatorSegmentMergerTest, method merge.
/**
* Runs DruidCoordinatorSegmentMerger on a particular set of segments and returns the list of requested merges.
*/
private static List<List<DataSegment>> merge(final Collection<DataSegment> segments) {
  final JacksonConfigManager configManager = EasyMock.createMock(JacksonConfigManager.class);
  EasyMock.expect(configManager.watch(DatasourceWhitelist.CONFIG_KEY, DatasourceWhitelist.class))
          .andReturn(new AtomicReference<DatasourceWhitelist>(null))
          .anyTimes();
  EasyMock.replay(configManager);
  final List<List<DataSegment>> retVal = Lists.newArrayList();
  final IndexingServiceClient indexingServiceClient = new IndexingServiceClient(null, null, null) {
    @Override
    public void mergeSegments(List<DataSegment> segmentsToMerge) {
      retVal.add(segmentsToMerge);
    }
  };
  final DruidCoordinatorSegmentMerger merger = new DruidCoordinatorSegmentMerger(indexingServiceClient, configManager);
  final DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams
      .newBuilder()
      .withAvailableSegments(ImmutableSet.copyOf(segments))
      .withDynamicConfigs(new CoordinatorDynamicConfig.Builder()
          .withMergeBytesLimit(mergeBytesLimit)
          .withMergeSegmentsLimit(mergeSegmentsLimit)
          .build())
      .withEmitter(EasyMock.createMock(ServiceEmitter.class))
      .build();
  merger.run(params);
  return retVal;
}
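In this example the AtomicReference comes from JacksonConfigManager.watch(...), which hands callers a live holder for the current config value: the manager sets it when the stored config changes, and consumers call get() on every use. The mocked call returns new AtomicReference<DatasourceWhitelist>(null), meaning no whitelist is configured, so the merger runs without any datasource filtering. A rough sketch of that watch-style holder follows; ConfigHolder and its methods are illustrative, not the Druid class.

import java.util.concurrent.atomic.AtomicReference;

// Watch-style config holder: the manager keeps one AtomicReference per key,
// updates it when new config arrives, and hands the same reference to every
// watcher, which simply calls get() on each use.
final class ConfigHolder<T> {
  private final AtomicReference<T> ref = new AtomicReference<>(null);

  AtomicReference<T> watch() {
    return ref;               // watchers see later updates without re-registering
  }

  void onConfigChanged(T newValue) {
    ref.set(newValue);        // publishes the new value to all watchers
  }
}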
Use of java.util.concurrent.atomic.AtomicReference in project druid by druid-io.
The class LookupCoordinatorManagerTest, method testUpdateAllOnHostException (exercising the failure path of updateAllOnHost shown above).
@Test
public void testUpdateAllOnHostException() throws Exception {
  final HttpResponseHandler<InputStream, InputStream> responseHandler = EasyMock.createStrictMock(HttpResponseHandler.class);
  final URL url = LookupCoordinatorManager.getLookupsURL(HostAndPort.fromString("localhost"));
  final SettableFuture<InputStream> future = SettableFuture.create();
  future.set(new ByteArrayInputStream(new byte[0]));
  EasyMock.expect(client.go(
      EasyMock.<Request>anyObject(),
      EasyMock.<SequenceInputStreamResponseHandler>anyObject(),
      EasyMock.<Duration>anyObject()
  )).andReturn(future).once();
  EasyMock.replay(client, responseHandler);
  final LookupCoordinatorManager manager = new LookupCoordinatorManager(client, discoverer, mapper, configManager, lookupCoordinatorManagerConfig) {
    @Override
    HttpResponseHandler<InputStream, InputStream> makeResponseHandler(final AtomicInteger returnCode, final AtomicReference<String> reasonString) {
      returnCode.set(500);
      reasonString.set("");
      return responseHandler;
    }
  };
  expectedException.expect(new BaseMatcher<Throwable>() {
    @Override
    public boolean matches(Object o) {
      return o instanceof IOException && ((IOException) o).getMessage().startsWith("Bad update request");
    }
    @Override
    public void describeTo(Description description) {
    }
  });
  try {
    manager.updateAllOnHost(url, SINGLE_LOOKUP_MAP);
  } finally {
    EasyMock.verify(client, responseHandler);
  }
}