Use of java.util.concurrent.ConcurrentHashMap in project neo4j by neo4j.
From the class InMemoryCountsStoreSnapshotDeserializerTest, method setup:
@Before
public void setup() throws IOException {
    logChannel = new InMemoryClosableChannel();
    countsSnapshot = new CountsSnapshot(1, new ConcurrentHashMap<>());
}
Use of java.util.concurrent.ConcurrentHashMap in project neo4j by neo4j.
From the class CountsSnapshotDeserializer, method deserialize:
public static CountsSnapshot deserialize(ReadableClosableChannel channel) throws IOException {
    long txid = channel.getLong();
    int size = channel.getInt();
    Map<CountsKey, long[]> map = new ConcurrentHashMap<>(size);
    CountsKey key;
    long[] value;
    for (int i = 0; i < size; i++) {
        CountsKeyType type = value(channel.get());
        switch (type) {
        case ENTITY_NODE:
            key = nodeKey(channel.getInt());
            value = new long[] { channel.getLong() };
            map.put(key, value);
            break;
        case ENTITY_RELATIONSHIP:
            int startLabelId = channel.getInt();
            int typeId = channel.getInt();
            int endLabelId = channel.getInt();
            key = relationshipKey(startLabelId, typeId, endLabelId);
            value = new long[] { channel.getLong() };
            map.put(key, value);
            break;
        case INDEX_SAMPLE:
            key = indexSampleKey(channel.getLong());
            value = new long[] { channel.getLong(), channel.getLong() };
            map.put(key, value);
            break;
        case INDEX_STATISTICS:
            key = indexStatisticsKey(channel.getLong());
            value = new long[] { channel.getLong(), channel.getLong() };
            map.put(key, value);
            break;
        case EMPTY:
            throw new IllegalArgumentException("CountsKey of type EMPTY cannot be deserialized.");
        default:
            throw new IllegalArgumentException("The read CountsKey has an unknown type.");
        }
    }
    return new CountsSnapshot(txid, map);
}
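The deserializer above passes the serialized element count to the ConcurrentHashMap constructor as a capacity hint and then fills the map record by record. The following is a minimal, self-contained sketch of that same pattern, not neo4j code: it reads from a plain DataInputStream in place of ReadableClosableChannel, and the record layout (a long key followed by one long counter) is invented purely for illustration.

// Sketch only: size-prefixed stream -> pre-sized ConcurrentHashMap.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class SnapshotSketch {

    static Map<Long, long[]> read(DataInputStream in) throws IOException {
        int size = in.readInt();
        // The size gives the map a capacity hint so it does not have to grow repeatedly while we fill it.
        Map<Long, long[]> map = new ConcurrentHashMap<>(size);
        for (int i = 0; i < size; i++) {
            long key = in.readLong();
            long[] value = new long[] { in.readLong() };
            map.put(key, value);
        }
        return map;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeInt(2);
            out.writeLong(1L); out.writeLong(42L);
            out.writeLong(2L); out.writeLong(7L);
        }
        Map<Long, long[]> map = read(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(map.get(1L)[0]); // prints 42
    }
}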
Use of java.util.concurrent.ConcurrentHashMap in project druid by druid-io.
From the class KafkaSupervisor, method discoverTasks:
private void discoverTasks() throws ExecutionException, InterruptedException, TimeoutException {
  int taskCount = 0;
  List<String> futureTaskIds = Lists.newArrayList();
  List<ListenableFuture<Boolean>> futures = Lists.newArrayList();
  List<Task> tasks = taskStorage.getActiveTasks();
  for (Task task : tasks) {
    if (!(task instanceof KafkaIndexTask) || !dataSource.equals(task.getDataSource())) {
      continue;
    }
    taskCount++;
    final KafkaIndexTask kafkaTask = (KafkaIndexTask) task;
    final String taskId = task.getId();
    // Determine which task group this task belongs to based on one of the partitions handled by this task. If we
    // later determine that this task is actively reading, we will make sure that it matches our current partition
    // allocation (getTaskGroupIdForPartition(partition) should return the same value for every partition being read
    // by this task) and kill it if it is not compatible. If the task is instead found to be in the publishing
    // state, we will permit it to complete even if it doesn't match our current partition allocation to support
    // seamless schema migration.
    Iterator<Integer> it = kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap().keySet().iterator();
    final Integer taskGroupId = (it.hasNext() ? getTaskGroupIdForPartition(it.next()) : null);
    if (taskGroupId != null) {
      // check to see if we already know about this task, either in [taskGroups] or in [pendingCompletionTaskGroups]
      // and if not add it to taskGroups or pendingCompletionTaskGroups (if status = PUBLISHING)
      TaskGroup taskGroup = taskGroups.get(taskGroupId);
      if (!isTaskInPendingCompletionGroups(taskId) && (taskGroup == null || !taskGroup.tasks.containsKey(taskId))) {
        futureTaskIds.add(taskId);
        futures.add(Futures.transform(taskClient.getStatusAsync(taskId), new Function<KafkaIndexTask.Status, Boolean>() {
          @Override
          public Boolean apply(KafkaIndexTask.Status status) {
            if (status == KafkaIndexTask.Status.PUBLISHING) {
              addDiscoveredTaskToPendingCompletionTaskGroups(taskGroupId, taskId, kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap());
              // update partitionGroups with the publishing task's offsets (if they are greater than what is
              // existing) so that the next tasks will start reading from where this task left off
              Map<Integer, Long> publishingTaskCurrentOffsets = taskClient.getCurrentOffsets(taskId, true);
              for (Map.Entry<Integer, Long> entry : publishingTaskCurrentOffsets.entrySet()) {
                Integer partition = entry.getKey();
                Long offset = entry.getValue();
                ConcurrentHashMap<Integer, Long> partitionOffsets = partitionGroups.get(getTaskGroupIdForPartition(partition));
                boolean succeeded;
                do {
                  succeeded = true;
                  Long previousOffset = partitionOffsets.putIfAbsent(partition, offset);
                  if (previousOffset != null && previousOffset < offset) {
                    succeeded = partitionOffsets.replace(partition, previousOffset, offset);
                  }
                } while (!succeeded);
              }
            } else {
              for (Integer partition : kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap().keySet()) {
                if (!taskGroupId.equals(getTaskGroupIdForPartition(partition))) {
                  log.warn("Stopping task [%s] which does not match the expected partition allocation", taskId);
                  try {
                    stopTask(taskId, false).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
                  } catch (InterruptedException | ExecutionException | TimeoutException e) {
                    log.warn(e, "Exception while stopping task");
                  }
                  return false;
                }
              }
              if (taskGroups.putIfAbsent(taskGroupId, new TaskGroup(ImmutableMap.copyOf(kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap()), kafkaTask.getIOConfig().getMinimumMessageTime())) == null) {
                log.debug("Created new task group [%d]", taskGroupId);
              }
              if (!isTaskCurrent(taskGroupId, taskId)) {
                log.info("Stopping task [%s] which does not match the expected parameters and ingestion spec", taskId);
                try {
                  stopTask(taskId, false).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
                } catch (InterruptedException | ExecutionException | TimeoutException e) {
                  log.warn(e, "Exception while stopping task");
                }
                return false;
              } else {
                taskGroups.get(taskGroupId).tasks.putIfAbsent(taskId, new TaskData());
              }
            }
            return true;
          }
        }, workerExec));
      }
    }
  }
  List<Boolean> results = Futures.successfulAsList(futures).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
  for (int i = 0; i < results.size(); i++) {
    if (results.get(i) == null) {
      String taskId = futureTaskIds.get(i);
      log.warn("Task [%s] failed to return status, killing task", taskId);
      killTask(taskId);
    }
  }
  log.debug("Found [%d] Kafka indexing tasks for dataSource [%s]", taskCount, dataSource);
}
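The inner do/while loop above is the interesting ConcurrentHashMap usage: it raises a per-partition offset without locking by combining putIfAbsent (to claim a missing key) with the three-argument replace (a compare-and-set that is retried until the stored offset is at least the observed one). Below is a minimal, self-contained sketch of that pattern; the class and field names are illustrative, not Druid code.

// Sketch only: lock-free "advance but never lower" update on a ConcurrentHashMap value.
import java.util.concurrent.ConcurrentHashMap;

public class OffsetAdvancer {

  private final ConcurrentHashMap<Integer, Long> partitionOffsets = new ConcurrentHashMap<>();

  /** Raise the stored offset for a partition, never lowering it, without taking a lock. */
  void advance(int partition, long offset) {
    boolean succeeded;
    do {
      succeeded = true;
      Long previous = partitionOffsets.putIfAbsent(partition, offset);
      if (previous != null && previous < offset) {
        // Another thread may have changed the value since we read it;
        // replace() returns false in that case and we loop to re-read.
        succeeded = partitionOffsets.replace(partition, previous, offset);
      }
    } while (!succeeded);
  }

  public static void main(String[] args) {
    OffsetAdvancer advancer = new OffsetAdvancer();
    advancer.advance(0, 10L);
    advancer.advance(0, 5L);   // lower offset is ignored
    advancer.advance(0, 20L);
    System.out.println(advancer.partitionOffsets); // {0=20}
  }
}

On Java 8 and later the same monotonic update can be written more compactly as partitionOffsets.merge(partition, offset, Long::max), since ConcurrentHashMap.merge applies the remapping function atomically per key.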
Use of java.util.concurrent.ConcurrentHashMap in project druid by druid-io.
From the class SQLMetadataSegmentManager, method poll:
@Override
public void poll() {
  try {
    if (!started) {
      return;
    }
    ConcurrentHashMap<String, DruidDataSource> newDataSources = new ConcurrentHashMap<String, DruidDataSource>();
    log.debug("Starting polling of segment table");
    // some databases such as PostgreSQL require auto-commit turned off
    // to stream results back, enabling transactions disables auto-commit
    //
    // setting connection to read-only will allow some database such as MySQL
    // to automatically use read-only transaction mode, further optimizing the query
    final List<DataSegment> segments = connector.inReadOnlyTransaction(new TransactionCallback<List<DataSegment>>() {
      @Override
      public List<DataSegment> inTransaction(Handle handle, TransactionStatus status) throws Exception {
        return handle.createQuery(String.format("SELECT payload FROM %s WHERE used=true", getSegmentsTable())).setFetchSize(connector.getStreamingFetchSize()).map(new ResultSetMapper<DataSegment>() {
          @Override
          public DataSegment map(int index, ResultSet r, StatementContext ctx) throws SQLException {
            try {
              return DATA_SEGMENT_INTERNER.intern(jsonMapper.readValue(r.getBytes("payload"), DataSegment.class));
            } catch (IOException e) {
              log.makeAlert(e, "Failed to read segment from db.");
              return null;
            }
          }
        }).list();
      }
    });
    if (segments == null || segments.isEmpty()) {
      log.warn("No segments found in the database!");
      return;
    }
    final Collection<DataSegment> segmentsFinal = Collections2.filter(segments, Predicates.notNull());
    log.info("Polled and found %,d segments in the database", segments.size());
    for (final DataSegment segment : segmentsFinal) {
      String datasourceName = segment.getDataSource();
      DruidDataSource dataSource = newDataSources.get(datasourceName);
      if (dataSource == null) {
        dataSource = new DruidDataSource(datasourceName, ImmutableMap.of("created", new DateTime().toString()));
        Object shouldBeNull = newDataSources.put(datasourceName, dataSource);
        if (shouldBeNull != null) {
          log.warn("Just put key[%s] into dataSources and what was there wasn't null!? It was[%s]", datasourceName, shouldBeNull);
        }
      }
      if (!dataSource.getSegments().contains(segment)) {
        dataSource.addSegment(segment.getIdentifier(), segment);
      }
    }
    synchronized (lock) {
      if (started) {
        dataSources.set(newDataSources);
      }
    }
  } catch (Exception e) {
    log.makeAlert(e, "Problem polling DB.").emit();
  }
}
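Here the ConcurrentHashMap is not mutated in place by concurrent writers: poll() builds a completely new map off to the side and then publishes it in a single reference assignment, so readers always see either the previous snapshot or the new one, never a half-filled map. Below is a minimal sketch of that copy-and-swap pattern with illustrative names, omitting the started/lock bookkeeping of the real class; it is not Druid code.

// Sketch only: rebuild a snapshot map and publish it atomically via AtomicReference.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;

public class SnapshotCache {

  private final AtomicReference<ConcurrentHashMap<String, Long>> current =
      new AtomicReference<>(new ConcurrentHashMap<>());

  /** Rebuild the cache from freshly loaded data and swap it in as one unit. */
  void poll(Map<String, Long> freshlyLoaded) {
    ConcurrentHashMap<String, Long> next = new ConcurrentHashMap<>(freshlyLoaded);
    current.set(next); // readers calling current.get() switch snapshots atomically
  }

  Long lookup(String key) {
    return current.get().get(key);
  }
}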
Use of java.util.concurrent.ConcurrentHashMap in project druid by druid-io.
From the class CoordinatorRuleManager, method poll:
@SuppressWarnings("unchecked")
public void poll() {
  try {
    String url = getRuleURL();
    if (url == null) {
      return;
    }
    FullResponseHolder response = httpClient.go(new Request(HttpMethod.GET, new URL(url)), responseHandler).get();
    if (response.getStatus().equals(HttpResponseStatus.FOUND)) {
      url = response.getResponse().headers().get("Location");
      log.info("Redirecting rule request to [%s]", url);
      response = httpClient.go(new Request(HttpMethod.GET, new URL(url)), responseHandler).get();
    }
    ConcurrentHashMap<String, List<Rule>> newRules = new ConcurrentHashMap<>(
        (Map<String, List<Rule>>) jsonMapper.readValue(response.getContent(), new TypeReference<Map<String, List<Rule>>>() {})
    );
    log.info("Got [%,d] rules", newRules.size());
    rules.set(newRules);
  } catch (Exception e) {
    log.error(e, "Exception while polling for rules");
  }
}
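This poll follows the same copy-and-swap idea as the segment manager above, with one extra detail: the map produced by the JSON deserializer is not thread-safe, so it is first copied into a ConcurrentHashMap and only then published through the rules reference. A minimal sketch with illustrative names, not Druid code:

// Sketch only: copy a freshly deserialized (non-thread-safe) map into a
// ConcurrentHashMap before publishing it for concurrent readers.
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;

public class RuleCache {

  private final AtomicReference<ConcurrentHashMap<String, List<String>>> rules =
      new AtomicReference<>(new ConcurrentHashMap<>());

  void update(Map<String, List<String>> parsed) {
    // The copy constructor accepts any Map, so the parsed result never has to be
    // thread-safe itself; only the published copy is shared across threads.
    rules.set(new ConcurrentHashMap<>(parsed));
  }

  List<String> rulesFor(String dataSource) {
    return rules.get().getOrDefault(dataSource, Collections.emptyList());
  }
}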