Search in sources :

Example 56 with SegmentId

use of org.apache.druid.timeline.SegmentId in project druid by druid-io.

From the class DruidSchemaTest, method testAvailableSegmentMetadataNumRows.

/**
 * Verifies that {@link AvailableSegmentMetadata#getNumRows()} stays correct in the presence of
 * multiple replicas, i.e. when {@link DruidSchema#addSegment(DruidServerMetadata, DataSegment)}
 * is called more than once for the same segment.
 */
@Test
public void testAvailableSegmentMetadataNumRows() {
    Map<SegmentId, AvailableSegmentMetadata> segmentsMetadata = schema.getSegmentMetadataSnapshot();
    final List<DataSegment> segments = segmentsMetadata.values()
            .stream()
            .map(AvailableSegmentMetadata::getSegment)
            .collect(Collectors.toList());
    Assert.assertEquals(4, segments.size());
    // Find the only segment with datasource "foo2".
    final DataSegment existingSegment = segments.stream()
            .filter(segment -> segment.getDataSource().equals("foo2"))
            .findFirst()
            .orElse(null);
    Assert.assertNotNull(existingSegment);
    final AvailableSegmentMetadata existingMetadata = segmentsMetadata.get(existingSegment.getId());
    // Update AvailableSegmentMetadata of existingSegment with numRows = 5.
    AvailableSegmentMetadata updatedMetadata = AvailableSegmentMetadata.from(existingMetadata).withNumRows(5).build();
    schema.setAvailableSegmentMetadata(existingSegment.getId(), updatedMetadata);
    // Find a druidServer currently holding existingSegment.
    final Pair<ImmutableDruidServer, DataSegment> pair = druidServers.stream()
            .flatMap(druidServer -> druidServer.iterateAllSegments()
                    .stream()
                    .filter(segment -> segment.getId().equals(existingSegment.getId()))
                    .map(segment -> Pair.of(druidServer, segment)))
            .findAny()
            .orElse(null);
    Assert.assertNotNull(pair);
    final ImmutableDruidServer server = pair.lhs;
    Assert.assertNotNull(server);
    final DruidServerMetadata druidServerMetadata = server.getMetadata();
    // Invoke DruidSchema#addSegment a second time for existingSegment, from the same server.
    schema.addSegment(druidServerMetadata, existingSegment);
    segmentsMetadata = schema.getSegmentMetadataSnapshot();
    // Re-fetch the only segment with datasource "foo2".
    final DataSegment currentSegment = segments.stream()
            .filter(segment -> segment.getDataSource().equals("foo2"))
            .findFirst()
            .orElse(null);
    // Guard against an NPE on getId() below; mirrors the existingSegment check above.
    Assert.assertNotNull(currentSegment);
    final AvailableSegmentMetadata currentMetadata = segmentsMetadata.get(currentSegment.getId());
    Assert.assertEquals(updatedMetadata.getSegment().getId(), currentMetadata.getSegment().getId());
    Assert.assertEquals(updatedMetadata.getNumRows(), currentMetadata.getNumRows());
    // numReplicas must not change here, since addSegment was called with the same server
    // that was already serving existingSegment.
    Assert.assertEquals(updatedMetadata.getNumReplicas(), currentMetadata.getNumReplicas());
}
Also used : Pair(org.apache.druid.java.util.common.Pair) AllColumnIncluderator(org.apache.druid.query.metadata.metadata.AllColumnIncluderator) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) After(org.junit.After) Map(java.util.Map) ServerType(org.apache.druid.server.coordination.ServerType) OffHeapMemorySegmentWriteOutMediumFactory(org.apache.druid.segment.writeout.OffHeapMemorySegmentWriteOutMediumFactory) EnumSet(java.util.EnumSet) HyperUniquesAggregatorFactory(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) NumberedShardSpec(org.apache.druid.timeline.partition.NumberedShardSpec) QueryableIndex(org.apache.druid.segment.QueryableIndex) Table(org.apache.calcite.schema.Table) Set(java.util.Set) IndexBuilder(org.apache.druid.segment.IndexBuilder) JavaTypeFactoryImpl(org.apache.calcite.jdbc.JavaTypeFactoryImpl) MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) IncrementalIndexSchema(org.apache.druid.segment.incremental.IncrementalIndexSchema) CountDownLatch(java.util.concurrent.CountDownLatch) SegmentMetadataQuery(org.apache.druid.query.metadata.metadata.SegmentMetadataQuery) List(java.util.List) BrokerInternalQueryConfig(org.apache.druid.client.BrokerInternalQueryConfig) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) DataSegment(org.apache.druid.timeline.DataSegment) SegmentId(org.apache.druid.timeline.SegmentId) QueryLifecycleFactory(org.apache.druid.server.QueryLifecycleFactory) DoubleSumAggregatorFactory(org.apache.druid.query.aggregation.DoubleSumAggregatorFactory) Intervals(org.apache.druid.java.util.common.Intervals) GlobalTableDataSource(org.apache.druid.query.GlobalTableDataSource) 
QueryLifecycle(org.apache.druid.server.QueryLifecycle) CalciteTests(org.apache.druid.sql.calcite.util.CalciteTests) MultipleSpecificSegmentSpec(org.apache.druid.query.spec.MultipleSpecificSegmentSpec) ImmutableList(com.google.common.collect.ImmutableList) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) TestServerInventoryView(org.apache.druid.sql.calcite.util.TestServerInventoryView) SegmentAnalysis(org.apache.druid.query.metadata.metadata.SegmentAnalysis) NoopEscalator(org.apache.druid.server.security.NoopEscalator) PruneSpecsHolder(org.apache.druid.timeline.DataSegment.PruneSpecsHolder) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) Before(org.junit.Before) RelDataType(org.apache.calcite.rel.type.RelDataType) Access(org.apache.druid.server.security.Access) SqlTypeName(org.apache.calcite.sql.type.SqlTypeName) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test) IOException(java.io.IOException) SpecificSegmentsQuerySegmentWalker(org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker) EasyMock(org.easymock.EasyMock) AllowAllAuthenticator(org.apache.druid.server.security.AllowAllAuthenticator) TableDataSource(org.apache.druid.query.TableDataSource) File(java.io.File) ColumnAnalysis(org.apache.druid.query.metadata.metadata.ColumnAnalysis) TimeUnit(java.util.concurrent.TimeUnit) TestHelper(org.apache.druid.segment.TestHelper) DruidTable(org.apache.druid.sql.calcite.table.DruidTable) RowSignature(org.apache.druid.segment.column.RowSignature) ColumnType(org.apache.druid.segment.column.ColumnType) Assert(org.junit.Assert) SegmentId(org.apache.druid.timeline.SegmentId) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) DataSegment(org.apache.druid.timeline.DataSegment) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) Test(org.junit.Test)

Example 57 with SegmentId

use of org.apache.druid.timeline.SegmentId in project druid by druid-io.

From the class FireHydrantTest, method testGetSegmentForQueryButNotAbleToAcquireReferences.

@Test
public void testGetSegmentForQueryButNotAbleToAcquireReferences() {
    final ReferenceCountingSegment hydrantSegment = hydrant.getHydrantSegment();
    Assert.assertEquals(0, hydrantSegment.getNumReferences());
    // Wrap the hydrant's segment in a SegmentReference whose acquireReferences() always
    // fails (returns empty); every other method delegates to incrementalIndexSegment.
    final Optional<Pair<SegmentReference, Closeable>> segmentAndCloseable =
            hydrant.getSegmentForQuery(baseSegment -> new SegmentReference() {

                @Override
                public Optional<Closeable> acquireReferences() {
                    // Simulate a reference that can never be acquired.
                    return Optional.empty();
                }

                @Override
                public SegmentId getId() {
                    return incrementalIndexSegment.getId();
                }

                @Override
                public Interval getDataInterval() {
                    return incrementalIndexSegment.getDataInterval();
                }

                @Nullable
                @Override
                public QueryableIndex asQueryableIndex() {
                    return incrementalIndexSegment.asQueryableIndex();
                }

                @Override
                public StorageAdapter asStorageAdapter() {
                    return incrementalIndexSegment.asStorageAdapter();
                }

                @Override
                public void close() {
                    incrementalIndexSegment.close();
                }
            });
    // Since no reference could be acquired, no segment is returned and no references leak.
    Assert.assertFalse(segmentAndCloseable.isPresent());
    Assert.assertEquals(0, hydrantSegment.getNumReferences());
}
Also used : ReferenceCountingSegment(org.apache.druid.segment.ReferenceCountingSegment) Optional(java.util.Optional) SegmentId(org.apache.druid.timeline.SegmentId) QueryableIndex(org.apache.druid.segment.QueryableIndex) SegmentReference(org.apache.druid.segment.SegmentReference) StorageAdapter(org.apache.druid.segment.StorageAdapter) Nullable(javax.annotation.Nullable) Pair(org.apache.druid.java.util.common.Pair) Interval(org.joda.time.Interval) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)

Example 58 with SegmentId

use of org.apache.druid.timeline.SegmentId in project druid by druid-io.

From the class BalanceSegmentsTester, method moveSegment.

/**
 * Test override of segment movement: queues a load on the destination server and marks the
 * segment for drop on the source server, bypassing the coordinator's full move machinery.
 *
 * @return true if the move was initiated; false if the destination already holds (or is
 *         loading) the segment, lacks capacity, or queuing the move failed
 */
@Override
protected boolean moveSegment(final BalancerSegmentHolder segment, final ImmutableDruidServer toServer, final DruidCoordinatorRuntimeParams params) {
    final String toServerName = toServer.getName();
    final LoadQueuePeon toPeon = params.getLoadManagementPeons().get(toServerName);
    final String fromServerName = segment.getFromServer().getName();
    final DataSegment segmentToMove = segment.getSegment();
    final SegmentId segmentId = segmentToMove.getId();
    // Move only when the destination is not already loading/serving the segment
    // and has enough free space for it.
    if (!toPeon.getSegmentsToLoad().contains(segmentToMove)
            && toServer.getSegment(segmentId) == null
            && new ServerHolder(toServer, toPeon).getAvailableSize() > segmentToMove.getSize()) {
        log.info("Moving [%s] from [%s] to [%s]", segmentId, fromServerName, toServerName);
        try {
            // Reuse toPeon directly instead of looking it up a second time (no-op callback).
            toPeon.loadSegment(segmentToMove, () -> {
            });
            final LoadQueuePeon dropPeon = params.getLoadManagementPeons().get(fromServerName);
            dropPeon.markSegmentToDrop(segmentToMove);
            currentlyMovingSegments.get("normal").put(segmentId, segment);
            return true;
        } catch (Exception e) {
            // The logger applies the format itself; no need to pre-format with StringUtils.
            log.info(e, "[%s] : Moving exception", segmentId);
        }
    }
    return false;
}
Also used : SegmentId(org.apache.druid.timeline.SegmentId) DataSegment(org.apache.druid.timeline.DataSegment)

Example 59 with SegmentId

use of org.apache.druid.timeline.SegmentId in project druid by druid-io.

From the class CostBalancerStrategyTest, method setupDummyCluster.

/**
 * Create a dummy Druid cluster with {@code serverCount} servers holding {@code maxSegments}
 * segments each, plus one additional server holding only {@code maxSegments - 2} segments.
 * The cost balancer strategy should assign the next segment to that under-filled server.
 *
 * @param serverCount number of "full" servers to create
 * @param maxSegments number of segments held by each full server
 * @return holders for all created servers; the last entry is the under-filled best candidate
 */
public static List<ServerHolder> setupDummyCluster(int serverCount, int maxSegments) {
    List<ServerHolder> serverHolderList = new ArrayList<>();
    // Create serverCount servers, each holding maxSegments segments.
    for (int i = 0; i < serverCount; i++) {
        LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
        List<DataSegment> segments = IntStream.range(0, maxSegments)
                .mapToObj(j -> getSegment(j))
                .collect(Collectors.toList());
        ImmutableDruidDataSource dataSource = new ImmutableDruidDataSource("DUMMY", Collections.emptyMap(), segments);
        String serverName = "DruidServer_Name_" + i;
        ServerHolder serverHolder = new ServerHolder(
                new ImmutableDruidServer(
                        new DruidServerMetadata(serverName, "localhost", null, 10000000L, ServerType.HISTORICAL, "hot", 1),
                        3000L,
                        ImmutableMap.of("DUMMY", dataSource),
                        segments.size()),
                fromPeon);
        serverHolderList.add(serverHolder);
    }
    // The best candidate for the next segment assignment holds only (maxSegments - 2) segments.
    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
    ImmutableDruidServer druidServer = EasyMock.createMock(ImmutableDruidServer.class);
    EasyMock.expect(druidServer.getName()).andReturn("BEST_SERVER").anyTimes();
    EasyMock.expect(druidServer.getCurrSize()).andReturn(3000L).anyTimes();
    EasyMock.expect(druidServer.getMaxSize()).andReturn(10000000L).anyTimes();
    // Any segment id not explicitly stubbed below resolves to null (not held by this server).
    EasyMock.expect(druidServer.getSegment(EasyMock.anyObject())).andReturn(null).anyTimes();
    Map<SegmentId, DataSegment> bestServerSegments = new HashMap<>();
    for (int j = 0; j < (maxSegments - 2); j++) {
        DataSegment segment = getSegment(j);
        bestServerSegments.put(segment.getId(), segment);
        EasyMock.expect(druidServer.getSegment(segment.getId())).andReturn(segment).anyTimes();
    }
    ImmutableDruidServerTests.expectSegments(druidServer, bestServerSegments.values());
    EasyMock.replay(druidServer);
    serverHolderList.add(new ServerHolder(druidServer, fromPeon));
    return serverHolderList;
}
Also used : IntStream(java.util.stream.IntStream) MoreExecutors(com.google.common.util.concurrent.MoreExecutors) Intervals(org.apache.druid.java.util.common.Intervals) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Interval(org.joda.time.Interval) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) Map(java.util.Map) ServerType(org.apache.druid.server.coordination.ServerType) ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) DateTimes(org.apache.druid.java.util.common.DateTimes) ImmutableDruidServerTests(org.apache.druid.client.ImmutableDruidServerTests) ImmutableMap(com.google.common.collect.ImmutableMap) DateTime(org.joda.time.DateTime) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Test(org.junit.Test) EasyMock(org.easymock.EasyMock) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) List(java.util.List) DataSegment(org.apache.druid.timeline.DataSegment) SegmentId(org.apache.druid.timeline.SegmentId) Assert(org.junit.Assert) Collections(java.util.Collections) ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) SegmentId(org.apache.druid.timeline.SegmentId) HashMap(java.util.HashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ArrayList(java.util.ArrayList) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) DataSegment(org.apache.druid.timeline.DataSegment) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer)

Example 60 with SegmentId

use of org.apache.druid.timeline.SegmentId in project druid by druid-io.

From the class HttpLoadQueuePeonTest, method testSimple.

@Test(timeout = 60_000L)
public void testSimple() throws Exception {
    HttpLoadQueuePeon httpLoadQueuePeon = new HttpLoadQueuePeon(
            "http://dummy:4000",
            ServerTestHelper.MAPPER,
            new TestHttpClient(),
            config,
            Executors.newScheduledThreadPool(2, Execs.makeThreadFactory("HttpLoadQueuePeonTest-%s")),
            Execs.singleThreaded("HttpLoadQueuePeonTest"));
    httpLoadQueuePeon.start();
    // One latch per segment; each queued callback counts its latch down exactly once.
    Map<SegmentId, CountDownLatch> latches = ImmutableMap.of(
            segment1.getId(), new CountDownLatch(1),
            segment2.getId(), new CountDownLatch(1),
            segment3.getId(), new CountDownLatch(1),
            segment4.getId(), new CountDownLatch(1));
    // Interleave drops and loads on the peon.
    httpLoadQueuePeon.dropSegment(segment1, () -> latches.get(segment1.getId()).countDown());
    httpLoadQueuePeon.loadSegment(segment2, () -> latches.get(segment2.getId()).countDown());
    httpLoadQueuePeon.dropSegment(segment3, () -> latches.get(segment3.getId()).countDown());
    httpLoadQueuePeon.loadSegment(segment4, () -> latches.get(segment4.getId()).countDown());
    // Wait until every callback has fired (ImmutableMap iterates in insertion order).
    for (CountDownLatch latch : latches.values()) {
        latch.await();
    }
    httpLoadQueuePeon.stop();
}
Also used : SegmentId(org.apache.druid.timeline.SegmentId) CountDownLatch(java.util.concurrent.CountDownLatch) Test(org.junit.Test)

Aggregations

SegmentId (org.apache.druid.timeline.SegmentId)63 DataSegment (org.apache.druid.timeline.DataSegment)32 Test (org.junit.Test)21 Interval (org.joda.time.Interval)14 ISE (org.apache.druid.java.util.common.ISE)13 ArrayList (java.util.ArrayList)12 Map (java.util.Map)12 Set (java.util.Set)12 ImmutableDruidDataSource (org.apache.druid.client.ImmutableDruidDataSource)12 List (java.util.List)11 ImmutableMap (com.google.common.collect.ImmutableMap)10 IOException (java.io.IOException)9 TreeMap (java.util.TreeMap)9 CountDownLatch (java.util.concurrent.CountDownLatch)9 VisibleForTesting (com.google.common.annotations.VisibleForTesting)8 Collectors (java.util.stream.Collectors)8 Optional (java.util.Optional)7 Sets (com.google.common.collect.Sets)6 Nullable (javax.annotation.Nullable)6 Response (javax.ws.rs.core.Response)6