Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
In the class LoadRuleTest, method testRedundantReplicaDropDuringDecommissioning:
/**
 * 3 servers hosting 3 replicas of the segment.
 * 1 server is decommissioning.
 * 1 replica is redundant.
 * Should drop from the decommissioning server.
 */
@Test
public void testRedundantReplicaDropDuringDecommissioning() {
  final LoadQueuePeon mockPeon1 = new LoadQueuePeonTester();
  final LoadQueuePeon mockPeon2 = new LoadQueuePeonTester();
  final LoadQueuePeon mockPeon3 = new LoadQueuePeonTester();
  EasyMock.expect(mockBalancerStrategy.pickServersToDrop(EasyMock.anyObject(), EasyMock.anyObject()))
          .andDelegateTo(balancerStrategy)
          .times(4);
  EasyMock.replay(throttler, mockBalancerStrategy);
  LoadRule rule = createLoadRule(ImmutableMap.of("tier1", 2));
  final DataSegment segment1 = createDataSegment("foo1");
  DruidServer server1 = createServer("tier1");
  server1.addDataSegment(segment1);
  DruidServer server2 = createServer("tier1");
  server2.addDataSegment(segment1);
  DruidServer server3 = createServer("tier1");
  server3.addDataSegment(segment1);
  DruidCluster druidCluster = DruidClusterBuilder
      .newBuilder()
      .addTier(
          "tier1",
          new ServerHolder(server1.toImmutableDruidServer(), mockPeon1, false),
          new ServerHolder(server2.toImmutableDruidServer(), mockPeon2, true),
          new ServerHolder(server3.toImmutableDruidServer(), mockPeon3, false)
      )
      .build();
  CoordinatorStats stats = rule.run(null, makeCoordinatorRuntimeParams(druidCluster, segment1), segment1);
  Assert.assertEquals(1L, stats.getTieredStat("droppedCount", "tier1"));
  Assert.assertEquals(0, mockPeon1.getSegmentsToDrop().size());
  Assert.assertEquals(1, mockPeon2.getSegmentsToDrop().size());
  Assert.assertEquals(0, mockPeon3.getSegmentsToDrop().size());
  EasyMock.verify(throttler);
}
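The LoadRuleTest snippets on this page lean on private helpers (createDataSegment, createServer, createLoadRule, and friends) that the excerpts omit. A minimal sketch of the first two, assuming an arbitrary fixed interval and generated server names; it relies only on the DataSegment.builder() API and the DruidServer constructor that appear elsewhere on this page:

private static final AtomicInteger SERVER_ID = new AtomicInteger();

// Hypothetical reconstruction of the omitted helper: any fixed interval,
// version, and shard spec will do for these tests.
private DataSegment createDataSegment(String dataSource) {
  return DataSegment.builder()
                    .dataSource(dataSource)
                    .interval(Intervals.of("2021-01-01/2021-01-02"))
                    .version("1")
                    .shardSpec(new LinearShardSpec(0))
                    .size(0)
                    .build();
}

// Hypothetical reconstruction: same constructor shape as the explicit
// DruidServer instantiations in the next snippet
// (name, host, hostAndTlsPort, maxSize, type, tier, priority).
private DruidServer createServer(String tier) {
  final int id = SERVER_ID.incrementAndGet();
  return new DruidServer("server" + id, "host" + id, null, 10000, ServerType.HISTORICAL, tier, 0);
}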
Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
In the class LoadRuleTest, method testLoadPrimaryAssignDoesNotOverAssign:
@Test
public void testLoadPrimaryAssignDoesNotOverAssign() {
  EasyMock.expect(throttler.canCreateReplicant(EasyMock.anyString())).andReturn(true).anyTimes();
  final LoadQueuePeon mockPeon = createEmptyPeon();
  mockPeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject());
  EasyMock.expectLastCall().atLeastOnce();
  LoadRule rule = createLoadRule(ImmutableMap.of("hot", 1));
  final DataSegment segment = createDataSegment("foo");
  EasyMock.expect(mockBalancerStrategy.findNewSegmentHomeReplicator(EasyMock.anyObject(), EasyMock.anyObject()))
          .andDelegateTo(balancerStrategy)
          .anyTimes();
  EasyMock.replay(throttler, mockPeon, mockBalancerStrategy);
  ImmutableDruidServer server1 =
      new DruidServer("serverHot", "hostHot", null, 1000, ServerType.HISTORICAL, "hot", 1).toImmutableDruidServer();
  ImmutableDruidServer server2 =
      new DruidServer("serverHot2", "hostHot2", null, 1000, ServerType.HISTORICAL, "hot", 1).toImmutableDruidServer();
  DruidCluster druidCluster = DruidClusterBuilder
      .newBuilder()
      .addTier("hot", new ServerHolder(server1, mockPeon), new ServerHolder(server2, mockPeon))
      .build();
  CoordinatorStats stats = rule.run(null, makeCoordinatorRuntimeParams(druidCluster, segment), segment);
  Assert.assertEquals(1L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot"));
  // ensure multiple runs don't assign primary segment again if at replication count
  final LoadQueuePeon loadingPeon = createLoadingPeon(ImmutableList.of(segment), false);
  EasyMock.replay(loadingPeon);
  DruidCluster afterLoad = DruidClusterBuilder
      .newBuilder()
      .addTier("hot", new ServerHolder(server1, loadingPeon), new ServerHolder(server2, mockPeon))
      .build();
  CoordinatorStats statsAfterLoadPrimary = rule.run(null, makeCoordinatorRuntimeParams(afterLoad, segment), segment);
  Assert.assertEquals(0, statsAfterLoadPrimary.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot"));
  EasyMock.verify(throttler, mockPeon, mockBalancerStrategy);
}
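This test and the next both call a createLoadingPeon helper that the excerpt omits. A plausible sketch, assuming a mocked LoadQueuePeon whose queue reports the given segments as loading; the accessor names mocked here (getSegmentsToLoad, getSegmentsToDrop, getTimedOutSegments) are assumptions about what LoadRule consults, not confirmed by this excerpt:

// Hypothetical sketch of the omitted helper: a mock peon that pretends the
// given segments sit in its load queue and, when slowLoading is true, reports
// them all as timed out (exercised by testOverAssignForTimedOutSegments below).
private static LoadQueuePeon createLoadingPeon(List<DataSegment> segments, boolean slowLoading) {
  final Set<DataSegment> loadingSegments = ImmutableSet.copyOf(segments);
  final LoadQueuePeon mockPeon = EasyMock.createMock(LoadQueuePeon.class);
  EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(loadingSegments).anyTimes();
  EasyMock.expect(mockPeon.getSegmentsToDrop()).andReturn(ImmutableSet.<DataSegment>of()).anyTimes();
  EasyMock.expect(mockPeon.getTimedOutSegments())
          .andReturn(slowLoading ? loadingSegments : ImmutableSet.<DataSegment>of())
          .anyTimes();
  return mockPeon;
}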
Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
In the class LoadRuleTest, method testOverAssignForTimedOutSegments:
@Test
public void testOverAssignForTimedOutSegments() {
  EasyMock.expect(throttler.canCreateReplicant(EasyMock.anyString())).andReturn(true).anyTimes();
  final LoadQueuePeon emptyPeon = createEmptyPeon();
  emptyPeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject());
  EasyMock.expectLastCall().atLeastOnce();
  LoadRule rule = createLoadRule(ImmutableMap.of("hot", 1));
  final DataSegment segment = createDataSegment("foo");
  EasyMock.expect(mockBalancerStrategy.findNewSegmentHomeReplicator(EasyMock.anyObject(), EasyMock.anyObject()))
          .andDelegateTo(balancerStrategy)
          .anyTimes();
  EasyMock.replay(throttler, emptyPeon, mockBalancerStrategy);
  ImmutableDruidServer server1 =
      new DruidServer("serverHot", "hostHot", null, 1000, ServerType.HISTORICAL, "hot", 1).toImmutableDruidServer();
  ImmutableDruidServer server2 =
      new DruidServer("serverHot2", "hostHot2", null, 1000, ServerType.HISTORICAL, "hot", 1).toImmutableDruidServer();
  DruidCluster druidCluster = DruidClusterBuilder
      .newBuilder()
      .addTier("hot", new ServerHolder(server1, emptyPeon), new ServerHolder(server2, emptyPeon))
      .build();
  CoordinatorStats stats =
      rule.run(null, makeCoordinatorRuntimeParamsWithLoadReplicationOnTimeout(druidCluster, segment), segment);
  // Ensure that the segment is assigned to one of the historicals
  Assert.assertEquals(1L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot"));
  // Ensure that the primary segment is assigned again in case the peon timed out on loading the segment
  final LoadQueuePeon slowLoadingPeon = createLoadingPeon(ImmutableList.of(segment), true);
  EasyMock.replay(slowLoadingPeon);
  DruidCluster withLoadTimeout = DruidClusterBuilder
      .newBuilder()
      .addTier("hot", new ServerHolder(server1, slowLoadingPeon), new ServerHolder(server2, emptyPeon))
      .build();
  CoordinatorStats statsAfterLoadPrimary =
      rule.run(null, makeCoordinatorRuntimeParamsWithLoadReplicationOnTimeout(withLoadTimeout, segment), segment);
  Assert.assertEquals(1L, statsAfterLoadPrimary.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot"));
  EasyMock.verify(throttler, emptyPeon, mockBalancerStrategy);
}
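makeCoordinatorRuntimeParamsWithLoadReplicationOnTimeout is also omitted. The name suggests it builds the same coordinator runtime params as makeCoordinatorRuntimeParams, but with the replicateAfterLoadTimeout dynamic config switched on, which is what lets the second run(...) above assign the primary replica again. A hedged sketch; the test-helper builder and the flag name are assumptions, not confirmed by this excerpt:

// Hypothetical sketch: same params as the plain helper, plus a dynamic config
// that enables replication of segments whose load has timed out.
private DruidCoordinatorRuntimeParams makeCoordinatorRuntimeParamsWithLoadReplicationOnTimeout(
    DruidCluster druidCluster,
    DataSegment... usedSegments
) {
  return CoordinatorRuntimeParamsTestHelpers
      .newBuilder()
      .withDruidCluster(druidCluster)
      .withReplicationManager(throttler)
      .withBalancerStrategy(mockBalancerStrategy)
      .withUsedSegmentsInTest(usedSegments)
      .withDynamicConfigs(CoordinatorDynamicConfig.builder().withReplicateAfterLoadTimeout(true).build())
      .build();
}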
Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
In the class SqlVectorizedExpressionSanityTest, method setupClass:
@BeforeClass
public static void setupClass() {
  Calcites.setSystemProperties();
  ExpressionProcessing.initializeForStrictBooleansTests(true);
  CLOSER = Closer.create();
  final GeneratorSchemaInfo schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get("expression-testbench");
  final DataSegment dataSegment = DataSegment.builder()
                                             .dataSource("foo")
                                             .interval(schemaInfo.getDataInterval())
                                             .version("1")
                                             .shardSpec(new LinearShardSpec(0))
                                             .size(0)
                                             .build();
  final SegmentGenerator segmentGenerator = CLOSER.register(new SegmentGenerator());
  INDEX = CLOSER.register(segmentGenerator.generate(dataSegment, schemaInfo, Granularities.HOUR, ROWS_PER_SEGMENT));
  CONGLOMERATE = QueryStackTests.createQueryRunnerFactoryConglomerate(CLOSER);
  WALKER = new SpecificSegmentsQuerySegmentWalker(CONGLOMERATE).add(dataSegment, INDEX);
  CLOSER.register(WALKER);
  final PlannerConfig plannerConfig = new PlannerConfig();
  final DruidSchemaCatalog rootSchema =
      CalciteTests.createMockRootSchema(CONGLOMERATE, WALKER, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
  PLANNER_FACTORY = new PlannerFactory(
      rootSchema,
      CalciteTests.createMockQueryMakerFactory(WALKER, CONGLOMERATE),
      CalciteTests.createOperatorTable(),
      CalciteTests.createExprMacroTable(),
      plannerConfig,
      AuthTestUtils.TEST_AUTHORIZER_MAPPER,
      CalciteTests.getJsonMapper(),
      CalciteTests.DRUID_SCHEMA_NAME
  );
}
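The DataSegment.builder() chain above is the whole description the generator needs: data source, interval, version, shard spec, and size. If a benchmark needed several partitions of the same interval, the same chain could be repeated with increasing LinearShardSpec partition numbers; a hedged illustration reusing only the API shown in setupClass():

// Hypothetical variation: three partitions of the same interval, each of which
// could then be generated and registered with the walker as above.
final List<DataSegment> segments = new ArrayList<>();
for (int partition = 0; partition < 3; partition++) {
  segments.add(
      DataSegment.builder()
                 .dataSource("foo")
                 .interval(schemaInfo.getDataInterval())
                 .version("1")
                 .shardSpec(new LinearShardSpec(partition))
                 .size(0)
                 .build()
  );
}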
Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
In the class SqlSegmentsMetadataManager, method replaceWithExistingSegmentIfPresent:
/**
 * For the garbage collector in Java, it's better to keep new objects short-lived, but once they are old enough
 * (i.e. promoted to the old generation), to try to keep them alive. In {@link #poll()}, we fetch and deserialize all
 * existing segments each time, and then replace them in {@link #dataSourcesSnapshot}. This method lets us reuse
 * already existing (old) segments when possible, effectively interning them a la {@link String#intern} or {@link
 * com.google.common.collect.Interner}. The aim is for the majority of {@link DataSegment} objects to become garbage
 * soon after they are deserialized and to die in the young generation, which avoids fragmentation of the old
 * generation and full GCs.
 */
private DataSegment replaceWithExistingSegmentIfPresent(DataSegment segment) {
  @MonotonicNonNull DataSourcesSnapshot dataSourcesSnapshot = this.dataSourcesSnapshot;
  if (dataSourcesSnapshot == null) {
    return segment;
  }
  @Nullable ImmutableDruidDataSource dataSource = dataSourcesSnapshot.getDataSource(segment.getDataSource());
  if (dataSource == null) {
    return segment;
  }
  DataSegment alreadyExistingSegment = dataSource.getSegment(segment.getId());
  return alreadyExistingSegment != null ? alreadyExistingSegment : segment;
}
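The Javadoc above frames this as interning a la Guava. For contrast, the same idea expressed directly with com.google.common.collect.Interner, which the comment cites; a minimal sketch, not code from SqlSegmentsMetadataManager:

import com.google.common.collect.Interner;
import com.google.common.collect.Interners;

// A weak interner hands back the canonical live instance when an equal one
// already exists, so the freshly deserialized duplicate can die young.
private static final Interner<DataSegment> SEGMENT_INTERNER = Interners.newWeakInterner();

private DataSegment intern(DataSegment freshlyDeserialized) {
  return SEGMENT_INTERNER.intern(freshlyDeserialized);
}

The hand-rolled version above differs from a plain interner in one important way: it replaces a segment only with the copy already held in dataSourcesSnapshot, so it never pins arbitrary old objects, only ones the snapshot is keeping alive anyway.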