Example 21 with StringDimensionSchema

Use of org.apache.druid.data.input.impl.StringDimensionSchema in project druid by druid-io.

From class MaterializedViewSupervisorTest, method setUp.

@Before
public void setUp() {
    TestDerbyConnector derbyConnector = derbyConnectorRule.getConnector();
    derbyConnector.createDataSourceTable();
    derbyConnector.createSegmentTable();
    taskStorage = EasyMock.createMock(TaskStorage.class);
    taskMaster = EasyMock.createMock(TaskMaster.class);
    indexerMetadataStorageCoordinator = new IndexerSQLMetadataStorageCoordinator(objectMapper, derbyConnectorRule.metadataTablesConfigSupplier().get(), derbyConnector);
    metadataSupervisorManager = EasyMock.createMock(MetadataSupervisorManager.class);
    sqlSegmentsMetadataManager = EasyMock.createMock(SqlSegmentsMetadataManager.class);
    taskQueue = EasyMock.createMock(TaskQueue.class);
    taskQueue.start();
    objectMapper.registerSubtypes(new NamedType(HashBasedNumberedShardSpec.class, "hashed"));
    spec = new MaterializedViewSupervisorSpec(
        "base",
        new DimensionsSpec(Collections.singletonList(new StringDimensionSchema("dim"))),
        new AggregatorFactory[] {new LongSumAggregatorFactory("m1", "m1")},
        HadoopTuningConfig.makeDefaultTuningConfig(),
        null,
        null,
        null,
        null,
        null,
        false, // suspended
        objectMapper,
        taskMaster,
        taskStorage,
        metadataSupervisorManager,
        sqlSegmentsMetadataManager,
        indexerMetadataStorageCoordinator,
        new MaterializedViewTaskConfig(),
        EasyMock.createMock(AuthorizerMapper.class),
        EasyMock.createMock(ChatHandlerProvider.class),
        new SupervisorStateManagerConfig()
    );
    derivativeDatasourceName = spec.getDataSourceName();
    supervisor = (MaterializedViewSupervisor) spec.createSupervisor();
}
Also used : IndexerSQLMetadataStorageCoordinator(org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator) HashBasedNumberedShardSpec(org.apache.druid.timeline.partition.HashBasedNumberedShardSpec) NamedType(com.fasterxml.jackson.databind.jsontype.NamedType) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) TestDerbyConnector(org.apache.druid.metadata.TestDerbyConnector) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) StringDimensionSchema(org.apache.druid.data.input.impl.StringDimensionSchema) TaskStorage(org.apache.druid.indexing.overlord.TaskStorage) SupervisorStateManagerConfig(org.apache.druid.indexing.overlord.supervisor.SupervisorStateManagerConfig) TaskQueue(org.apache.druid.indexing.overlord.TaskQueue) DimensionsSpec(org.apache.druid.data.input.impl.DimensionsSpec) MetadataSupervisorManager(org.apache.druid.metadata.MetadataSupervisorManager) TaskMaster(org.apache.druid.indexing.overlord.TaskMaster) SqlSegmentsMetadataManager(org.apache.druid.metadata.SqlSegmentsMetadataManager) Before(org.junit.Before)
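
For orientation, a minimal standalone sketch of the schema construction these tests share: a DimensionsSpec simply wraps one DimensionSchema per ingested column, and the schema subtype (String, Long, Float) decides how Druid stores and indexes that column. The class and column names below are illustrative, not taken from the test:

import java.util.Arrays;
import org.apache.druid.data.input.impl.DimensionsSpec;
import org.apache.druid.data.input.impl.LongDimensionSchema;
import org.apache.druid.data.input.impl.StringDimensionSchema;

public class DimensionsSpecSketch {
    public static void main(String[] args) {
        // One schema object per column; "country" and "views" are hypothetical names.
        DimensionsSpec dimensionsSpec = new DimensionsSpec(
            Arrays.asList(
                new StringDimensionSchema("country"), // string-typed dimension
                new LongDimensionSchema("views")      // long-typed dimension
            )
        );
        System.out.println(dimensionsSpec.getDimensionNames());
    }
}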

Example 22 with StringDimensionSchema

Use of org.apache.druid.data.input.impl.StringDimensionSchema in project druid by druid-io.

From class MaterializedViewSupervisorTest, method testSuspendedDoesntRun.

@Test
public void testSuspendedDoesntRun() {
    MaterializedViewSupervisorSpec suspended = new MaterializedViewSupervisorSpec(
        "base",
        new DimensionsSpec(Collections.singletonList(new StringDimensionSchema("dim"))),
        new AggregatorFactory[] {new LongSumAggregatorFactory("m1", "m1")},
        HadoopTuningConfig.makeDefaultTuningConfig(),
        null,
        null,
        null,
        null,
        null,
        true, // suspended
        objectMapper,
        taskMaster,
        taskStorage,
        metadataSupervisorManager,
        sqlSegmentsMetadataManager,
        indexerMetadataStorageCoordinator,
        new MaterializedViewTaskConfig(),
        EasyMock.createMock(AuthorizerMapper.class),
        EasyMock.createMock(ChatHandlerProvider.class),
        new SupervisorStateManagerConfig()
    );
    MaterializedViewSupervisor supervisor = (MaterializedViewSupervisor) suspended.createSupervisor();
    // Mock IndexerSQLMetadataStorageCoordinator to verify that retrieveDataSourceMetadata is never
    // called: it is the first operation of run(), so it is only skipped if the supervisor is truly suspended.
    IndexerSQLMetadataStorageCoordinator mock = EasyMock.createMock(IndexerSQLMetadataStorageCoordinator.class);
    EasyMock.expect(mock.retrieveDataSourceMetadata(suspended.getDataSourceName())).andAnswer(() -> {
        Assert.fail();
        return null;
    }).anyTimes();
    EasyMock.replay(mock);
    supervisor.run();
}
Also used : IndexerSQLMetadataStorageCoordinator(org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator) SupervisorStateManagerConfig(org.apache.druid.indexing.overlord.supervisor.SupervisorStateManagerConfig) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) DimensionsSpec(org.apache.druid.data.input.impl.DimensionsSpec) AuthorizerMapper(org.apache.druid.server.security.AuthorizerMapper) ChatHandlerProvider(org.apache.druid.segment.realtime.firehose.ChatHandlerProvider) StringDimensionSchema(org.apache.druid.data.input.impl.StringDimensionSchema) Test(org.junit.Test)
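
The andAnswer/Assert.fail idiom above is a general EasyMock technique for asserting that a method is never invoked. A minimal self-contained sketch, using a hypothetical MetadataStore interface rather than the real storage coordinator:

import org.easymock.EasyMock;
import org.junit.Assert;

// Hypothetical collaborator, used only to illustrate the never-called pattern.
interface MetadataStore {
    String fetch(String key);
}

public class NeverCalledMockSketch {
    public static void main(String[] args) {
        MetadataStore store = EasyMock.createMock(MetadataStore.class);
        // The answer callback runs only if fetch() is actually invoked,
        // so any call fails the test immediately.
        EasyMock.expect(store.fetch("base")).andAnswer(() -> {
            Assert.fail("fetch() must not be called while suspended");
            return null;
        }).anyTimes();
        EasyMock.replay(store);
        // ... exercise the code under test here; it must never touch store.fetch() ...
        EasyMock.verify(store);
    }
}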

Example 23 with StringDimensionSchema

Use of org.apache.druid.data.input.impl.StringDimensionSchema in project druid by druid-io.

From class MaterializedViewSupervisorSpecTest, method testMaterializedViewSupervisorSpecCreated.

@Test
public void testMaterializedViewSupervisorSpecCreated() {
    Exception ex = null;
    try {
        MaterializedViewSupervisorSpec spec = new MaterializedViewSupervisorSpec(
            "wikiticker",
            new DimensionsSpec(Lists.newArrayList(
                new StringDimensionSchema("isUnpatrolled"),
                new StringDimensionSchema("metroCode"),
                new StringDimensionSchema("namespace"),
                new StringDimensionSchema("page"),
                new StringDimensionSchema("regionIsoCode"),
                new StringDimensionSchema("regionName"),
                new StringDimensionSchema("user")
            )),
            new AggregatorFactory[] {
                new CountAggregatorFactory("count"),
                new LongSumAggregatorFactory("added", "added")
            },
            HadoopTuningConfig.makeDefaultTuningConfig(),
            null,
            null,
            null,
            null,
            null,
            false, // suspended
            objectMapper,
            null,
            null,
            null,
            null,
            null,
            new MaterializedViewTaskConfig(),
            EasyMock.createMock(AuthorizerMapper.class),
            new NoopChatHandlerProvider(),
            new SupervisorStateManagerConfig()
        );
        Supervisor supervisor = spec.createSupervisor();
        Assert.assertTrue(supervisor instanceof MaterializedViewSupervisor);
        SupervisorTaskAutoScaler autoscaler = spec.createAutoscaler(supervisor);
        Assert.assertNull(autoscaler);
        try {
            supervisor.computeLagStats();
        } catch (Exception e) {
            Assert.assertTrue(e instanceof UnsupportedOperationException);
        }
        try {
            int count = supervisor.getActiveTaskGroupsCount();
        } catch (Exception e) {
            Assert.assertTrue(e instanceof UnsupportedOperationException);
        }
        Callable<Integer> noop = new Callable<Integer>() {

            @Override
            public Integer call() {
                return -1;
            }
        };
    } catch (Exception e) {
        ex = e;
    }
    Assert.assertNull(ex);
}
Also used : Supervisor(org.apache.druid.indexing.overlord.supervisor.Supervisor) NoopChatHandlerProvider(org.apache.druid.segment.realtime.firehose.NoopChatHandlerProvider) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) ExpectedException(org.junit.rules.ExpectedException) IOException(java.io.IOException) Callable(java.util.concurrent.Callable) StringDimensionSchema(org.apache.druid.data.input.impl.StringDimensionSchema) SupervisorTaskAutoScaler(org.apache.druid.indexing.overlord.supervisor.autoscaler.SupervisorTaskAutoScaler) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) SupervisorStateManagerConfig(org.apache.druid.indexing.overlord.supervisor.SupervisorStateManagerConfig) DimensionsSpec(org.apache.druid.data.input.impl.DimensionsSpec) AuthorizerMapper(org.apache.druid.server.security.AuthorizerMapper) Test(org.junit.Test)
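
One caveat about the inner try/catch blocks above: they pass silently when no exception is thrown at all, because the assertion lives inside the catch. If the project is on JUnit 4.13 or later (an assumption, not something this listing confirms), Assert.assertThrows states the intent directly:

import org.junit.Assert;

// Fails the test unless the call actually throws UnsupportedOperationException.
Assert.assertThrows(UnsupportedOperationException.class, supervisor::computeLagStats);
Assert.assertThrows(UnsupportedOperationException.class, supervisor::getActiveTaskGroupsCount);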

Example 24 with StringDimensionSchema

Use of org.apache.druid.data.input.impl.StringDimensionSchema in project druid by druid-io.

From class MaterializedViewSupervisorSpecTest, method testNullBaseDataSource.

@Test
public void testNullBaseDataSource() {
    expectedException.expect(CoreMatchers.instanceOf(IllegalArgumentException.class));
    expectedException.expectMessage("baseDataSource cannot be null or empty. Please provide a baseDataSource.");
    // noinspection ResultOfObjectAllocationIgnored (this method call will trigger the expected exception)
    new MaterializedViewSupervisorSpec(
        null, // baseDataSource: null, which should trigger the expected IllegalArgumentException
        new DimensionsSpec(Lists.newArrayList(
            new StringDimensionSchema("isUnpatrolled"),
            new StringDimensionSchema("metroCode"),
            new StringDimensionSchema("namespace"),
            new StringDimensionSchema("page"),
            new StringDimensionSchema("regionIsoCode"),
            new StringDimensionSchema("regionName"),
            new StringDimensionSchema("user")
        )),
        new AggregatorFactory[] {
            new CountAggregatorFactory("count"),
            new LongSumAggregatorFactory("added", "added")
        },
        HadoopTuningConfig.makeDefaultTuningConfig(),
        null,
        null,
        null,
        null,
        null,
        false, // suspended
        objectMapper,
        null,
        null,
        null,
        null,
        null,
        new MaterializedViewTaskConfig(),
        EasyMock.createMock(AuthorizerMapper.class),
        new NoopChatHandlerProvider(),
        new SupervisorStateManagerConfig()
    );
}
Also used : CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) NoopChatHandlerProvider(org.apache.druid.segment.realtime.firehose.NoopChatHandlerProvider) SupervisorStateManagerConfig(org.apache.druid.indexing.overlord.supervisor.SupervisorStateManagerConfig) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) DimensionsSpec(org.apache.druid.data.input.impl.DimensionsSpec) AuthorizerMapper(org.apache.druid.server.security.AuthorizerMapper) StringDimensionSchema(org.apache.druid.data.input.impl.StringDimensionSchema) Test(org.junit.Test)
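
The asserted message comes from a guard in the spec's constructor. A typical shape for such a check is the Guava Preconditions idiom sketched below; this illustrates the pattern and is not the spec's verbatim source:

import com.google.common.base.Preconditions;
import org.apache.commons.lang3.StringUtils;

// Illustrative guard: reject a null or empty baseDataSource up front.
private static String validateBaseDataSource(String baseDataSource) {
    Preconditions.checkArgument(
        StringUtils.isNotEmpty(baseDataSource),
        "baseDataSource cannot be null or empty. Please provide a baseDataSource."
    );
    return baseDataSource;
}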

Example 25 with StringDimensionSchema

Use of org.apache.druid.data.input.impl.StringDimensionSchema in project druid by druid-io.

From class GroupByLimitPushDownMultiNodeMergeTest, method setup.

@Before
public void setup() throws Exception {
    tmpDir = FileUtils.createTempDir();
    InputRow row;
    List<String> dimNames = Arrays.asList("dimA", "metA");
    Map<String, Object> event;
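    // Index A: four dimA/metA rows with mid-September 2017 timestamps.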
    final IncrementalIndex indexA = makeIncIndex(false);
    incrementalIndices.add(indexA);
    event = new HashMap<>();
    event.put("dimA", "pomegranate");
    event.put("metA", 2395L);
    row = new MapBasedInputRow(1505260888888L, dimNames, event);
    indexA.add(row);
    event = new HashMap<>();
    event.put("dimA", "mango");
    event.put("metA", 8L);
    row = new MapBasedInputRow(1505260800000L, dimNames, event);
    indexA.add(row);
    event = new HashMap<>();
    event.put("dimA", "pomegranate");
    event.put("metA", 5028L);
    row = new MapBasedInputRow(1505264400000L, dimNames, event);
    indexA.add(row);
    event = new HashMap<>();
    event.put("dimA", "mango");
    event.put("metA", 7L);
    row = new MapBasedInputRow(1505264400400L, dimNames, event);
    indexA.add(row);
    final File fileA = INDEX_MERGER_V9.persist(indexA, new File(tmpDir, "A"), new IndexSpec(), null);
    QueryableIndex qindexA = INDEX_IO.loadIndex(fileA);
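    // Index B: the same two columns with different metric values; timestamps overlap index A.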
    final IncrementalIndex indexB = makeIncIndex(false);
    incrementalIndices.add(indexB);
    event = new HashMap<>();
    event.put("dimA", "pomegranate");
    event.put("metA", 4718L);
    row = new MapBasedInputRow(1505260800000L, dimNames, event);
    indexB.add(row);
    event = new HashMap<>();
    event.put("dimA", "mango");
    event.put("metA", 18L);
    row = new MapBasedInputRow(1505260800000L, dimNames, event);
    indexB.add(row);
    event = new HashMap<>();
    event.put("dimA", "pomegranate");
    event.put("metA", 2698L);
    row = new MapBasedInputRow(1505264400000L, dimNames, event);
    indexB.add(row);
    event = new HashMap<>();
    event.put("dimA", "mango");
    event.put("metA", 3L);
    row = new MapBasedInputRow(1505264400000L, dimNames, event);
    indexB.add(row);
    final File fileB = INDEX_MERGER_V9.persist(indexB, new File(tmpDir, "B"), new IndexSpec(), null);
    QueryableIndex qindexB = INDEX_IO.loadIndex(fileB);
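    // Index C: the same values as index A, but with rows spread years apart.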
    final IncrementalIndex indexC = makeIncIndex(false);
    incrementalIndices.add(indexC);
    event = new HashMap<>();
    event.put("dimA", "pomegranate");
    event.put("metA", 2395L);
    row = new MapBasedInputRow(1505260800000L, dimNames, event);
    indexC.add(row);
    event = new HashMap<>();
    event.put("dimA", "mango");
    event.put("metA", 8L);
    row = new MapBasedInputRow(1605260800000L, dimNames, event);
    indexC.add(row);
    event = new HashMap<>();
    event.put("dimA", "pomegranate");
    event.put("metA", 5028L);
    row = new MapBasedInputRow(1705264400000L, dimNames, event);
    indexC.add(row);
    event = new HashMap<>();
    event.put("dimA", "mango");
    event.put("metA", 7L);
    row = new MapBasedInputRow(1805264400000L, dimNames, event);
    indexC.add(row);
    final File fileC = INDEX_MERGER_V9.persist(indexC, new File(tmpDir, "C"), new IndexSpec(), null);
    QueryableIndex qindexC = INDEX_IO.loadIndex(fileC);
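    // Index D: the same values as index B, with timestamps matching index C.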
    final IncrementalIndex indexD = makeIncIndex(false);
    incrementalIndices.add(indexD);
    event = new HashMap<>();
    event.put("dimA", "pomegranate");
    event.put("metA", 4718L);
    row = new MapBasedInputRow(1505260800000L, dimNames, event);
    indexD.add(row);
    event = new HashMap<>();
    event.put("dimA", "mango");
    event.put("metA", 18L);
    row = new MapBasedInputRow(1605260800000L, dimNames, event);
    indexD.add(row);
    event = new HashMap<>();
    event.put("dimA", "pomegranate");
    event.put("metA", 2698L);
    row = new MapBasedInputRow(1705264400000L, dimNames, event);
    indexD.add(row);
    event = new HashMap<>();
    event.put("dimA", "mango");
    event.put("metA", 3L);
    row = new MapBasedInputRow(1805264400000L, dimNames, event);
    indexD.add(row);
    final File fileD = INDEX_MERGER_V9.persist(indexD, new File(tmpDir, "D"), new IndexSpec(), null);
    QueryableIndex qindexD = INDEX_IO.loadIndex(fileD);
    List<String> dimNames2 = Arrays.asList("dimA", "dimB", "metA");
    List<DimensionSchema> dimensions = Arrays.asList(new StringDimensionSchema("dimA"), new StringDimensionSchema("dimB"), new LongDimensionSchema("metA"));
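    // Index E: fruit rows over the explicit dimA/dimB/metA schema (metA is a long dimension here, not a metric).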
    final IncrementalIndex indexE = makeIncIndex(false, dimensions);
    incrementalIndices.add(indexE);
    event = new HashMap<>();
    event.put("dimA", "pomegranate");
    event.put("dimB", "raw");
    event.put("metA", 5L);
    row = new MapBasedInputRow(1505260800000L, dimNames2, event);
    indexE.add(row);
    event = new HashMap<>();
    event.put("dimA", "mango");
    event.put("dimB", "ripe");
    event.put("metA", 9L);
    row = new MapBasedInputRow(1605260800000L, dimNames2, event);
    indexE.add(row);
    event = new HashMap<>();
    event.put("dimA", "pomegranate");
    event.put("dimB", "raw");
    event.put("metA", 3L);
    row = new MapBasedInputRow(1705264400000L, dimNames2, event);
    indexE.add(row);
    event = new HashMap<>();
    event.put("dimA", "mango");
    event.put("dimB", "ripe");
    event.put("metA", 7L);
    row = new MapBasedInputRow(1805264400000L, dimNames2, event);
    indexE.add(row);
    event = new HashMap<>();
    event.put("dimA", "grape");
    event.put("dimB", "raw");
    event.put("metA", 5L);
    row = new MapBasedInputRow(1805264400000L, dimNames2, event);
    indexE.add(row);
    event = new HashMap<>();
    event.put("dimA", "apple");
    event.put("dimB", "ripe");
    event.put("metA", 3L);
    row = new MapBasedInputRow(1805264400000L, dimNames2, event);
    indexE.add(row);
    event = new HashMap<>();
    event.put("dimA", "apple");
    event.put("dimB", "raw");
    event.put("metA", 1L);
    row = new MapBasedInputRow(1805264400000L, dimNames2, event);
    indexE.add(row);
    event = new HashMap<>();
    event.put("dimA", "apple");
    event.put("dimB", "ripe");
    event.put("metA", 4L);
    row = new MapBasedInputRow(1805264400000L, dimNames2, event);
    indexE.add(row);
    event = new HashMap<>();
    event.put("dimA", "apple");
    event.put("dimB", "raw");
    event.put("metA", 1L);
    row = new MapBasedInputRow(1805264400000L, dimNames2, event);
    indexE.add(row);
    event = new HashMap<>();
    event.put("dimA", "banana");
    event.put("dimB", "ripe");
    event.put("metA", 4L);
    row = new MapBasedInputRow(1805264400000L, dimNames2, event);
    indexE.add(row);
    event = new HashMap<>();
    event.put("dimA", "orange");
    event.put("dimB", "raw");
    event.put("metA", 9L);
    row = new MapBasedInputRow(1805264400000L, dimNames2, event);
    indexE.add(row);
    event = new HashMap<>();
    event.put("dimA", "peach");
    event.put("dimB", "ripe");
    event.put("metA", 7L);
    row = new MapBasedInputRow(1805264400000L, dimNames2, event);
    indexE.add(row);
    event = new HashMap<>();
    event.put("dimA", "orange");
    event.put("dimB", "raw");
    event.put("metA", 2L);
    row = new MapBasedInputRow(1805264400000L, dimNames2, event);
    indexE.add(row);
    event = new HashMap<>();
    event.put("dimA", "strawberry");
    event.put("dimB", "ripe");
    event.put("metA", 10L);
    row = new MapBasedInputRow(1805264400000L, dimNames2, event);
    indexE.add(row);
    final File fileE = INDEX_MERGER_V9.persist(indexE, new File(tmpDir, "E"), new IndexSpec(), null);
    QueryableIndex qindexE = INDEX_IO.loadIndex(fileE);
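    // Index F: a second fruit index with the same explicit three-column schema.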
    final IncrementalIndex indexF = makeIncIndex(false, dimensions);
    incrementalIndices.add(indexF);
    event = new HashMap<>();
    event.put("dimA", "kiwi");
    event.put("dimB", "raw");
    event.put("metA", 7L);
    row = new MapBasedInputRow(1505260800000L, dimNames2, event);
    indexF.add(row);
    event = new HashMap<>();
    event.put("dimA", "watermelon");
    event.put("dimB", "ripe");
    event.put("metA", 14L);
    row = new MapBasedInputRow(1605260800000L, dimNames2, event);
    indexF.add(row);
    event = new HashMap<>();
    event.put("dimA", "kiwi");
    event.put("dimB", "raw");
    event.put("metA", 8L);
    row = new MapBasedInputRow(1705264400000L, dimNames2, event);
    indexF.add(row);
    event = new HashMap<>();
    event.put("dimA", "kiwi");
    event.put("dimB", "ripe");
    event.put("metA", 8L);
    row = new MapBasedInputRow(1805264400000L, dimNames2, event);
    indexF.add(row);
    event = new HashMap<>();
    event.put("dimA", "lemon");
    event.put("dimB", "raw");
    event.put("metA", 3L);
    row = new MapBasedInputRow(1805264400000L, dimNames2, event);
    indexF.add(row);
    event = new HashMap<>();
    event.put("dimA", "cherry");
    event.put("dimB", "ripe");
    event.put("metA", 2L);
    row = new MapBasedInputRow(1805264400000L, dimNames2, event);
    indexF.add(row);
    event = new HashMap<>();
    event.put("dimA", "cherry");
    event.put("dimB", "raw");
    event.put("metA", 7L);
    row = new MapBasedInputRow(1805264400000L, dimNames2, event);
    indexF.add(row);
    event = new HashMap<>();
    event.put("dimA", "avocado");
    event.put("dimB", "ripe");
    event.put("metA", 12L);
    row = new MapBasedInputRow(1805264400000L, dimNames2, event);
    indexF.add(row);
    event = new HashMap<>();
    event.put("dimA", "cherry");
    event.put("dimB", "raw");
    event.put("metA", 3L);
    row = new MapBasedInputRow(1805264400000L, dimNames2, event);
    indexF.add(row);
    event = new HashMap<>();
    event.put("dimA", "plum");
    event.put("dimB", "ripe");
    event.put("metA", 5L);
    row = new MapBasedInputRow(1805264400000L, dimNames2, event);
    indexF.add(row);
    event = new HashMap<>();
    event.put("dimA", "plum");
    event.put("dimB", "raw");
    event.put("metA", 3L);
    row = new MapBasedInputRow(1805264400000L, dimNames2, event);
    indexF.add(row);
    event = new HashMap<>();
    event.put("dimA", "lime");
    event.put("dimB", "ripe");
    event.put("metA", 7L);
    row = new MapBasedInputRow(1805264400000L, dimNames2, event);
    indexF.add(row);
    final File fileF = INDEX_MERGER_V9.persist(indexF, new File(tmpDir, "F"), new IndexSpec(), null);
    QueryableIndex qindexF = INDEX_IO.loadIndex(fileF);
    groupByIndices = Arrays.asList(qindexA, qindexB, qindexC, qindexD, qindexE, qindexF);
    resourceCloser = Closer.create();
    setupGroupByFactory();
}
Also used : IndexSpec(org.apache.druid.segment.IndexSpec) IncrementalIndex(org.apache.druid.segment.incremental.IncrementalIndex) OnheapIncrementalIndex(org.apache.druid.segment.incremental.OnheapIncrementalIndex) LongDimensionSchema(org.apache.druid.data.input.impl.LongDimensionSchema) StringDimensionSchema(org.apache.druid.data.input.impl.StringDimensionSchema) DimensionSchema(org.apache.druid.data.input.impl.DimensionSchema) QueryableIndex(org.apache.druid.segment.QueryableIndex) InputRow(org.apache.druid.data.input.InputRow) MapBasedInputRow(org.apache.druid.data.input.MapBasedInputRow) File(java.io.File) Before(org.junit.Before)
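
The setup above repeats the same event-map/row/add sequence dozens of times. A small helper in the test class would remove most of that duplication; this is a hypothetical refactoring sketch, not the test's actual code:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.druid.data.input.MapBasedInputRow;
import org.apache.druid.segment.incremental.IncrementalIndex;

// Hypothetical helper: builds the event map and adds the row in one call.
// dimValuePairs alternates column name and value, e.g. "dimA", "apple", "metA", 1L.
private static void addRow(
    IncrementalIndex index,
    long timestamp,
    List<String> dimNames,
    Object... dimValuePairs
) throws Exception {
    Map<String, Object> event = new HashMap<>();
    for (int i = 0; i < dimValuePairs.length; i += 2) {
        event.put((String) dimValuePairs[i], dimValuePairs[i + 1]);
    }
    index.add(new MapBasedInputRow(timestamp, dimNames, event));
}

// Usage, replacing one four-line block above:
// addRow(indexE, 1805264400000L, dimNames2, "dimA", "apple", "dimB", "raw", "metA", 1L);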

Aggregations

StringDimensionSchema (org.apache.druid.data.input.impl.StringDimensionSchema): 36 uses
DimensionsSpec (org.apache.druid.data.input.impl.DimensionsSpec): 30 uses
Test (org.junit.Test): 24 uses
LongSumAggregatorFactory (org.apache.druid.query.aggregation.LongSumAggregatorFactory): 19 uses
LongDimensionSchema (org.apache.druid.data.input.impl.LongDimensionSchema): 15 uses
AggregatorFactory (org.apache.druid.query.aggregation.AggregatorFactory): 15 uses
FloatDimensionSchema (org.apache.druid.data.input.impl.FloatDimensionSchema): 14 uses
TimestampSpec (org.apache.druid.data.input.impl.TimestampSpec): 12 uses
MapBasedInputRow (org.apache.druid.data.input.MapBasedInputRow): 11 uses
CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory): 11 uses
File (java.io.File): 8 uses
ArrayList (java.util.ArrayList): 8 uses
Before (org.junit.Before): 8 uses
ImmutableList (com.google.common.collect.ImmutableList): 7 uses
HashMap (java.util.HashMap): 7 uses
DataSchema (org.apache.druid.segment.indexing.DataSchema): 7 uses
UniformGranularitySpec (org.apache.druid.segment.indexing.granularity.UniformGranularitySpec): 7 uses
List (java.util.List): 6 uses
SupervisorStateManagerConfig (org.apache.druid.indexing.overlord.supervisor.SupervisorStateManagerConfig): 6 uses
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 5 uses