
Example 6 with Supplier

Use of com.google.common.base.Supplier in project druid by druid-io.

In class AppenderatorTest, method testRestoreFromDisk.

@Test
public void testRestoreFromDisk() throws Exception {
    final RealtimeTuningConfig tuningConfig;
    try (final AppenderatorTester tester = new AppenderatorTester(2)) {
        final Appenderator appenderator = tester.getAppenderator();
        tuningConfig = tester.getTuningConfig();
        final AtomicInteger eventCount = new AtomicInteger(0);
        final Supplier<Committer> committerSupplier = new Supplier<Committer>() {

            @Override
            public Committer get() {
                final Object metadata = ImmutableMap.of("eventCount", eventCount.get());
                return new Committer() {

                    @Override
                    public Object getMetadata() {
                        return metadata;
                    }

                    @Override
                    public void run() {
                        // Do nothing.
                    }
                };
            }
        };
        appenderator.startJob();
        eventCount.incrementAndGet();
        appenderator.add(IDENTIFIERS.get(0), IR("2000", "foo", 1), committerSupplier);
        eventCount.incrementAndGet();
        appenderator.add(IDENTIFIERS.get(0), IR("2000", "bar", 2), committerSupplier);
        eventCount.incrementAndGet();
        appenderator.add(IDENTIFIERS.get(0), IR("2000", "baz", 3), committerSupplier);
        eventCount.incrementAndGet();
        appenderator.add(IDENTIFIERS.get(0), IR("2000", "qux", 4), committerSupplier);
        eventCount.incrementAndGet();
        appenderator.add(IDENTIFIERS.get(0), IR("2000", "bob", 5), committerSupplier);
        appenderator.close();
        try (final AppenderatorTester tester2 = new AppenderatorTester(2, tuningConfig.getBasePersistDirectory())) {
            final Appenderator appenderator2 = tester2.getAppenderator();
            Assert.assertEquals(ImmutableMap.of("eventCount", 4), appenderator2.startJob());
            Assert.assertEquals(ImmutableList.of(IDENTIFIERS.get(0)), appenderator2.getSegments());
            Assert.assertEquals(4, appenderator2.getRowCount(IDENTIFIERS.get(0)));
        }
    }
}
Also used: AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Supplier (com.google.common.base.Supplier), Committer (io.druid.data.input.Committer), RealtimeTuningConfig (io.druid.segment.indexing.RealtimeTuningConfig), Test (org.junit.Test)
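
The anonymous Supplier<Committer> above predates lambdas. Because com.google.common.base.Supplier has a single abstract method, get(), the same pattern compresses to a lambda on a Java 8+ toolchain. Below is a minimal, standalone sketch of that idea; the class name CommitterSupplierSketch is made up, and it returns just the metadata map rather than a full Druid Committer so it stays self-contained.

import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableMap;
import java.util.concurrent.atomic.AtomicInteger;

public class CommitterSupplierSketch {

    public static void main(String[] args) {
        final AtomicInteger eventCount = new AtomicInteger(0);
        // The lambda body runs each time get() is called, so every commit
        // captures the event count as of the moment it was requested.
        final Supplier<Object> commitMetadata =
            () -> ImmutableMap.of("eventCount", eventCount.get());
        eventCount.incrementAndGet();
        System.out.println(commitMetadata.get()); // {eventCount=1}
        eventCount.incrementAndGet();
        System.out.println(commitMetadata.get()); // {eventCount=2}
    }
}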

Example 7 with Supplier

Use of com.google.common.base.Supplier in project druid by druid-io.

In class AppenderatorTest, method testMaxRowsInMemory.

@Test
public void testMaxRowsInMemory() throws Exception {
    try (final AppenderatorTester tester = new AppenderatorTester(3)) {
        final Appenderator appenderator = tester.getAppenderator();
        final AtomicInteger eventCount = new AtomicInteger(0);
        final Supplier<Committer> committerSupplier = new Supplier<Committer>() {

            @Override
            public Committer get() {
                final Object metadata = ImmutableMap.of("eventCount", eventCount.get());
                return new Committer() {

                    @Override
                    public Object getMetadata() {
                        return metadata;
                    }

                    @Override
                    public void run() {
                        // Do nothing.
                    }
                };
            }
        };
        Assert.assertEquals(0, ((AppenderatorImpl) appenderator).getRowsInMemory());
        appenderator.startJob();
        Assert.assertEquals(0, ((AppenderatorImpl) appenderator).getRowsInMemory());
        appenderator.add(IDENTIFIERS.get(0), IR("2000", "foo", 1), committerSupplier);
        Assert.assertEquals(1, ((AppenderatorImpl) appenderator).getRowsInMemory());
        appenderator.add(IDENTIFIERS.get(1), IR("2000", "bar", 1), committerSupplier);
        Assert.assertEquals(2, ((AppenderatorImpl) appenderator).getRowsInMemory());
        appenderator.add(IDENTIFIERS.get(1), IR("2000", "bar", 1), committerSupplier);
        Assert.assertEquals(2, ((AppenderatorImpl) appenderator).getRowsInMemory());
        appenderator.add(IDENTIFIERS.get(0), IR("2000", "baz", 1), committerSupplier);
        Assert.assertEquals(0, ((AppenderatorImpl) appenderator).getRowsInMemory());
        appenderator.add(IDENTIFIERS.get(1), IR("2000", "qux", 1), committerSupplier);
        Assert.assertEquals(1, ((AppenderatorImpl) appenderator).getRowsInMemory());
        appenderator.add(IDENTIFIERS.get(0), IR("2000", "bob", 1), committerSupplier);
        Assert.assertEquals(2, ((AppenderatorImpl) appenderator).getRowsInMemory());
        appenderator.close();
        Assert.assertEquals(0, ((AppenderatorImpl) appenderator).getRowsInMemory());
    }
}
Also used: AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Supplier (com.google.common.base.Supplier), Committer (io.druid.data.input.Committer), Test (org.junit.Test)

Example 8 with Supplier

Use of com.google.common.base.Supplier in project druid by druid-io.

In class DruidPlanner, method planWithBindableConvention.

private PlannerResult planWithBindableConvention(final SqlExplain explain, final RelRoot root) throws RelConversionException {
    BindableRel bindableRel = (BindableRel) planner.transform(Rules.BINDABLE_CONVENTION_RULES, planner.getEmptyTraitSet().replace(BindableConvention.INSTANCE).plus(root.collation), root.rel);
    if (!root.isRefTrivial()) {
        // Add a projection on top to accommodate root.fields.
        final List<RexNode> projects = new ArrayList<>();
        final RexBuilder rexBuilder = bindableRel.getCluster().getRexBuilder();
        for (int field : Pair.left(root.fields)) {
            projects.add(rexBuilder.makeInputRef(bindableRel, field));
        }
        bindableRel = new Bindables.BindableProject(bindableRel.getCluster(), bindableRel.getTraitSet(), bindableRel, projects, root.validatedRowType);
    }
    if (explain != null) {
        return planExplanation(bindableRel, explain);
    } else {
        final BindableRel theRel = bindableRel;
        final DataContext dataContext = plannerContext.createDataContext((JavaTypeFactory) planner.getTypeFactory());
        final Supplier<Sequence<Object[]>> resultsSupplier = new Supplier<Sequence<Object[]>>() {

            @Override
            public Sequence<Object[]> get() {
                final Enumerable enumerable = theRel.bind(dataContext);
                return Sequences.simple(enumerable);
            }
        };
        return new PlannerResult(resultsSupplier, root.validatedRowType);
    }
}
Also used: ArrayList (java.util.ArrayList), BindableRel (org.apache.calcite.interpreter.BindableRel), Bindables (org.apache.calcite.interpreter.Bindables), Sequence (io.druid.java.util.common.guava.Sequence), DataContext (org.apache.calcite.DataContext), RexBuilder (org.apache.calcite.rex.RexBuilder), Enumerable (org.apache.calcite.linq4j.Enumerable), Supplier (com.google.common.base.Supplier), RexNode (org.apache.calcite.rex.RexNode)
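
The point of wrapping the results in a Supplier here is deferral: nothing is bound or executed until the PlannerResult consumer calls get(). A minimal sketch of that behavior using plain Guava follows; the class name LazySupplierSketch and the long[] payload are illustrative, and Suppliers.memoize adds caching of the first result, which the planner code above does not do. It is shown only to make the laziness visible.

import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;

public class LazySupplierSketch {

    public static void main(String[] args) {
        final Supplier<long[]> expensive = () -> {
            // Runs only when get() is first called, not when the supplier is built.
            System.out.println("computing results...");
            return new long[] { 1L, 2L, 3L };
        };
        final Supplier<long[]> memoized = Suppliers.memoize(expensive);
        System.out.println("supplier created, nothing computed yet");
        memoized.get(); // prints "computing results..."
        memoized.get(); // cached; the computation does not run again
    }
}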

Example 9 with Supplier

Use of com.google.common.base.Supplier in project druid by druid-io.

In class KafkaIndexTask, method run.

@Override
public TaskStatus run(final TaskToolbox toolbox) throws Exception {
    log.info("Starting up!");
    startTime = DateTime.now();
    mapper = toolbox.getObjectMapper();
    status = Status.STARTING;
    if (chatHandlerProvider.isPresent()) {
        log.info("Found chat handler of class[%s]", chatHandlerProvider.get().getClass().getName());
        chatHandlerProvider.get().register(getId(), this, false);
    } else {
        log.warn("No chat handler detected");
    }
    runThread = Thread.currentThread();
    // Set up FireDepartmentMetrics
    final FireDepartment fireDepartmentForMetrics = new FireDepartment(dataSchema, new RealtimeIOConfig(null, null, null), null);
    fireDepartmentMetrics = fireDepartmentForMetrics.getMetrics();
    toolbox.getMonitorScheduler().addMonitor(new RealtimeMetricsMonitor(ImmutableList.of(fireDepartmentForMetrics), ImmutableMap.of(DruidMetrics.TASK_ID, new String[] { getId() })));
    try (final Appenderator appenderator0 = newAppenderator(fireDepartmentMetrics, toolbox);
        final FiniteAppenderatorDriver driver = newDriver(appenderator0, toolbox, fireDepartmentMetrics);
        final KafkaConsumer<byte[], byte[]> consumer = newConsumer()) {
        appenderator = appenderator0;
        final String topic = ioConfig.getStartPartitions().getTopic();
        // Start up, set up initial offsets.
        final Object restoredMetadata = driver.startJob();
        if (restoredMetadata == null) {
            nextOffsets.putAll(ioConfig.getStartPartitions().getPartitionOffsetMap());
        } else {
            final Map<String, Object> restoredMetadataMap = (Map) restoredMetadata;
            final KafkaPartitions restoredNextPartitions = toolbox.getObjectMapper().convertValue(restoredMetadataMap.get(METADATA_NEXT_PARTITIONS), KafkaPartitions.class);
            nextOffsets.putAll(restoredNextPartitions.getPartitionOffsetMap());
            // Sanity checks.
            if (!restoredNextPartitions.getTopic().equals(ioConfig.getStartPartitions().getTopic())) {
                throw new ISE("WTF?! Restored topic[%s] but expected topic[%s]", restoredNextPartitions.getTopic(), ioConfig.getStartPartitions().getTopic());
            }
            if (!nextOffsets.keySet().equals(ioConfig.getStartPartitions().getPartitionOffsetMap().keySet())) {
                throw new ISE("WTF?! Restored partitions[%s] but expected partitions[%s]", nextOffsets.keySet(), ioConfig.getStartPartitions().getPartitionOffsetMap().keySet());
            }
        }
        // Set up sequenceNames.
        final Map<Integer, String> sequenceNames = Maps.newHashMap();
        for (Integer partitionNum : nextOffsets.keySet()) {
            sequenceNames.put(partitionNum, String.format("%s_%s", ioConfig.getBaseSequenceName(), partitionNum));
        }
        // Set up committer.
        final Supplier<Committer> committerSupplier = new Supplier<Committer>() {

            @Override
            public Committer get() {
                final Map<Integer, Long> snapshot = ImmutableMap.copyOf(nextOffsets);
                return new Committer() {

                    @Override
                    public Object getMetadata() {
                        return ImmutableMap.of(METADATA_NEXT_PARTITIONS, new KafkaPartitions(ioConfig.getStartPartitions().getTopic(), snapshot));
                    }

                    @Override
                    public void run() {
                        // Do nothing.
                    }
                };
            }
        };
        Set<Integer> assignment = assignPartitionsAndSeekToNext(consumer, topic);
        // Main loop.
        // Could eventually support leader/follower mode (for keeping replicas more in sync)
        boolean stillReading = !assignment.isEmpty();
        status = Status.READING;
        try {
            while (stillReading) {
                if (possiblyPause(assignment)) {
                    // The partition assignments may have changed while paused by a call to setEndOffsets() so reassign
                    // partitions upon resuming. This is safe even if the end offsets have not been modified.
                    assignment = assignPartitionsAndSeekToNext(consumer, topic);
                    if (assignment.isEmpty()) {
                        log.info("All partitions have been fully read");
                        publishOnStop = true;
                        stopRequested = true;
                    }
                }
                if (stopRequested) {
                    break;
                }
                // The retrying business is because the KafkaConsumer throws OffsetOutOfRangeException if the seeked-to
                // offset is not present in the topic-partition. This can happen if we're asking a task to read from data
                // that has not been written yet (which is totally legitimate). So let's wait for it to show up.
                ConsumerRecords<byte[], byte[]> records = ConsumerRecords.empty();
                try {
                    records = consumer.poll(POLL_TIMEOUT);
                } catch (OffsetOutOfRangeException e) {
                    log.warn("OffsetOutOfRangeException with message [%s]", e.getMessage());
                    possiblyResetOffsetsOrWait(e.offsetOutOfRangePartitions(), consumer, toolbox);
                    stillReading = ioConfig.isPauseAfterRead() || !assignment.isEmpty();
                }
                for (ConsumerRecord<byte[], byte[]> record : records) {
                    if (log.isTraceEnabled()) {
                        log.trace("Got topic[%s] partition[%d] offset[%,d].", record.topic(), record.partition(), record.offset());
                    }
                    if (record.offset() < endOffsets.get(record.partition())) {
                        if (record.offset() != nextOffsets.get(record.partition())) {
                            throw new ISE("WTF?! Got offset[%,d] after offset[%,d] in partition[%d].", record.offset(), nextOffsets.get(record.partition()), record.partition());
                        }
                        try {
                            final byte[] valueBytes = record.value();
                            if (valueBytes == null) {
                                throw new ParseException("null value");
                            }
                            final InputRow row = Preconditions.checkNotNull(parser.parse(ByteBuffer.wrap(valueBytes)), "row");
                            if (!ioConfig.getMinimumMessageTime().isPresent() || !ioConfig.getMinimumMessageTime().get().isAfter(row.getTimestamp())) {
                                final SegmentIdentifier identifier = driver.add(row, sequenceNames.get(record.partition()), committerSupplier);
                                if (identifier == null) {
                                    // If we allow continuing, then consider blacklisting the interval for a while to avoid constant checks.
                                    throw new ISE("Could not allocate segment for row with timestamp[%s]", row.getTimestamp());
                                }
                                fireDepartmentMetrics.incrementProcessed();
                            } else {
                                fireDepartmentMetrics.incrementThrownAway();
                            }
                        } catch (ParseException e) {
                            if (tuningConfig.isReportParseExceptions()) {
                                throw e;
                            } else {
                                log.debug(e, "Dropping unparseable row from partition[%d] offset[%,d].", record.partition(), record.offset());
                                fireDepartmentMetrics.incrementUnparseable();
                            }
                        }
                        nextOffsets.put(record.partition(), record.offset() + 1);
                    }
                    if (nextOffsets.get(record.partition()).equals(endOffsets.get(record.partition())) && assignment.remove(record.partition())) {
                        log.info("Finished reading topic[%s], partition[%,d].", record.topic(), record.partition());
                        assignPartitions(consumer, topic, assignment);
                        stillReading = ioConfig.isPauseAfterRead() || !assignment.isEmpty();
                    }
                }
            }
        } finally {
            // persist pending data
            driver.persist(committerSupplier.get());
        }
        synchronized (statusLock) {
            if (stopRequested && !publishOnStop) {
                throw new InterruptedException("Stopping without publishing");
            }
            status = Status.PUBLISHING;
        }
        final TransactionalSegmentPublisher publisher = new TransactionalSegmentPublisher() {

            @Override
            public boolean publishSegments(Set<DataSegment> segments, Object commitMetadata) throws IOException {
                final KafkaPartitions finalPartitions = toolbox.getObjectMapper().convertValue(((Map) commitMetadata).get(METADATA_NEXT_PARTITIONS), KafkaPartitions.class);
                // Sanity check: we should only be publishing things that match our desired end state.
                if (!endOffsets.equals(finalPartitions.getPartitionOffsetMap())) {
                    throw new ISE("WTF?! Driver attempted to publish invalid metadata[%s].", commitMetadata);
                }
                final SegmentTransactionalInsertAction action;
                if (ioConfig.isUseTransaction()) {
                    action = new SegmentTransactionalInsertAction(segments, new KafkaDataSourceMetadata(ioConfig.getStartPartitions()), new KafkaDataSourceMetadata(finalPartitions));
                } else {
                    action = new SegmentTransactionalInsertAction(segments, null, null);
                }
                log.info("Publishing with isTransaction[%s].", ioConfig.isUseTransaction());
                return toolbox.getTaskActionClient().submit(action).isSuccess();
            }
        };
        final SegmentsAndMetadata published = driver.finish(publisher, committerSupplier.get());
        if (published == null) {
            throw new ISE("Transaction failure publishing segments, aborting");
        } else {
            log.info("Published segments[%s] with metadata[%s].", Joiner.on(", ").join(Iterables.transform(published.getSegments(), new Function<DataSegment, String>() {

                @Override
                public String apply(DataSegment input) {
                    return input.getIdentifier();
                }
            })), published.getCommitMetadata());
        }
    } catch (InterruptedException | RejectedExecutionException e) {
        // handle the InterruptedException that gets wrapped in a RejectedExecutionException
        if (e instanceof RejectedExecutionException && (e.getCause() == null || !(e.getCause() instanceof InterruptedException))) {
            throw e;
        }
        // if we were interrupted because we were asked to stop, handle the exception and return success, else rethrow
        if (!stopRequested) {
            Thread.currentThread().interrupt();
            throw e;
        }
        log.info("The task was asked to stop before completing");
    } finally {
        if (chatHandlerProvider.isPresent()) {
            chatHandlerProvider.get().unregister(getId());
        }
    }
    return success();
}
Also used: RealtimeIOConfig (io.druid.segment.indexing.RealtimeIOConfig), Set (java.util.Set), SegmentIdentifier (io.druid.segment.realtime.appenderator.SegmentIdentifier), SegmentTransactionalInsertAction (io.druid.indexing.common.actions.SegmentTransactionalInsertAction), DataSegment (io.druid.timeline.DataSegment), FireDepartment (io.druid.segment.realtime.FireDepartment), TransactionalSegmentPublisher (io.druid.segment.realtime.appenderator.TransactionalSegmentPublisher), ISE (io.druid.java.util.common.ISE), Supplier (com.google.common.base.Supplier), SegmentsAndMetadata (io.druid.segment.realtime.appenderator.SegmentsAndMetadata), RejectedExecutionException (java.util.concurrent.RejectedExecutionException), Appenderator (io.druid.segment.realtime.appenderator.Appenderator), FiniteAppenderatorDriver (io.druid.segment.realtime.appenderator.FiniteAppenderatorDriver), InputRow (io.druid.data.input.InputRow), RealtimeMetricsMonitor (io.druid.segment.realtime.RealtimeMetricsMonitor), Committer (io.druid.data.input.Committer), ParseException (io.druid.java.util.common.parsers.ParseException), OffsetOutOfRangeException (org.apache.kafka.clients.consumer.OffsetOutOfRangeException), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)
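
The key detail in the committer supplier above is the ImmutableMap.copyOf(nextOffsets) inside get(): each committer carries a snapshot of the Kafka offsets at the moment it was created, even though ingestion keeps advancing the live map afterwards. The standalone sketch below shows only that snapshot behavior; the class name OffsetSnapshotSketch and the offset values are made up for illustration.

import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class OffsetSnapshotSketch {

    public static void main(String[] args) {
        final Map<Integer, Long> nextOffsets = new ConcurrentHashMap<>();
        nextOffsets.put(0, 10L);
        // Each call to get() freezes the offsets as they are right now.
        final Supplier<Map<Integer, Long>> committedOffsets =
            () -> ImmutableMap.copyOf(nextOffsets);
        final Map<Integer, Long> snapshot = committedOffsets.get();
        // Ingestion moves on after the snapshot was taken.
        nextOffsets.put(0, 11L);
        System.out.println(snapshot.get(0));               // still 10
        System.out.println(committedOffsets.get().get(0)); // a new snapshot sees 11
    }
}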

Example 10 with Supplier

Use of com.google.common.base.Supplier in project druid by druid-io.

In class GroupByRowProcessor, method process.

public static Sequence<Row> process(final Query queryParam, final Sequence<Row> rows, final Map<String, ValueType> rowSignature, final GroupByQueryConfig config, final GroupByQueryResource resource, final ObjectMapper spillMapper, final String processingTmpDir) {
    final GroupByQuery query = (GroupByQuery) queryParam;
    final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
    final AggregatorFactory[] aggregatorFactories = new AggregatorFactory[query.getAggregatorSpecs().size()];
    for (int i = 0; i < query.getAggregatorSpecs().size(); i++) {
        aggregatorFactories[i] = query.getAggregatorSpecs().get(i);
    }
    final File temporaryStorageDirectory = new File(processingTmpDir, String.format("druid-groupBy-%s_%s", UUID.randomUUID(), query.getId()));
    final List<Interval> queryIntervals = query.getIntervals();
    final Filter filter = Filters.convertToCNFFromQueryContext(query, Filters.toFilter(query.getDimFilter()));
    final SettableSupplier<Row> rowSupplier = new SettableSupplier<>();
    final RowBasedColumnSelectorFactory columnSelectorFactory = RowBasedColumnSelectorFactory.create(rowSupplier, rowSignature);
    final ValueMatcher filterMatcher = filter == null ? BooleanValueMatcher.of(true) : filter.makeMatcher(columnSelectorFactory);
    final FilteredSequence<Row> filteredSequence = new FilteredSequence<>(rows, new Predicate<Row>() {

        @Override
        public boolean apply(Row input) {
            boolean inInterval = false;
            DateTime rowTime = input.getTimestamp();
            for (Interval queryInterval : queryIntervals) {
                if (queryInterval.contains(rowTime)) {
                    inInterval = true;
                    break;
                }
            }
            if (!inInterval) {
                return false;
            }
            rowSupplier.set(input);
            return filterMatcher.matches();
        }
    });
    return new BaseSequence<>(new BaseSequence.IteratorMaker<Row, CloseableGrouperIterator<RowBasedKey, Row>>() {

        @Override
        public CloseableGrouperIterator<RowBasedKey, Row> make() {
            // This contains all closeable objects, which are closed when the returned iterator has iterated over all the elements
            // or an exception is thrown. The objects are closed in reverse order.
            final List<Closeable> closeOnExit = Lists.newArrayList();
            try {
                final LimitedTemporaryStorage temporaryStorage = new LimitedTemporaryStorage(temporaryStorageDirectory, querySpecificConfig.getMaxOnDiskStorage());
                closeOnExit.add(temporaryStorage);
                Pair<Grouper<RowBasedKey>, Accumulator<Grouper<RowBasedKey>, Row>> pair = RowBasedGrouperHelper.createGrouperAccumulatorPair(query, true, rowSignature, querySpecificConfig, new Supplier<ByteBuffer>() {

                    @Override
                    public ByteBuffer get() {
                        final ResourceHolder<ByteBuffer> mergeBufferHolder = resource.getMergeBuffer();
                        closeOnExit.add(mergeBufferHolder);
                        return mergeBufferHolder.get();
                    }
                }, -1, temporaryStorage, spillMapper, aggregatorFactories);
                final Grouper<RowBasedKey> grouper = pair.lhs;
                final Accumulator<Grouper<RowBasedKey>, Row> accumulator = pair.rhs;
                closeOnExit.add(grouper);
                final Grouper<RowBasedKey> retVal = filteredSequence.accumulate(grouper, accumulator);
                if (retVal != grouper) {
                    throw GroupByQueryHelper.throwAccumulationResourceLimitExceededException();
                }
                return RowBasedGrouperHelper.makeGrouperIterator(grouper, query, new Closeable() {

                    @Override
                    public void close() throws IOException {
                        for (Closeable closeable : Lists.reverse(closeOnExit)) {
                            CloseQuietly.close(closeable);
                        }
                    }
                });
            } catch (Throwable e) {
                // Exception caught while setting up the iterator; release resources.
                for (Closeable closeable : Lists.reverse(closeOnExit)) {
                    CloseQuietly.close(closeable);
                }
                throw e;
            }
        }

        @Override
        public void cleanup(CloseableGrouperIterator<RowBasedKey, Row> iterFromMake) {
            iterFromMake.close();
        }
    });
}
Also used: Accumulator (io.druid.java.util.common.guava.Accumulator), Closeable (java.io.Closeable), RowBasedColumnSelectorFactory (io.druid.query.groupby.RowBasedColumnSelectorFactory), DateTime (org.joda.time.DateTime), GroupByQuery (io.druid.query.groupby.GroupByQuery), List (java.util.List), Supplier (com.google.common.base.Supplier), SettableSupplier (io.druid.common.guava.SettableSupplier), Pair (io.druid.java.util.common.Pair), BooleanValueMatcher (io.druid.segment.filter.BooleanValueMatcher), ValueMatcher (io.druid.query.filter.ValueMatcher), GroupByQueryConfig (io.druid.query.groupby.GroupByQueryConfig), FilteredSequence (io.druid.java.util.common.guava.FilteredSequence), RowBasedKey (io.druid.query.groupby.epinephelinae.RowBasedGrouperHelper.RowBasedKey), AggregatorFactory (io.druid.query.aggregation.AggregatorFactory), ByteBuffer (java.nio.ByteBuffer), BaseSequence (io.druid.java.util.common.guava.BaseSequence), Filter (io.druid.query.filter.Filter), Row (io.druid.data.input.Row), File (java.io.File), Interval (org.joda.time.Interval)
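
The Supplier<ByteBuffer> passed to createGrouperAccumulatorPair above defers acquiring a merge buffer until the grouper actually asks for one, and it registers the acquired holder in closeOnExit so the buffer is released when the iterator closes. The sketch below shows the same shape in isolation; the class name DeferredBufferSketch, the 1 MiB size, and the printed cleanup message are illustrative stand-ins, not Druid's ResourceHolder API.

import com.google.common.base.Supplier;
import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

public class DeferredBufferSketch {

    public static void main(String[] args) throws IOException {
        final List<Closeable> closeOnExit = new ArrayList<>();
        final Supplier<ByteBuffer> bufferSupplier = () -> {
            // Allocation happens only when the consumer first calls get().
            final ByteBuffer buffer = ByteBuffer.allocate(1 << 20);
            closeOnExit.add(() -> System.out.println("released buffer"));
            return buffer;
        };
        System.out.println("no buffer allocated yet");
        bufferSupplier.get(); // allocation and cleanup registration happen here
        for (Closeable closeable : closeOnExit) {
            closeable.close(); // prints "released buffer"
        }
    }
}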

Aggregations

Supplier (com.google.common.base.Supplier): 51
IOException (java.io.IOException): 14
Test (org.junit.Test): 11
ImmutableMap (com.google.common.collect.ImmutableMap): 8
Map (java.util.Map): 8
Path (java.nio.file.Path): 5
ArrayList (java.util.ArrayList): 5
List (java.util.List): 5
Set (java.util.Set): 5
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 4
ImmutableList (com.google.common.collect.ImmutableList): 4
ImmutableSet (com.google.common.collect.ImmutableSet): 4
Committer (io.druid.data.input.Committer): 4
ByteBuffer (java.nio.ByteBuffer): 4
Optional (java.util.Optional): 4
SourcePath (com.facebook.buck.rules.SourcePath): 3
InputRow (io.druid.data.input.InputRow): 3
AggregatorFactory (io.druid.query.aggregation.AggregatorFactory): 3
File (java.io.File): 3
HashMap (java.util.HashMap): 3