
Example 11 with TransactionExecutor

Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.

From class AppMetadataStoreTest, method testScanRunningInRangeWithBatch.

@Test
public void testScanRunningInRangeWithBatch() throws Exception {
    DatasetId storeTable = NamespaceId.DEFAULT.dataset("testScanRunningInRange");
    datasetFramework.addInstance(Table.class.getName(), storeTable, DatasetProperties.EMPTY);
    Table table = datasetFramework.getDataset(storeTable, ImmutableMap.<String, String>of(), null);
    Assert.assertNotNull(table);
    final AppMetadataStore metadataStoreDataset = new AppMetadataStore(table, cConf, new AtomicBoolean(false));
    TransactionExecutor txnl = txExecutorFactory.createExecutor(Collections.singleton((TransactionAware) metadataStoreDataset));
    // Add some run records
    TreeSet<Long> expected = new TreeSet<>();
    for (int i = 0; i < 100; ++i) {
        ApplicationId application = NamespaceId.DEFAULT.app("app" + i);
        final ProgramId program = application.program(ProgramType.values()[i % ProgramType.values().length], "program" + i);
        final RunId runId = RunIds.generate((i + 1) * 10000);
        expected.add(RunIds.getTime(runId, TimeUnit.MILLISECONDS));
        // Start the program and stop it
        final int j = i;
        txnl.execute(new TransactionExecutor.Subroutine() {

            @Override
            public void apply() throws Exception {
                metadataStoreDataset.recordProgramStart(program, runId.getId(), RunIds.getTime(runId, TimeUnit.SECONDS), null, null, null);
                metadataStoreDataset.recordProgramStop(program, runId.getId(), RunIds.getTime(runId, TimeUnit.SECONDS), ProgramRunStatus.values()[j % ProgramRunStatus.values().length], null);
            }
        });
    }
    // Run full scan
    runScan(txnl, metadataStoreDataset, expected, 0, Long.MAX_VALUE);
    // In all below assertions, TreeSet and metadataStore both have start time inclusive and end time exclusive.
    // Run the scan with time limit
    runScan(txnl, metadataStoreDataset, expected.subSet(30 * 10000L, 90 * 10000L), TimeUnit.MILLISECONDS.toSeconds(30 * 10000), TimeUnit.MILLISECONDS.toSeconds(90 * 10000));
    runScan(txnl, metadataStoreDataset, expected.subSet(90 * 10000L, 101 * 10000L), TimeUnit.MILLISECONDS.toSeconds(90 * 10000), TimeUnit.MILLISECONDS.toSeconds(101 * 10000));
    // After range
    runScan(txnl, metadataStoreDataset, expected.subSet(101 * 10000L, 200 * 10000L), TimeUnit.MILLISECONDS.toSeconds(101 * 10000), TimeUnit.MILLISECONDS.toSeconds(200 * 10000));
    // Identical start and end time
    runScan(txnl, metadataStoreDataset, expected.subSet(31 * 10000L, 31 * 10000L), TimeUnit.MILLISECONDS.toSeconds(31 * 10000), TimeUnit.MILLISECONDS.toSeconds(31 * 10000));
    // One unit difference between start and end time
    runScan(txnl, metadataStoreDataset, expected.subSet(30 * 10000L, 31 * 10000L), TimeUnit.MILLISECONDS.toSeconds(30 * 10000), TimeUnit.MILLISECONDS.toSeconds(31 * 10000));
    // Before range
    runScan(txnl, metadataStoreDataset, expected.subSet(1000L, 10000L), TimeUnit.MILLISECONDS.toSeconds(1000), TimeUnit.MILLISECONDS.toSeconds(10000));
}
Also used : Table(co.cask.cdap.api.dataset.table.Table) TransactionExecutor(org.apache.tephra.TransactionExecutor) ProgramId(co.cask.cdap.proto.id.ProgramId) TransactionFailureException(org.apache.tephra.TransactionFailureException) DatasetId(co.cask.cdap.proto.id.DatasetId) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) TransactionAware(org.apache.tephra.TransactionAware) TreeSet(java.util.TreeSet) ApplicationId(co.cask.cdap.proto.id.ApplicationId) RunId(org.apache.twill.api.RunId) ProgramRunId(co.cask.cdap.proto.id.ProgramRunId) Test(org.junit.Test)
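Distilled from the test above, a minimal sketch of the core pattern: a single TransactionAware dataset is wrapped in a TransactionExecutor and all mutations run inside a Subroutine so they commit together or roll back on failure. The class and method names below are hypothetical, not part of the CDAP sources.

import java.util.Collections;
import org.apache.tephra.TransactionAware;
import org.apache.tephra.TransactionExecutor;
import org.apache.tephra.TransactionExecutorFactory;
import org.apache.tephra.TransactionFailureException;

final class TxWriteSketch {

    void writeInTx(TransactionExecutorFactory factory, TransactionAware dataset)
            throws TransactionFailureException, InterruptedException {
        // One executor per set of participating TransactionAware instances
        TransactionExecutor executor = factory.createExecutor(Collections.singleton(dataset));
        executor.execute(new TransactionExecutor.Subroutine() {
            @Override
            public void apply() throws Exception {
                // Perform writes against 'dataset' here; they are committed
                // when apply() returns normally, or rolled back on exception.
            }
        });
    }
}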

Example 12 with TransactionExecutor

Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.

From class AppMetadataStoreTest, method testgetRuns.

@Test
public void testgetRuns() throws Exception {
    DatasetId storeTable = NamespaceId.DEFAULT.dataset("testgetRuns");
    datasetFramework.addInstance(Table.class.getName(), storeTable, DatasetProperties.EMPTY);
    Table table = datasetFramework.getDataset(storeTable, ImmutableMap.<String, String>of(), null);
    Assert.assertNotNull(table);
    final AppMetadataStore metadataStoreDataset = new AppMetadataStore(table, cConf, new AtomicBoolean(false));
    TransactionExecutor txnl = txExecutorFactory.createExecutor(Collections.singleton((TransactionAware) metadataStoreDataset));
    // Add some run records
    final Set<String> expected = new TreeSet<>();
    final Set<String> expectedHalf = new TreeSet<>();
    final Set<ProgramRunId> programRunIdSet = new HashSet<>();
    final Set<ProgramRunId> programRunIdSetHalf = new HashSet<>();
    for (int i = 0; i < 100; ++i) {
        ApplicationId application = NamespaceId.DEFAULT.app("app");
        final ProgramId program = application.program(ProgramType.FLOW, "program");
        final RunId runId = RunIds.generate((i + 1) * 10000);
        expected.add(runId.toString());
        final int index = i;
        // Add every other runId
        if ((i % 2) == 0) {
            expectedHalf.add(runId.toString());
        }
        ProgramRunId programRunId = new ProgramRunId(program.getNamespace(), program.getApplication(), program.getType(), program.getProgram(), runId.toString());
        programRunIdSet.add(programRunId);
        // Add every other programRunId
        if ((i % 2) == 0) {
            programRunIdSetHalf.add(programRunId);
        }
        txnl.execute(new TransactionExecutor.Subroutine() {

            @Override
            public void apply() throws Exception {
                // Start the program and stop it
                metadataStoreDataset.recordProgramStart(program, runId.getId(), RunIds.getTime(runId, TimeUnit.SECONDS), null, null, null);
                metadataStoreDataset.recordProgramStop(program, runId.getId(), RunIds.getTime(runId, TimeUnit.SECONDS), ProgramRunStatus.values()[index % ProgramRunStatus.values().length], null);
            }
        });
    }
    txnl.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            Map<ProgramRunId, RunRecordMeta> runMap = metadataStoreDataset.getRuns(programRunIdSet);
            Set<String> actual = new TreeSet<>();
            for (Map.Entry<ProgramRunId, RunRecordMeta> entry : runMap.entrySet()) {
                actual.add(entry.getValue().getPid());
            }
            Assert.assertEquals(expected, actual);
            Map<ProgramRunId, RunRecordMeta> runMapHalf = metadataStoreDataset.getRuns(programRunIdSetHalf);
            Set<String> actualHalf = new TreeSet<>();
            for (Map.Entry<ProgramRunId, RunRecordMeta> entry : runMapHalf.entrySet()) {
                actualHalf.add(entry.getValue().getPid());
            }
            Assert.assertEquals(expectedHalf, actualHalf);
        }
    });
}
Also used : Table(co.cask.cdap.api.dataset.table.Table) TreeSet(java.util.TreeSet) HashSet(java.util.HashSet) Set(java.util.Set) TransactionExecutor(org.apache.tephra.TransactionExecutor) ProgramId(co.cask.cdap.proto.id.ProgramId) TransactionFailureException(org.apache.tephra.TransactionFailureException) DatasetId(co.cask.cdap.proto.id.DatasetId) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) TransactionAware(org.apache.tephra.TransactionAware) ProgramRunId(co.cask.cdap.proto.id.ProgramRunId) ApplicationId(co.cask.cdap.proto.id.ApplicationId) RunId(org.apache.twill.api.RunId) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) Test(org.junit.Test)
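The test above performs its assertions inside a Subroutine. When a value needs to escape the transaction instead, Tephra's TransactionExecutor.Function form returns it to the caller. A minimal sketch, using a hypothetical dataset type parameter rather than a CDAP class:

import org.apache.tephra.TransactionExecutor;
import org.apache.tephra.TransactionFailureException;

final class TxReadSketch {

    <D> long countRows(TransactionExecutor executor, D dataset)
            throws TransactionFailureException, InterruptedException {
        // execute(Function, input) runs apply() in a transaction and hands
        // the return value back to the caller after commit.
        return executor.execute(new TransactionExecutor.Function<D, Long>() {
            @Override
            public Long apply(D input) throws Exception {
                // Read from 'input' here and return the computed result.
                return 0L; // placeholder for a real count
            }
        }, dataset);
    }
}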

Example 13 with TransactionExecutor

Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.

From class InMemoryStreamFileWriterFactory, method create.

@Override
public FileWriter<StreamEvent> create(StreamConfig config, int generation) throws IOException {
    final QueueProducer producer = queueClientFactory.createProducer(QueueName.fromStream(config.getStreamId()));
    final List<TransactionAware> txAwares = Lists.newArrayList();
    if (producer instanceof TransactionAware) {
        txAwares.add((TransactionAware) producer);
    }
    final TransactionExecutor txExecutor = executorFactory.createExecutor(txAwares);
    // Adapt the FileWriter interface into Queue2Producer
    return new FileWriter<StreamEvent>() {

        private final List<StreamEvent> events = Lists.newArrayList();

        @Override
        public void append(StreamEvent event) throws IOException {
            events.add(event);
        }

        @Override
        public void appendAll(Iterator<? extends StreamEvent> events) throws IOException {
            Iterators.addAll(this.events, events);
        }

        @Override
        public void close() throws IOException {
            producer.close();
        }

        @Override
        public void flush() throws IOException {
            try {
                txExecutor.execute(new TransactionExecutor.Subroutine() {

                    @Override
                    public void apply() throws Exception {
                        for (StreamEvent event : events) {
                            producer.enqueue(new QueueEntry(STREAM_EVENT_CODEC.encodePayload(event)));
                        }
                        events.clear();
                    }
                });
            } catch (TransactionFailureException e) {
                throw new IOException(e);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new InterruptedIOException();
            }
        }
    };
}
Also used : InterruptedIOException(java.io.InterruptedIOException) FileWriter(co.cask.cdap.data.file.FileWriter) StreamEvent(co.cask.cdap.api.flow.flowlet.StreamEvent) TransactionExecutor(org.apache.tephra.TransactionExecutor) IOException(java.io.IOException) QueueEntry(co.cask.cdap.data2.queue.QueueEntry) TransactionFailureException(org.apache.tephra.TransactionFailureException) QueueProducer(co.cask.cdap.data2.queue.QueueProducer) TransactionAware(org.apache.tephra.TransactionAware) Iterator(java.util.Iterator) List(java.util.List)
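The factory-created executor above is the usual route; when only a TransactionSystemClient is at hand (for example in tests), an executor can also be built directly with DefaultTransactionExecutor. A minimal sketch, assuming such a client is available; the class and method names are hypothetical:

import java.util.Collections;
import org.apache.tephra.DefaultTransactionExecutor;
import org.apache.tephra.TransactionAware;
import org.apache.tephra.TransactionExecutor;
import org.apache.tephra.TransactionSystemClient;

final class DirectExecutorSketch {

    TransactionExecutor newExecutor(TransactionSystemClient txClient, TransactionAware txAware) {
        // Builds an executor over a single TransactionAware without going
        // through an injected TransactionExecutorFactory.
        return new DefaultTransactionExecutor(txClient, Collections.singleton(txAware));
    }
}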

Example 14 with TransactionExecutor

Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.

From class LineageStore, method execute.

private <T> T execute(TransactionExecutor.Function<LineageDataset, T> func) {
    LineageDataset lineageDataset = newLineageDataset();
    TransactionExecutor txExecutor = Transactions.createTransactionExecutor(executorFactory, lineageDataset);
    return txExecutor.executeUnchecked(func, lineageDataset);
}
Also used : TransactionExecutor(org.apache.tephra.TransactionExecutor)
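A generic sketch of the same helper pattern with a hypothetical dataset type: executeUnchecked rethrows transaction failures as unchecked exceptions, which is why the helper needs no throws clause. The names below are illustrative, not CDAP code:

import java.util.Collections;
import org.apache.tephra.TransactionAware;
import org.apache.tephra.TransactionExecutor;
import org.apache.tephra.TransactionExecutorFactory;

final class ExecuteHelperSketch<D extends TransactionAware> {

    private final TransactionExecutorFactory factory;
    private final D dataset;

    ExecuteHelperSketch(TransactionExecutorFactory factory, D dataset) {
        this.factory = factory;
        this.dataset = dataset;
    }

    <T> T execute(TransactionExecutor.Function<D, T> func) {
        // Create an executor over the dataset, run func transactionally,
        // and return its result; failures surface as unchecked exceptions.
        TransactionExecutor executor =
            factory.createExecutor(Collections.singleton((TransactionAware) dataset));
        return executor.executeUnchecked(func, dataset);
    }
}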

Example 15 with TransactionExecutor

Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.

From class DefaultMetadataStore, method execute.

private void execute(TransactionExecutor.Procedure<MetadataDataset> func, MetadataScope scope) {
    MetadataDataset metadataDataset = newMetadataDataset(scope);
    TransactionExecutor txExecutor = Transactions.createTransactionExecutor(txExecutorFactory, metadataDataset);
    txExecutor.executeUnchecked(func, metadataDataset);
}
Also used : MetadataDataset(co.cask.cdap.data2.metadata.dataset.MetadataDataset) TransactionExecutor(org.apache.tephra.TransactionExecutor)
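For completeness, a minimal sketch of the Procedure variant used above: like Function it receives the dataset as input, but it returns nothing and is meant for side-effect-only transactional work. The class and method names are hypothetical:

import org.apache.tephra.TransactionExecutor;

final class ProcedureSketch {

    <D> void runInTx(TransactionExecutor executor, D dataset) {
        executor.executeUnchecked(new TransactionExecutor.Procedure<D>() {
            @Override
            public void apply(D input) throws Exception {
                // Mutate 'input' here; executeUnchecked rethrows any
                // transaction failure as an unchecked exception.
            }
        }, dataset);
    }
}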

Aggregations

TransactionExecutor (org.apache.tephra.TransactionExecutor): 79 usages
Test (org.junit.Test): 64 usages
TransactionFailureException (org.apache.tephra.TransactionFailureException): 31 usages
DatasetId (co.cask.cdap.proto.id.DatasetId): 30 usages
Table (co.cask.cdap.api.dataset.table.Table): 15 usages
List (java.util.List): 15 usages
NoSuchElementException (java.util.NoSuchElementException): 15 usages
TransactionAware (org.apache.tephra.TransactionAware): 15 usages
IOException (java.io.IOException): 12 usages
Row (co.cask.cdap.api.dataset.table.Row): 11 usages
DefaultTransactionExecutor (org.apache.tephra.DefaultTransactionExecutor): 11 usages
Put (co.cask.cdap.api.dataset.table.Put): 10 usages
ImmutableList (com.google.common.collect.ImmutableList): 9 usages
ProgramId (co.cask.cdap.proto.id.ProgramId): 8 usages
DatasetManagementException (co.cask.cdap.api.dataset.DatasetManagementException): 6 usages
Scanner (co.cask.cdap.api.dataset.table.Scanner): 6 usages
Map (java.util.Map): 6 usages
ProgramRunId (co.cask.cdap.proto.id.ProgramRunId): 5 usages
TypeToken (com.google.common.reflect.TypeToken): 5 usages
Iterator (java.util.Iterator): 5 usages