
Example 6 with TransactionSystemClient

Use of org.apache.tephra.TransactionSystemClient in project cdap by caskdata.

From the class FlowTest, method testFlow:

@Test
public void testFlow() throws Exception {
    final ApplicationWithPrograms app = AppFabricTestHelper.deployApplicationWithManager(WordCountApp.class, TEMP_FOLDER_SUPPLIER);
    List<ProgramController> controllers = Lists.newArrayList();
    for (ProgramDescriptor programDescriptor : app.getPrograms()) {
        // running mapreduce is out of scope of this test (there's a separate unit test for that)
        if (programDescriptor.getProgramId().getType() == ProgramType.MAPREDUCE) {
            continue;
        }
        controllers.add(AppFabricTestHelper.submit(app, programDescriptor.getSpecification().getClassName(), new BasicArguments(), TEMP_FOLDER_SUPPLIER));
    }
    TimeUnit.SECONDS.sleep(1);
    TransactionSystemClient txSystemClient = AppFabricTestHelper.getInjector().getInstance(TransactionSystemClient.class);
    QueueName queueName = QueueName.fromStream(app.getApplicationId().getNamespace(), "text");
    QueueClientFactory queueClientFactory = AppFabricTestHelper.getInjector().getInstance(QueueClientFactory.class);
    QueueProducer producer = queueClientFactory.createProducer(queueName);
    // start a transaction so the queue writes below are transactional
    Transaction tx = txSystemClient.startShort();
    ((TransactionAware) producer).startTx(tx);
    StreamEventCodec codec = new StreamEventCodec();
    for (int i = 0; i < 10; i++) {
        String msg = "Testing message " + i;
        StreamEvent event = new StreamEvent(ImmutableMap.<String, String>of(), ByteBuffer.wrap(msg.getBytes(Charsets.UTF_8)));
        producer.enqueue(new QueueEntry(codec.encodePayload(event)));
    }
    // commit tx
    ((TransactionAware) producer).commitTx();
    txSystemClient.commitOrThrow(tx);
    // Query the service for at most 10 seconds for the expected result
    Gson gson = new Gson();
    DiscoveryServiceClient discoveryServiceClient = AppFabricTestHelper.getInjector().getInstance(DiscoveryServiceClient.class);
    ServiceDiscovered serviceDiscovered = discoveryServiceClient.discover(String.format("service.%s.%s.%s", DefaultId.NAMESPACE.getNamespace(), "WordCountApp", "WordFrequencyService"));
    EndpointStrategy endpointStrategy = new RandomEndpointStrategy(serviceDiscovered);
    int trials = 0;
    while (trials++ < 10) {
        Discoverable discoverable = endpointStrategy.pick(2, TimeUnit.SECONDS);
        URL url = new URL(String.format("http://%s:%d/v3/namespaces/default/apps/%s/services/%s/methods/%s/%s", discoverable.getSocketAddress().getHostName(), discoverable.getSocketAddress().getPort(), "WordCountApp", "WordFrequencyService", "wordfreq", "text:Testing"));
        try {
            HttpURLConnection urlConn = (HttpURLConnection) url.openConnection();
            Map<String, Long> responseContent = gson.fromJson(new InputStreamReader(urlConn.getInputStream(), Charsets.UTF_8), new TypeToken<Map<String, Long>>() {
            }.getType());
            LOG.info("Service response: " + responseContent);
            if (ImmutableMap.of("text:Testing", 10L).equals(responseContent)) {
                break;
            }
        } catch (Throwable t) {
            LOG.info("Exception when trying to query service.", t);
        }
        TimeUnit.SECONDS.sleep(1);
    }
    // trials ends at 1..10 on success and at 11 only if all attempts were exhausted
    Assert.assertTrue(trials <= 10);
    for (ProgramController controller : controllers) {
        controller.stop().get();
    }
}
Also used: DiscoveryServiceClient(org.apache.twill.discovery.DiscoveryServiceClient) Gson(com.google.gson.Gson) URL(java.net.URL) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) StreamEventCodec(co.cask.cdap.common.stream.StreamEventCodec) HttpURLConnection(java.net.HttpURLConnection) QueueProducer(co.cask.cdap.data2.queue.QueueProducer) EndpointStrategy(co.cask.cdap.common.discovery.EndpointStrategy) RandomEndpointStrategy(co.cask.cdap.common.discovery.RandomEndpointStrategy) ApplicationWithPrograms(co.cask.cdap.internal.app.deploy.pipeline.ApplicationWithPrograms) ProgramDescriptor(co.cask.cdap.app.program.ProgramDescriptor) BasicArguments(co.cask.cdap.internal.app.runtime.BasicArguments) QueueName(co.cask.cdap.common.queue.QueueName) ProgramController(co.cask.cdap.app.runtime.ProgramController) Discoverable(org.apache.twill.discovery.Discoverable) InputStreamReader(java.io.InputStreamReader) StreamEvent(co.cask.cdap.api.flow.flowlet.StreamEvent) ServiceDiscovered(org.apache.twill.discovery.ServiceDiscovered) QueueEntry(co.cask.cdap.data2.queue.QueueEntry) Transaction(org.apache.tephra.Transaction) TransactionAware(org.apache.tephra.TransactionAware) TypeToken(com.google.common.reflect.TypeToken) QueueClientFactory(co.cask.cdap.data2.queue.QueueClientFactory) Test(org.junit.Test)
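
The test above drives Tephra's client-side commit protocol by hand: startShort() opens a transaction, startTx() hands it to the TransactionAware producer, commitTx() flushes the producer's buffered writes, and commitOrThrow() makes them visible. A minimal sketch of the full protocol, including the conflict-detection and rollback steps the test omits (the helper name runInTx and the Runnable parameter are illustrative, not CDAP API):

import org.apache.tephra.Transaction;
import org.apache.tephra.TransactionAware;
import org.apache.tephra.TransactionFailureException;
import org.apache.tephra.TransactionSystemClient;

static void runInTx(TransactionSystemClient txClient, TransactionAware aware, Runnable writes) throws Exception {
    Transaction tx = txClient.startShort();
    aware.startTx(tx);
    try {
        writes.run();
        // ask the transaction manager to check the change set for write conflicts
        txClient.canCommitOrThrow(tx, aware.getTxChanges());
        // flush the client-side buffered writes to storage
        if (!aware.commitTx()) {
            throw new TransactionFailureException("commitTx failed for " + aware.getTransactionAwareName());
        }
        // make the writes visible to transactions started after this point
        txClient.commitOrThrow(tx);
        aware.postTxCommit();
    } catch (Exception e) {
        // undo client-side changes and release the transaction server-side
        aware.rollbackTx();
        txClient.abort(tx);
        throw e;
    }
}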

Example 7 with TransactionSystemClient

Use of org.apache.tephra.TransactionSystemClient in project cdap by caskdata.

From the class OpenCloseDataSetTest, method testDataSetsAreClosed:

@Test(timeout = 120000)
public void testDataSetsAreClosed() throws Exception {
    final String tableName = "foo";
    TrackingTable.resetTracker();
    ApplicationWithPrograms app = AppFabricTestHelper.deployApplicationWithManager(DummyAppWithTrackingTable.class, TEMP_FOLDER_SUPPLIER);
    List<ProgramController> controllers = Lists.newArrayList();
    // start the programs
    for (ProgramDescriptor programDescriptor : app.getPrograms()) {
        if (programDescriptor.getProgramId().getType().equals(ProgramType.MAPREDUCE)) {
            continue;
        }
        controllers.add(AppFabricTestHelper.submit(app, programDescriptor.getSpecification().getClassName(), new BasicArguments(), TEMP_FOLDER_SUPPLIER));
    }
    // write some data to queue
    TransactionSystemClient txSystemClient = AppFabricTestHelper.getInjector().getInstance(TransactionSystemClient.class);
    QueueName queueName = QueueName.fromStream(app.getApplicationId().getNamespace(), "xx");
    QueueClientFactory queueClientFactory = AppFabricTestHelper.getInjector().getInstance(QueueClientFactory.class);
    QueueProducer producer = queueClientFactory.createProducer(queueName);
    // start a transaction so the queue writes below are transactional
    Transaction tx = txSystemClient.startShort();
    ((TransactionAware) producer).startTx(tx);
    StreamEventCodec codec = new StreamEventCodec();
    for (int i = 0; i < 4; i++) {
        String msg = "x" + i;
        StreamEvent event = new StreamEvent(ImmutableMap.<String, String>of(), ByteBuffer.wrap(msg.getBytes(Charsets.UTF_8)));
        producer.enqueue(new QueueEntry(codec.encodePayload(event)));
    }
    // commit tx
    ((TransactionAware) producer).commitTx();
    txSystemClient.commitOrThrow(tx);
    while (TrackingTable.getTracker(tableName, "write") < 4) {
        TimeUnit.MILLISECONDS.sleep(50);
    }
    // get the number of writes to the foo table
    Assert.assertEquals(4, TrackingTable.getTracker(tableName, "write"));
    // only 2 "open" calls should be tracked:
    // 1. the flow has started with a single flowlet (the service is loaded lazily, on the first request)
    // 2. DatasetSystemMetadataWriter also instantiates the dataset because it needs to add some system tags
    // for the dataset
    Assert.assertEquals(2, TrackingTable.getTracker(tableName, "open"));
    // now send a request to the service
    Gson gson = new Gson();
    DiscoveryServiceClient discoveryServiceClient = AppFabricTestHelper.getInjector().getInstance(DiscoveryServiceClient.class);
    Discoverable discoverable = new RandomEndpointStrategy(discoveryServiceClient.discover(String.format("service.%s.%s.%s", DefaultId.NAMESPACE.getEntityName(), "dummy", "DummyService"))).pick(5, TimeUnit.SECONDS);
    Assert.assertNotNull(discoverable);
    HttpClient client = new DefaultHttpClient();
    HttpGet get = new HttpGet(String.format("http://%s:%d/v3/namespaces/default/apps/%s/services/%s/methods/%s", discoverable.getSocketAddress().getHostName(), discoverable.getSocketAddress().getPort(), "dummy", "DummyService", "x1"));
    HttpResponse response = client.execute(get);
    String responseContent = gson.fromJson(new InputStreamReader(response.getEntity().getContent(), Charsets.UTF_8), String.class);
    client.getConnectionManager().shutdown();
    Assert.assertEquals("x1", responseContent);
    // now the dataset must have a read and another open operation
    Assert.assertEquals(1, TrackingTable.getTracker(tableName, "read"));
    Assert.assertEquals(3, TrackingTable.getTracker(tableName, "open"));
    // The dataset that was instantiated by the DatasetSystemMetadataWriter should have been closed
    Assert.assertEquals(1, TrackingTable.getTracker(tableName, "close"));
    // stop all programs, they should both close the data set foo
    for (ProgramController controller : controllers) {
        controller.stop().get();
    }
    int timesOpened = TrackingTable.getTracker(tableName, "open");
    Assert.assertTrue(timesOpened >= 2);
    Assert.assertEquals(timesOpened, TrackingTable.getTracker(tableName, "close"));
    // now start the m/r job
    ProgramController controller = null;
    for (ProgramDescriptor programDescriptor : app.getPrograms()) {
        if (programDescriptor.getProgramId().getType().equals(ProgramType.MAPREDUCE)) {
            controller = AppFabricTestHelper.submit(app, programDescriptor.getSpecification().getClassName(), new BasicArguments(), TEMP_FOLDER_SUPPLIER);
        }
    }
    Assert.assertNotNull(controller);
    while (!controller.getState().equals(ProgramController.State.COMPLETED)) {
        TimeUnit.MILLISECONDS.sleep(100);
    }
    // The M/R job is done; the mapper and the M/R client should each have opened and closed the dataset foo.
    // We don't know the exact number of times it was opened, but it is at least once, and it must be closed
    // the same number of times.
    Assert.assertTrue(timesOpened < TrackingTable.getTracker(tableName, "open"));
    Assert.assertEquals(TrackingTable.getTracker(tableName, "open"), TrackingTable.getTracker(tableName, "close"));
    Assert.assertTrue(0 < TrackingTable.getTracker("bar", "open"));
    Assert.assertEquals(TrackingTable.getTracker("bar", "open"), TrackingTable.getTracker("bar", "close"));
}
Also used: DiscoveryServiceClient(org.apache.twill.discovery.DiscoveryServiceClient) HttpGet(org.apache.http.client.methods.HttpGet) Gson(com.google.gson.Gson) DefaultHttpClient(org.apache.http.impl.client.DefaultHttpClient) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) StreamEventCodec(co.cask.cdap.common.stream.StreamEventCodec) QueueProducer(co.cask.cdap.data2.queue.QueueProducer) ApplicationWithPrograms(co.cask.cdap.internal.app.deploy.pipeline.ApplicationWithPrograms) ProgramDescriptor(co.cask.cdap.app.program.ProgramDescriptor) BasicArguments(co.cask.cdap.internal.app.runtime.BasicArguments) QueueName(co.cask.cdap.common.queue.QueueName) ProgramController(co.cask.cdap.app.runtime.ProgramController) Discoverable(org.apache.twill.discovery.Discoverable) InputStreamReader(java.io.InputStreamReader) StreamEvent(co.cask.cdap.api.flow.flowlet.StreamEvent) HttpResponse(org.apache.http.HttpResponse) QueueEntry(co.cask.cdap.data2.queue.QueueEntry) Transaction(org.apache.tephra.Transaction) TransactionAware(org.apache.tephra.TransactionAware) HttpClient(org.apache.http.client.HttpClient) QueueClientFactory(co.cask.cdap.data2.queue.QueueClientFactory) RandomEndpointStrategy(co.cask.cdap.common.discovery.RandomEndpointStrategy) Test(org.junit.Test)
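
TrackingTable here is a test-only dataset that records lifecycle operations in static state, so assertions can read the counts across program, service, and MapReduce boundaries. A rough sketch of the counting mechanics it implies (the class and method bodies below are assumptions; the real TrackingTable wraps an actual CDAP table):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

public final class TrackingSketch {
    // maps "tableName:op" (e.g. "foo:open") to the number of times the op was seen
    private static final ConcurrentMap<String, AtomicInteger> TRACKER = new ConcurrentHashMap<String, AtomicInteger>();

    public static void track(String table, String op) {
        String key = table + ":" + op;
        AtomicInteger count = TRACKER.get(key);
        if (count == null) {
            AtomicInteger created = new AtomicInteger();
            count = TRACKER.putIfAbsent(key, created);
            if (count == null) {
                count = created;
            }
        }
        count.incrementAndGet();
    }

    public static int getTracker(String table, String op) {
        AtomicInteger count = TRACKER.get(table + ":" + op);
        return count == null ? 0 : count.get();
    }

    public static void resetTracker() {
        TRACKER.clear();
    }
}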

Example 8 with TransactionSystemClient

Use of org.apache.tephra.TransactionSystemClient in project cdap by caskdata.

From the class TransactionHttpHandlerTest, method testInvalidateTx:

/**
 * Tests invalidating a transaction.
 */
@Test
public void testInvalidateTx() throws Exception {
    TransactionSystemClient txClient = getTxClient();
    Transaction tx1 = txClient.startShort();
    HttpResponse response = doPost("/v3/transactions/" + tx1.getWritePointer() + "/invalidate");
    Assert.assertEquals(200, response.getStatusLine().getStatusCode());
    Transaction tx2 = txClient.startShort();
    txClient.commitOrThrow(tx2);
    response = doPost("/v3/transactions/" + tx2.getWritePointer() + "/invalidate");
    Assert.assertEquals(409, response.getStatusLine().getStatusCode());
    Assert.assertEquals(400, doPost("/v3/transactions/foobar/invalidate").getStatusLine().getStatusCode());
}
Also used: TransactionSystemClient(org.apache.tephra.TransactionSystemClient) Transaction(org.apache.tephra.Transaction) HttpResponse(org.apache.http.HttpResponse) Test(org.junit.Test)
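
The three status codes exercise the invalidation rules: an in-progress transaction can be invalidated (200), a committed one cannot (409), and a non-numeric id is rejected (400). The handler presumably delegates to TransactionSystemClient.invalidate(long), whose boolean return mirrors the first two cases; a minimal sketch under that assumption, reusing txClient and the two transactions from the test:

// invalidation succeeds while tx1 is still in progress (the HTTP 200 case)
Transaction tx1 = txClient.startShort();
Assert.assertTrue(txClient.invalidate(tx1.getWritePointer()));

// invalidation is refused once tx2 has committed (the HTTP 409 case)
Transaction tx2 = txClient.startShort();
txClient.commitOrThrow(tx2);
Assert.assertFalse(txClient.invalidate(tx2.getWritePointer()));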

Example 9 with TransactionSystemClient

Use of org.apache.tephra.TransactionSystemClient in project cdap by caskdata.

From the class ProgramScheduleStoreDatasetTest, method testDeleteScheduleByTriggeringProgram:

@Test
public void testDeleteScheduleByTriggeringProgram() throws Exception {
    DatasetFramework dsFramework = getInjector().getInstance(DatasetFramework.class);
    TransactionSystemClient txClient = getInjector().getInstance(TransactionSystemClient.class);
    TransactionExecutorFactory txExecutorFactory = new DynamicTransactionExecutorFactory(txClient);
    dsFramework.truncateInstance(Schedulers.STORE_DATASET_ID);
    final ProgramScheduleStoreDataset store = dsFramework.getDataset(Schedulers.STORE_DATASET_ID, new HashMap<String, String>(), null);
    Assert.assertNotNull(store);
    TransactionExecutor txExecutor = txExecutorFactory.createExecutor(Collections.singleton((TransactionAware) store));
    SatisfiableTrigger prog1Trigger = new ProgramStatusTrigger(PROG1_ID, ProgramStatus.COMPLETED, ProgramStatus.FAILED, ProgramStatus.KILLED);
    SatisfiableTrigger prog2Trigger = new ProgramStatusTrigger(PROG2_ID, ProgramStatus.COMPLETED, ProgramStatus.FAILED, ProgramStatus.KILLED);
    final ProgramSchedule sched1 = new ProgramSchedule("sched1", "a program status trigger", PROG3_ID, ImmutableMap.of("propper", "popper"), prog1Trigger, ImmutableList.<Constraint>of());
    final ProgramSchedule sched2 = new ProgramSchedule("sched2", "a program status trigger", PROG3_ID, ImmutableMap.of("propper", "popper"), prog2Trigger, ImmutableList.<Constraint>of());
    final ProgramSchedule schedOr = new ProgramSchedule("schedOr", "an OR trigger", PROG3_ID, ImmutableMap.of("propper", "popper"), new OrTrigger(new PartitionTrigger(DS1_ID, 1), prog1Trigger, new AndTrigger(new OrTrigger(prog1Trigger, prog2Trigger), new PartitionTrigger(DS2_ID, 1)), new OrTrigger(prog2Trigger)), ImmutableList.<Constraint>of());
    final ProgramSchedule schedAnd = new ProgramSchedule("schedAnd", "an AND trigger", PROG3_ID, ImmutableMap.of("propper", "popper"), new AndTrigger(new PartitionTrigger(DS1_ID, 1), prog2Trigger, new AndTrigger(prog1Trigger, new PartitionTrigger(DS2_ID, 1))), ImmutableList.<Constraint>of());
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            store.addSchedules(ImmutableList.of(sched1, sched2, schedOr, schedAnd));
        }
    });
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // ProgramStatus event for PROG1_ID should trigger only sched1, schedOr, schedAnd
            Assert.assertEquals(ImmutableSet.of(sched1, schedOr, schedAnd), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.COMPLETED))));
            // ProgramStatus event for PROG2_ID should trigger only sched2, schedOr, schedAnd
            Assert.assertEquals(ImmutableSet.of(sched2, schedOr, schedAnd), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG2_ID, ProgramStatus.FAILED))));
        }
    });
    // update or delete all schedules triggered by PROG1_ID
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            store.modifySchedulesTriggeredByDeletedProgram(PROG1_ID);
        }
    });
    final ProgramSchedule schedOrNew = new ProgramSchedule("schedOr", "an OR trigger", PROG3_ID, ImmutableMap.of("propper", "popper"), new OrTrigger(new PartitionTrigger(DS1_ID, 1), new AndTrigger(prog2Trigger, new PartitionTrigger(DS2_ID, 1)), prog2Trigger), ImmutableList.of());
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // ProgramStatus event for PROG1_ID should trigger no schedules after modifying schedules triggered by it
            Assert.assertEquals(Collections.emptySet(), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.COMPLETED))));
            Assert.assertEquals(Collections.emptySet(), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.FAILED))));
            Assert.assertEquals(Collections.emptySet(), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.KILLED))));
            // ProgramStatus event for PROG2_ID should trigger only sched2 and schedOrNew
            Assert.assertEquals(ImmutableSet.of(sched2, schedOrNew), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG2_ID, ProgramStatus.FAILED))));
        }
    });
    // update or delete all schedules triggered by PROG2_ID
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            store.modifySchedulesTriggeredByDeletedProgram(PROG2_ID);
        }
    });
    final ProgramSchedule schedOrNew1 = new ProgramSchedule("schedOr", "an OR trigger", PROG3_ID, ImmutableMap.of("propper", "popper"), new PartitionTrigger(DS1_ID, 1), ImmutableList.of());
    final Set<ProgramSchedule> ds1Schedules = new HashSet<>();
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // ProgramStatus event for PROG2_ID should trigger no schedules after modifying schedules triggered by it
            Assert.assertEquals(Collections.emptySet(), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG2_ID, ProgramStatus.COMPLETED))));
            Assert.assertEquals(Collections.emptySet(), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG2_ID, ProgramStatus.FAILED))));
            Assert.assertEquals(Collections.emptySet(), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG2_ID, ProgramStatus.KILLED))));
            // event for DS1 should trigger only schedOrNew1 since all other schedules are deleted
            ds1Schedules.addAll(toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID))));
        }
    });
    Assert.assertEquals(ImmutableSet.of(schedOrNew1), ds1Schedules);
}
Also used: OrTrigger(co.cask.cdap.internal.app.runtime.schedule.trigger.OrTrigger) DynamicTransactionExecutorFactory(co.cask.cdap.data.runtime.DynamicTransactionExecutorFactory) TransactionExecutor(org.apache.tephra.TransactionExecutor) DatasetManagementException(co.cask.cdap.api.dataset.DatasetManagementException) TransactionExecutorFactory(co.cask.cdap.data2.transaction.TransactionExecutorFactory) AndTrigger(co.cask.cdap.internal.app.runtime.schedule.trigger.AndTrigger) DatasetFramework(co.cask.cdap.data2.dataset2.DatasetFramework) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) SatisfiableTrigger(co.cask.cdap.internal.app.runtime.schedule.trigger.SatisfiableTrigger) ProgramSchedule(co.cask.cdap.internal.app.runtime.schedule.ProgramSchedule) TransactionAware(org.apache.tephra.TransactionAware) ProgramStatusTrigger(co.cask.cdap.internal.app.runtime.schedule.trigger.ProgramStatusTrigger) PartitionTrigger(co.cask.cdap.internal.app.runtime.schedule.trigger.PartitionTrigger) HashSet(java.util.HashSet) Test(org.junit.Test)
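
The DynamicTransactionExecutorFactory wraps the store's TransactionAware in an executor so that each Subroutine runs in its own transaction, with commit on success and rollback on failure handled for you. Tephra ships an executor you can construct directly; a minimal sketch using DefaultTransactionExecutor (the helper shape is illustrative, and the test's factory may configure retry behavior differently):

import java.util.Collections;
import org.apache.tephra.DefaultTransactionExecutor;
import org.apache.tephra.TransactionAware;
import org.apache.tephra.TransactionExecutor;
import org.apache.tephra.TransactionFailureException;
import org.apache.tephra.TransactionSystemClient;

static void runOnce(TransactionSystemClient txClient, final TransactionAware store)
    throws TransactionFailureException, InterruptedException {
    TransactionExecutor executor =
        new DefaultTransactionExecutor(txClient, Collections.singleton(store));
    executor.execute(new TransactionExecutor.Subroutine() {
        @Override
        public void apply() throws Exception {
            // every operation on `store` here runs inside a single transaction;
            // a thrown exception rolls back the changes and aborts the transaction
        }
    });
}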

Example 10 with TransactionSystemClient

Use of org.apache.tephra.TransactionSystemClient in project cdap by caskdata.

From the class MapReduceTaskContextProvider, method createCacheLoader:

/**
 * Creates a {@link CacheLoader} for the task context cache.
 */
private CacheLoader<ContextCacheKey, BasicMapReduceTaskContext> createCacheLoader(final Injector injector) {
    final DiscoveryServiceClient discoveryServiceClient = injector.getInstance(DiscoveryServiceClient.class);
    final DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
    final SecureStore secureStore = injector.getInstance(SecureStore.class);
    final SecureStoreManager secureStoreManager = injector.getInstance(SecureStoreManager.class);
    final MessagingService messagingService = injector.getInstance(MessagingService.class);
    // Multiple instances of BasicMapReduceTaskContext can share the same program.
    final AtomicReference<Program> programRef = new AtomicReference<>();
    return new CacheLoader<ContextCacheKey, BasicMapReduceTaskContext>() {

        @Override
        public BasicMapReduceTaskContext load(ContextCacheKey key) throws Exception {
            TaskAttemptID taskAttemptId = key.getTaskAttemptID();
            // taskAttemptId could be null if this is used from an org.apache.hadoop.mapreduce.Partitioner or
            // from an org.apache.hadoop.io.RawComparator, in which case we can get the JobId from the conf. Note that the
            // JobId isn't in the conf for the OutputCommitter#setupJob method, in which case we use the taskAttemptId
            Path txFile = MainOutputCommitter.getTxFile(key.getConfiguration(), taskAttemptId != null ? taskAttemptId.getJobID() : null);
            FileSystem fs = txFile.getFileSystem(key.getConfiguration());
            Preconditions.checkArgument(fs.exists(txFile));
            Transaction tx;
            try (FSDataInputStream txFileInputStream = fs.open(txFile)) {
                byte[] txByteArray = ByteStreams.toByteArray(txFileInputStream);
                tx = new TransactionCodec().decode(txByteArray);
            }
            MapReduceContextConfig contextConfig = new MapReduceContextConfig(key.getConfiguration());
            MapReduceClassLoader classLoader = MapReduceClassLoader.getFromConfiguration(key.getConfiguration());
            Program program = programRef.get();
            if (program == null) {
                // Creation of program is relatively cheap, so just create and do compare and set.
                programRef.compareAndSet(null, createProgram(contextConfig, classLoader.getProgramClassLoader()));
                program = programRef.get();
            }
            WorkflowProgramInfo workflowInfo = contextConfig.getWorkflowProgramInfo();
            DatasetFramework programDatasetFramework = workflowInfo == null ? datasetFramework : NameMappedDatasetFramework.createFromWorkflowProgramInfo(datasetFramework, workflowInfo, program.getApplicationSpecification());
            // Setup dataset framework context, if required
            if (programDatasetFramework instanceof ProgramContextAware) {
                ProgramRunId programRunId = program.getId().run(ProgramRunners.getRunId(contextConfig.getProgramOptions()));
                ((ProgramContextAware) programDatasetFramework).setContext(new BasicProgramContext(programRunId));
            }
            MapReduceSpecification spec = program.getApplicationSpecification().getMapReduce().get(program.getName());
            MetricsCollectionService metricsCollectionService = null;
            MapReduceMetrics.TaskType taskType = null;
            String taskId = null;
            ProgramOptions options = contextConfig.getProgramOptions();
            // taskAttemptId can be null when invoked from a Partitioner or a RawComparator (see above)
            if (taskAttemptId != null) {
                taskId = taskAttemptId.getTaskID().toString();
                if (MapReduceMetrics.TaskType.hasType(taskAttemptId.getTaskType())) {
                    taskType = MapReduceMetrics.TaskType.from(taskAttemptId.getTaskType());
                    // only mapper and reducer tasks need the metrics collection service
                    metricsCollectionService = injector.getInstance(MetricsCollectionService.class);
                    options = new SimpleProgramOptions(options.getProgramId(), options.getArguments(), new BasicArguments(RuntimeArguments.extractScope("task", taskType.toString().toLowerCase(), contextConfig.getProgramOptions().getUserArguments().asMap())), options.isDebug());
                }
            }
            CConfiguration cConf = injector.getInstance(CConfiguration.class);
            TransactionSystemClient txClient = injector.getInstance(TransactionSystemClient.class);
            return new BasicMapReduceTaskContext(program, options, cConf, taskType, taskId, spec, workflowInfo, discoveryServiceClient, metricsCollectionService, txClient, tx, programDatasetFramework, classLoader.getPluginInstantiator(), contextConfig.getLocalizedResources(), secureStore, secureStoreManager, authorizationEnforcer, authenticationContext, messagingService, mapReduceClassLoader);
        }
    };
}
Also used: DiscoveryServiceClient(org.apache.twill.discovery.DiscoveryServiceClient) TaskAttemptID(org.apache.hadoop.mapreduce.TaskAttemptID) NameMappedDatasetFramework(co.cask.cdap.internal.app.runtime.workflow.NameMappedDatasetFramework) DatasetFramework(co.cask.cdap.data2.dataset2.DatasetFramework) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) FileSystem(org.apache.hadoop.fs.FileSystem) SecureStoreManager(co.cask.cdap.api.security.store.SecureStoreManager) BasicArguments(co.cask.cdap.internal.app.runtime.BasicArguments) MapReduceMetrics(co.cask.cdap.app.metrics.MapReduceMetrics) Path(org.apache.hadoop.fs.Path) Program(co.cask.cdap.app.program.Program) DefaultProgram(co.cask.cdap.app.program.DefaultProgram) MetricsCollectionService(co.cask.cdap.api.metrics.MetricsCollectionService) MapReduceSpecification(co.cask.cdap.api.mapreduce.MapReduceSpecification) AtomicReference(java.util.concurrent.atomic.AtomicReference) BasicProgramContext(co.cask.cdap.internal.app.runtime.BasicProgramContext) SecureStore(co.cask.cdap.api.security.store.SecureStore) CConfiguration(co.cask.cdap.common.conf.CConfiguration) SimpleProgramOptions(co.cask.cdap.internal.app.runtime.SimpleProgramOptions) ProgramOptions(co.cask.cdap.app.runtime.ProgramOptions) MessagingService(co.cask.cdap.messaging.MessagingService) Transaction(org.apache.tephra.Transaction) WorkflowProgramInfo(co.cask.cdap.internal.app.runtime.workflow.WorkflowProgramInfo) TransactionCodec(org.apache.tephra.TransactionCodec) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) CacheLoader(com.google.common.cache.CacheLoader) ProgramRunId(co.cask.cdap.proto.id.ProgramRunId) ProgramContextAware(co.cask.cdap.data.ProgramContextAware)
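
The returned CacheLoader only becomes useful once it is wrapped in a Guava LoadingCache, which is what lets repeated lookups with the same ContextCacheKey share one BasicMapReduceTaskContext. A sketch of that consuming side (the eviction policy shown is an assumption; the provider's real cache configuration is not part of this snippet):

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.LoadingCache;
import java.util.concurrent.TimeUnit;

LoadingCache<ContextCacheKey, BasicMapReduceTaskContext> taskContexts =
    CacheBuilder.newBuilder()
        // assumed policy: drop contexts for task attempts that have gone idle
        .expireAfterAccess(10, TimeUnit.MINUTES)
        .build(createCacheLoader(injector));

// loads through createCacheLoader on first access, then returns the cached context
BasicMapReduceTaskContext context = taskContexts.getUnchecked(cacheKey);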

Aggregations

TransactionSystemClient (org.apache.tephra.TransactionSystemClient): 18 usages
Test (org.junit.Test): 12 usages
Transaction (org.apache.tephra.Transaction): 10 usages
TransactionAware (org.apache.tephra.TransactionAware): 6 usages
CConfiguration (co.cask.cdap.common.conf.CConfiguration): 5 usages
DatasetFramework (co.cask.cdap.data2.dataset2.DatasetFramework): 5 usages
HttpResponse (org.apache.http.HttpResponse): 5 usages
DynamicTransactionExecutorFactory (co.cask.cdap.data.runtime.DynamicTransactionExecutorFactory): 4 usages
DiscoveryServiceClient (org.apache.twill.discovery.DiscoveryServiceClient): 4 usages
NamespaceAdmin (co.cask.cdap.common.namespace.NamespaceAdmin): 3 usages
TransactionExecutorFactory (co.cask.cdap.data2.transaction.TransactionExecutorFactory): 3 usages
BasicArguments (co.cask.cdap.internal.app.runtime.BasicArguments): 3 usages
TransactionExecutor (org.apache.tephra.TransactionExecutor): 3 usages
DatasetManagementException (co.cask.cdap.api.dataset.DatasetManagementException): 2 usages
StreamEvent (co.cask.cdap.api.flow.flowlet.StreamEvent): 2 usages
MetricsCollectionService (co.cask.cdap.api.metrics.MetricsCollectionService): 2 usages
ProgramDescriptor (co.cask.cdap.app.program.ProgramDescriptor): 2 usages
ProgramController (co.cask.cdap.app.runtime.ProgramController): 2 usages
RandomEndpointStrategy (co.cask.cdap.common.discovery.RandomEndpointStrategy): 2 usages
ConfigModule (co.cask.cdap.common.guice.ConfigModule): 2 usages