Example 6 with BasicArguments

Use of co.cask.cdap.internal.app.runtime.BasicArguments in project cdap by caskdata.

The class OpenCloseDataSetTest, method testDataSetsAreClosed.

@Test(timeout = 120000)
public void testDataSetsAreClosed() throws Exception {
    final String tableName = "foo";
    TrackingTable.resetTracker();
    ApplicationWithPrograms app = AppFabricTestHelper.deployApplicationWithManager(DummyAppWithTrackingTable.class, TEMP_FOLDER_SUPPLIER);
    List<ProgramController> controllers = Lists.newArrayList();
    // start the programs
    for (ProgramDescriptor programDescriptor : app.getPrograms()) {
        if (programDescriptor.getProgramId().getType().equals(ProgramType.MAPREDUCE)) {
            continue;
        }
        controllers.add(AppFabricTestHelper.submit(app, programDescriptor.getSpecification().getClassName(), new BasicArguments(), TEMP_FOLDER_SUPPLIER));
    }
    // write some data to the queue
    TransactionSystemClient txSystemClient = AppFabricTestHelper.getInjector().getInstance(TransactionSystemClient.class);
    QueueName queueName = QueueName.fromStream(app.getApplicationId().getNamespace(), "xx");
    QueueClientFactory queueClientFactory = AppFabricTestHelper.getInjector().getInstance(QueueClientFactory.class);
    QueueProducer producer = queueClientFactory.createProducer(queueName);
    // start a transaction so the queue writes happen transactionally
    Transaction tx = txSystemClient.startShort();
    ((TransactionAware) producer).startTx(tx);
    StreamEventCodec codec = new StreamEventCodec();
    for (int i = 0; i < 4; i++) {
        String msg = "x" + i;
        StreamEvent event = new StreamEvent(ImmutableMap.<String, String>of(), ByteBuffer.wrap(msg.getBytes(Charsets.UTF_8)));
        producer.enqueue(new QueueEntry(codec.encodePayload(event)));
    }
    // commit tx
    ((TransactionAware) producer).commitTx();
    txSystemClient.commit(tx);
    while (TrackingTable.getTracker(tableName, "write") < 4) {
        TimeUnit.MILLISECONDS.sleep(50);
    }
    // get the number of writes to the foo table
    Assert.assertEquals(4, TrackingTable.getTracker(tableName, "write"));
    // only 2 "open" calls should be tracked:
    // 1. the flow has started with a single flowlet (the service is loaded lazily on the first request)
    // 2. DatasetSystemMetadataWriter also instantiates the dataset because it needs to add some system tags
    //    to the dataset
    Assert.assertEquals(2, TrackingTable.getTracker(tableName, "open"));
    // now send a request to the service
    Gson gson = new Gson();
    DiscoveryServiceClient discoveryServiceClient = AppFabricTestHelper.getInjector().getInstance(DiscoveryServiceClient.class);
    Discoverable discoverable = new RandomEndpointStrategy(discoveryServiceClient.discover(String.format("service.%s.%s.%s", DefaultId.NAMESPACE.getEntityName(), "dummy", "DummyService"))).pick(5, TimeUnit.SECONDS);
    Assert.assertNotNull(discoverable);
    HttpClient client = new DefaultHttpClient();
    HttpGet get = new HttpGet(String.format("http://%s:%d/v3/namespaces/default/apps/%s/services/%s/methods/%s", discoverable.getSocketAddress().getHostName(), discoverable.getSocketAddress().getPort(), "dummy", "DummyService", "x1"));
    HttpResponse response = client.execute(get);
    String responseContent = gson.fromJson(new InputStreamReader(response.getEntity().getContent(), Charsets.UTF_8), String.class);
    client.getConnectionManager().shutdown();
    Assert.assertEquals("x1", responseContent);
    // now the dataset must have a read and another open operation
    Assert.assertEquals(1, TrackingTable.getTracker(tableName, "read"));
    Assert.assertEquals(3, TrackingTable.getTracker(tableName, "open"));
    // The dataset that was instantiated by the DatasetSystemMetadataWriter should have been closed
    Assert.assertEquals(1, TrackingTable.getTracker(tableName, "close"));
    // stop all programs; both the flow and the service should close the dataset foo
    for (ProgramController controller : controllers) {
        controller.stop().get();
    }
    int timesOpened = TrackingTable.getTracker(tableName, "open");
    Assert.assertTrue(timesOpened >= 2);
    Assert.assertEquals(timesOpened, TrackingTable.getTracker(tableName, "close"));
    // now start the m/r job
    ProgramController controller = null;
    for (ProgramDescriptor programDescriptor : app.getPrograms()) {
        if (programDescriptor.getProgramId().getType().equals(ProgramType.MAPREDUCE)) {
            controller = AppFabricTestHelper.submit(app, programDescriptor.getSpecification().getClassName(), new BasicArguments(), TEMP_FOLDER_SUPPLIER);
        }
    }
    Assert.assertNotNull(controller);
    while (!controller.getState().equals(ProgramController.State.COMPLETED)) {
        TimeUnit.MILLISECONDS.sleep(100);
    }
    // The m/r job is done; one mapper and the m/r client should each have opened and closed the dataset foo.
    // We don't know the exact number of times it was opened, but it was opened at least once, and it must be
    // closed the same number of times.
    Assert.assertTrue(timesOpened < TrackingTable.getTracker(tableName, "open"));
    Assert.assertEquals(TrackingTable.getTracker(tableName, "open"), TrackingTable.getTracker(tableName, "close"));
    Assert.assertTrue(0 < TrackingTable.getTracker("bar", "open"));
    Assert.assertEquals(TrackingTable.getTracker("bar", "open"), TrackingTable.getTracker("bar", "close"));
}
Also used: DiscoveryServiceClient (org.apache.twill.discovery.DiscoveryServiceClient), HttpGet (org.apache.http.client.methods.HttpGet), Gson (com.google.gson.Gson), DefaultHttpClient (org.apache.http.impl.client.DefaultHttpClient), TransactionSystemClient (org.apache.tephra.TransactionSystemClient), StreamEventCodec (co.cask.cdap.common.stream.StreamEventCodec), QueueProducer (co.cask.cdap.data2.queue.QueueProducer), ApplicationWithPrograms (co.cask.cdap.internal.app.deploy.pipeline.ApplicationWithPrograms), ProgramDescriptor (co.cask.cdap.app.program.ProgramDescriptor), BasicArguments (co.cask.cdap.internal.app.runtime.BasicArguments), QueueName (co.cask.cdap.common.queue.QueueName), ProgramController (co.cask.cdap.app.runtime.ProgramController), Discoverable (org.apache.twill.discovery.Discoverable), InputStreamReader (java.io.InputStreamReader), StreamEvent (co.cask.cdap.api.flow.flowlet.StreamEvent), HttpResponse (org.apache.http.HttpResponse), QueueEntry (co.cask.cdap.data2.queue.QueueEntry), Transaction (org.apache.tephra.Transaction), TransactionAware (org.apache.tephra.TransactionAware), HttpClient (org.apache.http.client.HttpClient), QueueClientFactory (co.cask.cdap.data2.queue.QueueClientFactory), RandomEndpointStrategy (co.cask.cdap.common.discovery.RandomEndpointStrategy), Test (org.junit.Test)
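
Every submit call above passes a BasicArguments instance: the no-arg constructor yields empty runtime arguments, while the Map constructor (used in later examples) wraps user-supplied key/value pairs. A minimal sketch of both, assuming BasicArguments exposes the hasOption/getOption lookups of the Arguments interface it implements:

import co.cask.cdap.app.runtime.Arguments;
import co.cask.cdap.internal.app.runtime.BasicArguments;
import com.google.common.collect.ImmutableMap;

public class BasicArgumentsSketch {
    public static void main(String[] args) {
        // Empty runtime arguments, as used when submitting the flow and service above.
        Arguments empty = new BasicArguments();
        // Map-backed runtime arguments, as used in Examples 7 and 10.
        Arguments withArgs = new BasicArguments(ImmutableMap.of("arg", "test"));
        // Simple lookups over the backing map.
        System.out.println(withArgs.hasOption("arg"));  // true
        System.out.println(withArgs.getOption("arg"));  // test
        System.out.println(empty.hasOption("arg"));     // false
    }
}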

Example 7 with BasicArguments

Use of co.cask.cdap.internal.app.runtime.BasicArguments in project cdap by caskdata.

The class FlowTest, method testAppWithArgs.

@Test
public void testAppWithArgs() throws Exception {
    final ApplicationWithPrograms app = AppFabricTestHelper.deployApplicationWithManager(ArgumentCheckApp.class, TEMP_FOLDER_SUPPLIER);
    // Running the flow alone would suffice, but the service is loaded lazily, so we also need to send it a request
    List<ProgramController> controllers = Lists.newArrayList();
    for (ProgramDescriptor programDescriptor : app.getPrograms()) {
        Arguments userArgs = new BasicArguments(ImmutableMap.of("arg", "test"));
        controllers.add(AppFabricTestHelper.submit(app, programDescriptor.getSpecification().getClassName(), userArgs, TEMP_FOLDER_SUPPLIER));
    }
    DiscoveryServiceClient discoveryServiceClient = AppFabricTestHelper.getInjector().getInstance(DiscoveryServiceClient.class);
    String discoverableName = String.format("service.%s.%s.%s", DefaultId.NAMESPACE.getNamespace(), "ArgumentCheckApp", "SimpleService");
    Discoverable discoverable = new RandomEndpointStrategy(discoveryServiceClient.discover(discoverableName)).pick(5, TimeUnit.SECONDS);
    Assert.assertNotNull(discoverable);
    URL url = new URL(String.format("http://%s:%d/v3/namespaces/default/apps/%s/services/%s/methods/%s", discoverable.getSocketAddress().getHostName(), discoverable.getSocketAddress().getPort(), "ArgumentCheckApp", "SimpleService", "ping"));
    HttpURLConnection urlConn = (HttpURLConnection) url.openConnection();
    // this would fail had the service been started without the argument (initialize would have thrown)
    Assert.assertEquals(200, urlConn.getResponseCode());
    for (ProgramController controller : controllers) {
        controller.stop().get();
    }
}
Also used: ProgramController (co.cask.cdap.app.runtime.ProgramController), Discoverable (org.apache.twill.discovery.Discoverable), DiscoveryServiceClient (org.apache.twill.discovery.DiscoveryServiceClient), HttpURLConnection (java.net.HttpURLConnection), ApplicationWithPrograms (co.cask.cdap.internal.app.deploy.pipeline.ApplicationWithPrograms), Arguments (co.cask.cdap.app.runtime.Arguments), BasicArguments (co.cask.cdap.internal.app.runtime.BasicArguments), ProgramDescriptor (co.cask.cdap.app.program.ProgramDescriptor), URL (java.net.URL), RandomEndpointStrategy (co.cask.cdap.common.discovery.RandomEndpointStrategy), Test (org.junit.Test)
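
The test only passes if SimpleService saw the "arg" key at startup. The ArgumentCheckApp source is not shown on this page, so the handler below is a hypothetical sketch of such a check, assuming the standard getRuntimeArguments() accessor on the service context:

import co.cask.cdap.api.service.http.AbstractHttpServiceHandler;
import co.cask.cdap.api.service.http.HttpServiceContext;
import co.cask.cdap.api.service.http.HttpServiceRequest;
import co.cask.cdap.api.service.http.HttpServiceResponder;
import javax.ws.rs.GET;
import javax.ws.rs.Path;

// Hypothetical handler: initialization fails unless the "arg" runtime argument is present,
// so the ping endpoint can only return 200 when the argument was passed.
public class SimpleServiceHandler extends AbstractHttpServiceHandler {

    @Override
    public void initialize(HttpServiceContext context) throws Exception {
        super.initialize(context);
        if (!context.getRuntimeArguments().containsKey("arg")) {
            throw new IllegalArgumentException("Expected runtime argument 'arg'");
        }
    }

    @GET
    @Path("ping")
    public void ping(HttpServiceRequest request, HttpServiceResponder responder) {
        responder.sendStatus(200);
    }
}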

Example 8 with BasicArguments

Use of co.cask.cdap.internal.app.runtime.BasicArguments in project cdap by caskdata.

The class FlowTest, method testFlow.

@Test
public void testFlow() throws Exception {
    final ApplicationWithPrograms app = AppFabricTestHelper.deployApplicationWithManager(WordCountApp.class, TEMP_FOLDER_SUPPLIER);
    List<ProgramController> controllers = Lists.newArrayList();
    for (ProgramDescriptor programDescriptor : app.getPrograms()) {
        // running mapreduce is out of scope of this test (there's a separate unit test for that)
        if (programDescriptor.getProgramId().getType() == ProgramType.MAPREDUCE) {
            continue;
        }
        controllers.add(AppFabricTestHelper.submit(app, programDescriptor.getSpecification().getClassName(), new BasicArguments(), TEMP_FOLDER_SUPPLIER));
    }
    TimeUnit.SECONDS.sleep(1);
    TransactionSystemClient txSystemClient = AppFabricTestHelper.getInjector().getInstance(TransactionSystemClient.class);
    QueueName queueName = QueueName.fromStream(app.getApplicationId().getNamespace(), "text");
    QueueClientFactory queueClientFactory = AppFabricTestHelper.getInjector().getInstance(QueueClientFactory.class);
    QueueProducer producer = queueClientFactory.createProducer(queueName);
    // start a transaction so the queue writes happen transactionally
    Transaction tx = txSystemClient.startShort();
    ((TransactionAware) producer).startTx(tx);
    StreamEventCodec codec = new StreamEventCodec();
    for (int i = 0; i < 10; i++) {
        String msg = "Testing message " + i;
        StreamEvent event = new StreamEvent(ImmutableMap.<String, String>of(), ByteBuffer.wrap(msg.getBytes(Charsets.UTF_8)));
        producer.enqueue(new QueueEntry(codec.encodePayload(event)));
    }
    // commit tx
    ((TransactionAware) producer).commitTx();
    txSystemClient.commit(tx);
    // Query the service for at most 10 seconds for the expected result
    Gson gson = new Gson();
    DiscoveryServiceClient discoveryServiceClient = AppFabricTestHelper.getInjector().getInstance(DiscoveryServiceClient.class);
    ServiceDiscovered serviceDiscovered = discoveryServiceClient.discover(String.format("service.%s.%s.%s", DefaultId.NAMESPACE.getNamespace(), "WordCountApp", "WordFrequencyService"));
    EndpointStrategy endpointStrategy = new RandomEndpointStrategy(serviceDiscovered);
    int trials = 0;
    while (trials++ < 10) {
        Discoverable discoverable = endpointStrategy.pick(2, TimeUnit.SECONDS);
        URL url = new URL(String.format("http://%s:%d/v3/namespaces/default/apps/%s/services/%s/methods/%s/%s", discoverable.getSocketAddress().getHostName(), discoverable.getSocketAddress().getPort(), "WordCountApp", "WordFrequencyService", "wordfreq", "text:Testing"));
        try {
            HttpURLConnection urlConn = (HttpURLConnection) url.openConnection();
            Map<String, Long> responseContent = gson.fromJson(new InputStreamReader(urlConn.getInputStream(), Charsets.UTF_8), new TypeToken<Map<String, Long>>() {
            }.getType());
            LOG.info("Service response: " + responseContent);
            if (ImmutableMap.of("text:Testing", 10L).equals(responseContent)) {
                break;
            }
        } catch (Throwable t) {
            LOG.info("Exception when trying to query service.", t);
        }
        TimeUnit.SECONDS.sleep(1);
    }
    // on success the loop breaks with trials in 1..10; if every attempt failed, trials ends at 11
    Assert.assertTrue(trials <= 10);
    for (ProgramController controller : controllers) {
        controller.stop().get();
    }
}
Also used: DiscoveryServiceClient (org.apache.twill.discovery.DiscoveryServiceClient), Gson (com.google.gson.Gson), URL (java.net.URL), TransactionSystemClient (org.apache.tephra.TransactionSystemClient), StreamEventCodec (co.cask.cdap.common.stream.StreamEventCodec), HttpURLConnection (java.net.HttpURLConnection), QueueProducer (co.cask.cdap.data2.queue.QueueProducer), EndpointStrategy (co.cask.cdap.common.discovery.EndpointStrategy), RandomEndpointStrategy (co.cask.cdap.common.discovery.RandomEndpointStrategy), ApplicationWithPrograms (co.cask.cdap.internal.app.deploy.pipeline.ApplicationWithPrograms), ProgramDescriptor (co.cask.cdap.app.program.ProgramDescriptor), BasicArguments (co.cask.cdap.internal.app.runtime.BasicArguments), QueueName (co.cask.cdap.common.queue.QueueName), ProgramController (co.cask.cdap.app.runtime.ProgramController), Discoverable (org.apache.twill.discovery.Discoverable), InputStreamReader (java.io.InputStreamReader), StreamEvent (co.cask.cdap.api.flow.flowlet.StreamEvent), ServiceDiscovered (org.apache.twill.discovery.ServiceDiscovered), QueueEntry (co.cask.cdap.data2.queue.QueueEntry), Transaction (org.apache.tephra.Transaction), TransactionAware (org.apache.tephra.TransactionAware), TypeToken (com.google.common.reflect.TypeToken), QueueClientFactory (co.cask.cdap.data2.queue.QueueClientFactory), Test (org.junit.Test)
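
All three tests discover a service endpoint under the name service.<namespace>.<app>.<service> and build an HTTP URL from the resolved host and port. A small sketch that factors out the pattern (the helper names are ours, not CDAP's):

import co.cask.cdap.common.discovery.RandomEndpointStrategy;
import java.net.InetSocketAddress;
import java.util.concurrent.TimeUnit;
import org.apache.twill.discovery.Discoverable;
import org.apache.twill.discovery.DiscoveryServiceClient;

public final class ServiceUrls {

    // The discovery name convention used throughout these tests.
    static String discoveryName(String namespace, String app, String service) {
        return String.format("service.%s.%s.%s", namespace, app, service);
    }

    // Resolves one endpoint (null on timeout) and turns it into a method URL.
    static String methodUrl(DiscoveryServiceClient client, String namespace,
                            String app, String service, String method) {
        Discoverable discoverable =
            new RandomEndpointStrategy(client.discover(discoveryName(namespace, app, service)))
                .pick(5, TimeUnit.SECONDS);
        if (discoverable == null) {
            return null;
        }
        InetSocketAddress addr = discoverable.getSocketAddress();
        return String.format("http://%s:%d/v3/namespaces/%s/apps/%s/services/%s/methods/%s",
                             addr.getHostName(), addr.getPort(), namespace, app, service, method);
    }
}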

Example 9 with BasicArguments

Use of co.cask.cdap.internal.app.runtime.BasicArguments in project cdap by caskdata.

The class ProgramLifecycleService, method start.

/**
   * Start a Program.
   *
   * @param programId  the {@link ProgramId program} to start
   * @param systemArgs system arguments
   * @param userArgs user arguments
   * @param debug enable debug mode
   * @return {@link ProgramRuntimeService.RuntimeInfo}
   * @throws IOException if there is an error starting the program
   * @throws ProgramNotFoundException if program is not found
   * @throws UnauthorizedException if the logged-in user is not authorized to start the program. To start a program,
   *                               a user requires {@link Action#EXECUTE} on the program
   * @throws Exception if any other error occurs while checking whether the current user is authorized to start the program
   */
public ProgramRuntimeService.RuntimeInfo start(final ProgramId programId, final Map<String, String> systemArgs, final Map<String, String> userArgs, boolean debug) throws Exception {
    authorizationEnforcer.enforce(programId, authenticationContext.getPrincipal(), Action.EXECUTE);
    ProgramDescriptor programDescriptor = store.loadProgram(programId);
    BasicArguments systemArguments = new BasicArguments(systemArgs);
    BasicArguments userArguments = new BasicArguments(userArgs);
    ProgramRuntimeService.RuntimeInfo runtimeInfo = runtimeService.run(programDescriptor, new SimpleProgramOptions(programId.getProgram(), systemArguments, userArguments, debug));
    final ProgramController controller = runtimeInfo.getController();
    final String runId = controller.getRunId().getId();
    final String twillRunId = runtimeInfo.getTwillRunId() == null ? null : runtimeInfo.getTwillRunId().getId();
    if (programId.getType() != ProgramType.MAPREDUCE && programId.getType() != ProgramType.SPARK && programId.getType() != ProgramType.WORKFLOW) {
        // MapReduce state recording is done by the MapReduceProgramRunner, Spark state recording
        // is done by SparkProgramRunner, and Workflow state recording is done by WorkflowProgramRunner.
        // TODO [JIRA: CDAP-2013] Same needs to be done for other programs as well
        controller.addListener(new AbstractListener() {

            @Override
            public void init(ProgramController.State state, @Nullable Throwable cause) {
                // Get start time from RunId
                long startTimeInSeconds = RunIds.getTime(controller.getRunId(), TimeUnit.SECONDS);
                if (startTimeInSeconds == -1) {
                    // If RunId is not time-based, use current time as start time
                    startTimeInSeconds = TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis());
                }
                final long finalStartTimeInSeconds = startTimeInSeconds;
                Retries.supplyWithRetries(new Supplier<Void>() {

                    @Override
                    public Void get() {
                        store.setStart(programId, runId, finalStartTimeInSeconds, twillRunId, userArgs, systemArgs);
                        return null;
                    }
                }, RetryStrategies.fixDelay(Constants.Retry.RUN_RECORD_UPDATE_RETRY_DELAY_SECS, TimeUnit.SECONDS));
                if (state == ProgramController.State.COMPLETED) {
                    completed();
                }
                if (state == ProgramController.State.ERROR) {
                    error(controller.getFailureCause());
                }
            }

            @Override
            public void completed() {
                LOG.debug("Program {} completed successfully.", programId);
                Retries.supplyWithRetries(new Supplier<Void>() {

                    @Override
                    public Void get() {
                        store.setStop(programId, runId, TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis()), ProgramController.State.COMPLETED.getRunStatus());
                        return null;
                    }
                }, RetryStrategies.fixDelay(Constants.Retry.RUN_RECORD_UPDATE_RETRY_DELAY_SECS, TimeUnit.SECONDS));
            }

            @Override
            public void killed() {
                LOG.debug("Program {} killed.", programId);
                Retries.supplyWithRetries(new Supplier<Void>() {

                    @Override
                    public Void get() {
                        store.setStop(programId, runId, TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis()), ProgramController.State.KILLED.getRunStatus());
                        return null;
                    }
                }, RetryStrategies.fixDelay(Constants.Retry.RUN_RECORD_UPDATE_RETRY_DELAY_SECS, TimeUnit.SECONDS));
            }

            @Override
            public void suspended() {
                LOG.debug("Suspending Program {} {}.", programId, runId);
                Retries.supplyWithRetries(new Supplier<Void>() {

                    @Override
                    public Void get() {
                        store.setSuspend(programId, runId);
                        return null;
                    }
                }, RetryStrategies.fixDelay(Constants.Retry.RUN_RECORD_UPDATE_RETRY_DELAY_SECS, TimeUnit.SECONDS));
            }

            @Override
            public void resuming() {
                LOG.debug("Resuming Program {} {}.", programId, runId);
                Retries.supplyWithRetries(new Supplier<Void>() {

                    @Override
                    public Void get() {
                        store.setResume(programId, runId);
                        return null;
                    }
                }, RetryStrategies.fixDelay(Constants.Retry.RUN_RECORD_UPDATE_RETRY_DELAY_SECS, TimeUnit.SECONDS));
            }

            @Override
            public void error(final Throwable cause) {
                LOG.info("Program stopped with error {}, {}", programId, runId, cause);
                Retries.supplyWithRetries(new Supplier<Void>() {

                    @Override
                    public Void get() {
                        store.setStop(programId, runId, TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis()), ProgramController.State.ERROR.getRunStatus(), new BasicThrowable(cause));
                        return null;
                    }
                }, RetryStrategies.fixDelay(Constants.Retry.RUN_RECORD_UPDATE_RETRY_DELAY_SECS, TimeUnit.SECONDS));
            }
        }, Threads.SAME_THREAD_EXECUTOR);
    }
    return runtimeInfo;
}
Also used: ProgramController (co.cask.cdap.app.runtime.ProgramController), RuntimeInfo (co.cask.cdap.app.runtime.ProgramRuntimeService.RuntimeInfo), BasicThrowable (co.cask.cdap.proto.BasicThrowable), AbstractListener (co.cask.cdap.internal.app.runtime.AbstractListener), Supplier (com.google.common.base.Supplier), ProgramDescriptor (co.cask.cdap.app.program.ProgramDescriptor), BasicArguments (co.cask.cdap.internal.app.runtime.BasicArguments), SimpleProgramOptions (co.cask.cdap.internal.app.runtime.SimpleProgramOptions), ProgramRuntimeService (co.cask.cdap.app.runtime.ProgramRuntimeService)
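
Each listener callback above wraps a single store call in Retries.supplyWithRetries with an anonymous Guava Supplier. Under Java 8 the same callback collapses to a lambda; a sketch, assuming supplyWithRetries accepts any com.google.common.base.Supplier as the anonymous classes above suggest:

// suspended() rewritten with a lambda in place of the anonymous Supplier (sketch).
@Override
public void suspended() {
    LOG.debug("Suspending Program {} {}.", programId, runId);
    Retries.supplyWithRetries(() -> {
        store.setSuspend(programId, runId);
        return null;
    }, RetryStrategies.fixDelay(Constants.Retry.RUN_RECORD_UPDATE_RETRY_DELAY_SECS, TimeUnit.SECONDS));
}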

Example 10 with BasicArguments

Use of co.cask.cdap.internal.app.runtime.BasicArguments in project cdap by caskdata.

The class DynamicPartitionerWithAvroTest, method runDynamicPartitionerMapReduce.

private void runDynamicPartitionerMapReduce(final List<? extends GenericRecord> records, boolean allowConcurrentWriters, boolean expectedStatus) throws Exception {
    ApplicationWithPrograms app = deployApp(AppWithMapReduceUsingAvroDynamicPartitioner.class);
    final long now = System.currentTimeMillis();
    final Multimap<PartitionKey, GenericRecord> keyToRecordsMap = groupByPartitionKey(records, now);
    // write values to the input kvTable
    final KeyValueTable kvTable = datasetCache.getDataset(INPUT_DATASET);
    Transactions.createTransactionExecutor(txExecutorFactory, kvTable).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() {
            // the keys are not used; it matters that they're unique though
            for (int i = 0; i < records.size(); i++) {
                kvTable.write(Integer.toString(i), records.get(i).toString());
            }
        }
    });
    String allowConcurrencyKey = "dataset." + OUTPUT_DATASET + "." + PartitionedFileSetArguments.DYNAMIC_PARTITIONER_ALLOW_CONCURRENCY;
    // run the partition writer m/r with this output partition time
    ImmutableMap<String, String> arguments = ImmutableMap.of(OUTPUT_PARTITION_KEY, Long.toString(now), allowConcurrencyKey, Boolean.toString(allowConcurrentWriters));
    long startTime = System.currentTimeMillis();
    boolean status = runProgram(app, AppWithMapReduceUsingAvroDynamicPartitioner.DynamicPartitioningMapReduce.class, new BasicArguments(arguments));
    Assert.assertEquals(expectedStatus, status);
    if (!expectedStatus) {
        // if we expect the program to fail, no need to check the output data for expected results
        return;
    }
    // Verify notifications
    List<Notification> notifications = getDataNotifications(startTime);
    Assert.assertEquals(1, notifications.size());
    Assert.assertEquals(NamespaceId.DEFAULT.dataset(OUTPUT_DATASET), DatasetId.fromString(notifications.get(0).getProperties().get("datasetId")));
    // this should have created a partition in the pfs
    final PartitionedFileSet pfs = datasetCache.getDataset(OUTPUT_DATASET);
    final Location pfsBaseLocation = pfs.getEmbeddedFileSet().getBaseLocation();
    Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) pfs).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws IOException {
            Map<PartitionKey, PartitionDetail> partitions = new HashMap<>();
            for (PartitionDetail partition : pfs.getPartitions(null)) {
                partitions.put(partition.getPartitionKey(), partition);
                // check that the mapreduce wrote the output partition metadata to all the output partitions
                Assert.assertEquals(AppWithMapReduceUsingAvroDynamicPartitioner.DynamicPartitioningMapReduce.METADATA, partition.getMetadata().asMap());
            }
            Assert.assertEquals(3, partitions.size());
            Assert.assertEquals(keyToRecordsMap.keySet(), partitions.keySet());
            // Check relative paths of the partitions. Also check that their location = pfs baseLocation + relativePath
            for (Map.Entry<PartitionKey, PartitionDetail> partitionKeyEntry : partitions.entrySet()) {
                PartitionDetail partitionDetail = partitionKeyEntry.getValue();
                String relativePath = partitionDetail.getRelativePath();
                int zip = (int) partitionKeyEntry.getKey().getField("zip");
                Assert.assertEquals(Long.toString(now) + Path.SEPARATOR + zip, relativePath);
                Assert.assertEquals(pfsBaseLocation.append(relativePath), partitionDetail.getLocation());
            }
            for (Map.Entry<PartitionKey, Collection<GenericRecord>> keyToRecordsEntry : keyToRecordsMap.asMap().entrySet()) {
                Set<GenericRecord> genericRecords = new HashSet<>(keyToRecordsEntry.getValue());
                Assert.assertEquals(genericRecords, readOutput(partitions.get(keyToRecordsEntry.getKey()).getLocation()));
            }
        }
    });
}
Also used: HashSet (java.util.HashSet), Set (java.util.Set), PartitionedFileSet (co.cask.cdap.api.dataset.lib.PartitionedFileSet), PartitionDetail (co.cask.cdap.api.dataset.lib.PartitionDetail), Notification (co.cask.cdap.proto.Notification), ApplicationWithPrograms (co.cask.cdap.internal.app.deploy.pipeline.ApplicationWithPrograms), BasicArguments (co.cask.cdap.internal.app.runtime.BasicArguments), GenericRecord (org.apache.avro.generic.GenericRecord), TransactionExecutor (org.apache.tephra.TransactionExecutor), IOException (java.io.IOException), KeyValueTable (co.cask.cdap.api.dataset.lib.KeyValueTable), TransactionAware (org.apache.tephra.TransactionAware), PartitionKey (co.cask.cdap.api.dataset.lib.PartitionKey), HashMap (java.util.HashMap), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), Location (org.apache.twill.filesystem.Location)
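
On the program side, the MapReduce reads these runtime arguments back through its context. The DynamicPartitioningMapReduce source is not shown on this page, so the sketch below is illustrative only; it assumes the standard getRuntimeArguments() accessor and reuses the OUTPUT_PARTITION_KEY constant from the test:

import co.cask.cdap.api.mapreduce.AbstractMapReduce;
import co.cask.cdap.api.mapreduce.MapReduceContext;
import java.util.Map;

// Hypothetical sketch: reading back the arguments submitted via BasicArguments above.
public class DynamicPartitioningMapReduce extends AbstractMapReduce {

    @Override
    protected void initialize() throws Exception {
        MapReduceContext context = getContext();
        Map<String, String> args = context.getRuntimeArguments();
        // OUTPUT_PARTITION_KEY is the same constant the test uses; its value is not shown on this page.
        long partitionTime = Long.parseLong(args.get(OUTPUT_PARTITION_KEY));
        // ... configure the output PartitionedFileSet and DynamicPartitioner with partitionTime ...
    }
}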

Aggregations

Classes used together with BasicArguments across these usages, with occurrence counts:

BasicArguments (co.cask.cdap.internal.app.runtime.BasicArguments): 30
ApplicationWithPrograms (co.cask.cdap.internal.app.deploy.pipeline.ApplicationWithPrograms): 18
Test (org.junit.Test): 17
SimpleProgramOptions (co.cask.cdap.internal.app.runtime.SimpleProgramOptions): 11
ProgramDescriptor (co.cask.cdap.app.program.ProgramDescriptor): 10
ProgramController (co.cask.cdap.app.runtime.ProgramController): 9
File (java.io.File): 5
TransactionExecutor (org.apache.tephra.TransactionExecutor): 5
KeyValueTable (co.cask.cdap.api.dataset.lib.KeyValueTable): 4
AbstractListener (co.cask.cdap.internal.app.runtime.AbstractListener): 4
IOException (java.io.IOException): 4
TransactionAware (org.apache.tephra.TransactionAware): 4
Location (org.apache.twill.filesystem.Location): 4
FileSet (co.cask.cdap.api.dataset.lib.FileSet): 3
Arguments (co.cask.cdap.app.runtime.Arguments): 3
ProgramOptions (co.cask.cdap.app.runtime.ProgramOptions): 3
RandomEndpointStrategy (co.cask.cdap.common.discovery.RandomEndpointStrategy): 3
ImmutableMap (com.google.common.collect.ImmutableMap): 3
Discoverable (org.apache.twill.discovery.Discoverable): 3
DiscoveryServiceClient (org.apache.twill.discovery.DiscoveryServiceClient): 3