Example 6 with QueueClientFactory

Use of co.cask.cdap.data2.queue.QueueClientFactory in project cdap by caskdata.

Source: class FlowletProgramRunner, method outputEmitterFactory.

private OutputEmitterFactory outputEmitterFactory(final BasicFlowletContext flowletContext,
                                                  final String flowletName,
                                                  final QueueClientFactory queueClientFactory,
                                                  final ImmutableList.Builder<ProducerSupplier> producerBuilder,
                                                  final Table<Node, String, Set<QueueSpecification>> queueSpecs) {
    return new OutputEmitterFactory() {

        @Override
        public <T> OutputEmitter<T> create(String outputName, TypeToken<T> type) {
            try {
                // first iterate over all queue specifications to find the queue name and all consumer flowlet ids
                QueueName queueName = null;
                List<String> consumerFlowlets = Lists.newLinkedList();
                Node flowlet = Node.flowlet(flowletName);
                Schema schema = schemaGenerator.generate(type.getType());
                for (Map.Entry<String, Set<QueueSpecification>> entry : queueSpecs.row(flowlet).entrySet()) {
                    for (QueueSpecification queueSpec : entry.getValue()) {
                        if (queueSpec.getQueueName().getSimpleName().equals(outputName) && queueSpec.getOutputSchema().equals(schema)) {
                            queueName = queueSpec.getQueueName();
                            consumerFlowlets.add(entry.getKey());
                            break;
                        }
                    }
                }
                if (queueName == null) {
                    throw new IllegalArgumentException(String.format("No queue specification found for %s, %s", flowletName, type));
                }
                // create a metric collector for this queue, and also one for each consumer flowlet
                final MetricsContext metrics = flowletContext.getProgramMetrics().childContext(Constants.Metrics.Tag.FLOWLET_QUEUE, outputName);
                final MetricsContext producerMetrics = metrics.childContext(Constants.Metrics.Tag.PRODUCER, flowletContext.getFlowletId());
                final Iterable<MetricsContext> consumerMetrics = Iterables.transform(consumerFlowlets, new Function<String, MetricsContext>() {

                    @Override
                    public MetricsContext apply(String consumer) {
                        return producerMetrics.childContext(Constants.Metrics.Tag.CONSUMER, consumer);
                    }
                });
                // create a queue metrics emitter that emits to all of the above collectors
                ProducerSupplier producerSupplier = new ProducerSupplier(queueName, queueClientFactory, new QueueMetrics() {

                    @Override
                    public void emitEnqueue(int count) {
                        metrics.increment("process.events.out", count);
                        for (MetricsContext collector : consumerMetrics) {
                            collector.increment("queue.pending", count);
                        }
                    }

                    @Override
                    public void emitEnqueueBytes(int bytes) {
                    // no-op
                    }
                });
                producerBuilder.add(producerSupplier);
                return new DatumOutputEmitter<>(producerSupplier, schema, datumWriterFactory.create(type, schema));
            } catch (Exception e) {
                throw Throwables.propagate(e);
            }
        }
    };
}
Also used : Set(java.util.Set) ImmutableSet(com.google.common.collect.ImmutableSet) Node(co.cask.cdap.app.queue.QueueSpecificationGenerator.Node) Schema(co.cask.cdap.api.data.schema.Schema) MetricsContext(co.cask.cdap.api.metrics.MetricsContext) UnsupportedTypeException(co.cask.cdap.api.data.schema.UnsupportedTypeException) IOException(java.io.IOException) QueueMetrics(co.cask.cdap.data2.transaction.queue.QueueMetrics) TypeToken(com.google.common.reflect.TypeToken) QueueSpecification(co.cask.cdap.app.queue.QueueSpecification) QueueName(co.cask.cdap.common.queue.QueueName) Map(java.util.Map)
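
For orientation, a minimal sketch of how the factory above might be consumed inside the same runner. The output name "wordOut" and the String payload are hypothetical, and the sketch assumes OutputEmitter<T> exposes the usual emit(T) method from the CDAP flowlet API:

// Hypothetical usage sketch: obtain an emitter from the factory and emit one datum.
OutputEmitterFactory factory =
    outputEmitterFactory(flowletContext, flowletName, queueClientFactory, producerBuilder, queueSpecs);
OutputEmitter<String> emitter = factory.create("wordOut", TypeToken.of(String.class));
// The datum is serialized with the generated schema and enqueued through the registered
// ProducerSupplier, so the queue metrics wired up above are emitted automatically.
emitter.emit("hello");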

Example 7 with QueueClientFactory

Use of co.cask.cdap.data2.queue.QueueClientFactory in project cdap by caskdata.

Source: class OpenCloseDataSetTest, method testDataSetsAreClosed.

@Test(timeout = 120000)
public void testDataSetsAreClosed() throws Exception {
    final String tableName = "foo";
    TrackingTable.resetTracker();
    ApplicationWithPrograms app = AppFabricTestHelper.deployApplicationWithManager(DummyAppWithTrackingTable.class, TEMP_FOLDER_SUPPLIER);
    List<ProgramController> controllers = Lists.newArrayList();
    // start the programs
    for (ProgramDescriptor programDescriptor : app.getPrograms()) {
        if (programDescriptor.getProgramId().getType().equals(ProgramType.MAPREDUCE)) {
            continue;
        }
        controllers.add(AppFabricTestHelper.submit(app, programDescriptor.getSpecification().getClassName(), new BasicArguments(), TEMP_FOLDER_SUPPLIER));
    }
    // write some data to the queue
    TransactionSystemClient txSystemClient = AppFabricTestHelper.getInjector().getInstance(TransactionSystemClient.class);
    QueueName queueName = QueueName.fromStream(app.getApplicationId().getNamespace(), "xx");
    QueueClientFactory queueClientFactory = AppFabricTestHelper.getInjector().getInstance(QueueClientFactory.class);
    QueueProducer producer = queueClientFactory.createProducer(queueName);
    // start a transaction for writing to the queue
    Transaction tx = txSystemClient.startShort();
    ((TransactionAware) producer).startTx(tx);
    StreamEventCodec codec = new StreamEventCodec();
    for (int i = 0; i < 4; i++) {
        String msg = "x" + i;
        StreamEvent event = new StreamEvent(ImmutableMap.<String, String>of(), ByteBuffer.wrap(msg.getBytes(Charsets.UTF_8)));
        producer.enqueue(new QueueEntry(codec.encodePayload(event)));
    }
    // commit tx
    ((TransactionAware) producer).commitTx();
    txSystemClient.commit(tx);
    while (TrackingTable.getTracker(tableName, "write") < 4) {
        TimeUnit.MILLISECONDS.sleep(50);
    }
    // get the number of writes to the foo table
    Assert.assertEquals(4, TrackingTable.getTracker(tableName, "write"));
    // only 2 "open" calls should be tracked:
    // 1. the flow has started with a single flowlet (the service is loaded lazily on the first request)
    // 2. DatasetSystemMetadataWriter also instantiates the dataset because it needs to add some
    //    system tags for the dataset
    Assert.assertEquals(2, TrackingTable.getTracker(tableName, "open"));
    // now send a request to the service
    Gson gson = new Gson();
    DiscoveryServiceClient discoveryServiceClient = AppFabricTestHelper.getInjector().getInstance(DiscoveryServiceClient.class);
    Discoverable discoverable = new RandomEndpointStrategy(discoveryServiceClient.discover(String.format("service.%s.%s.%s", DefaultId.NAMESPACE.getEntityName(), "dummy", "DummyService"))).pick(5, TimeUnit.SECONDS);
    Assert.assertNotNull(discoverable);
    HttpClient client = new DefaultHttpClient();
    HttpGet get = new HttpGet(String.format("http://%s:%d/v3/namespaces/default/apps/%s/services/%s/methods/%s", discoverable.getSocketAddress().getHostName(), discoverable.getSocketAddress().getPort(), "dummy", "DummyService", "x1"));
    HttpResponse response = client.execute(get);
    String responseContent = gson.fromJson(new InputStreamReader(response.getEntity().getContent(), Charsets.UTF_8), String.class);
    client.getConnectionManager().shutdown();
    Assert.assertEquals("x1", responseContent);
    // now the dataset must have a read and another open operation
    Assert.assertEquals(1, TrackingTable.getTracker(tableName, "read"));
    Assert.assertEquals(3, TrackingTable.getTracker(tableName, "open"));
    // The dataset that was instantiated by the DatasetSystemMetadataWriter should have been closed
    Assert.assertEquals(1, TrackingTable.getTracker(tableName, "close"));
    // stop all programs; both should close the dataset foo
    for (ProgramController controller : controllers) {
        controller.stop().get();
    }
    int timesOpened = TrackingTable.getTracker(tableName, "open");
    Assert.assertTrue(timesOpened >= 2);
    Assert.assertEquals(timesOpened, TrackingTable.getTracker(tableName, "close"));
    // now start the m/r job
    ProgramController controller = null;
    for (ProgramDescriptor programDescriptor : app.getPrograms()) {
        if (programDescriptor.getProgramId().getType().equals(ProgramType.MAPREDUCE)) {
            controller = AppFabricTestHelper.submit(app, programDescriptor.getSpecification().getClassName(), new BasicArguments(), TEMP_FOLDER_SUPPLIER);
        }
    }
    Assert.assertNotNull(controller);
    while (!controller.getState().equals(ProgramController.State.COMPLETED)) {
        TimeUnit.MILLISECONDS.sleep(100);
    }
    // The M/R job is done; the mapper and the M/R client should have opened and closed the dataset foo.
    // We don't know the exact number of times it was opened, but it is at least once, and it must have been
    // closed the same number of times.
    Assert.assertTrue(timesOpened < TrackingTable.getTracker(tableName, "open"));
    Assert.assertEquals(TrackingTable.getTracker(tableName, "open"), TrackingTable.getTracker(tableName, "close"));
    Assert.assertTrue(0 < TrackingTable.getTracker("bar", "open"));
    Assert.assertEquals(TrackingTable.getTracker("bar", "open"), TrackingTable.getTracker("bar", "close"));
}
Also used : DiscoveryServiceClient(org.apache.twill.discovery.DiscoveryServiceClient) HttpGet(org.apache.http.client.methods.HttpGet) Gson(com.google.gson.Gson) DefaultHttpClient(org.apache.http.impl.client.DefaultHttpClient) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) StreamEventCodec(co.cask.cdap.common.stream.StreamEventCodec) QueueProducer(co.cask.cdap.data2.queue.QueueProducer) ApplicationWithPrograms(co.cask.cdap.internal.app.deploy.pipeline.ApplicationWithPrograms) ProgramDescriptor(co.cask.cdap.app.program.ProgramDescriptor) BasicArguments(co.cask.cdap.internal.app.runtime.BasicArguments) QueueName(co.cask.cdap.common.queue.QueueName) ProgramController(co.cask.cdap.app.runtime.ProgramController) Discoverable(org.apache.twill.discovery.Discoverable) InputStreamReader(java.io.InputStreamReader) StreamEvent(co.cask.cdap.api.flow.flowlet.StreamEvent) HttpResponse(org.apache.http.HttpResponse) QueueEntry(co.cask.cdap.data2.queue.QueueEntry) Transaction(org.apache.tephra.Transaction) TransactionAware(org.apache.tephra.TransactionAware) DefaultHttpClient(org.apache.http.impl.client.DefaultHttpClient) HttpClient(org.apache.http.client.HttpClient) QueueClientFactory(co.cask.cdap.data2.queue.QueueClientFactory) RandomEndpointStrategy(co.cask.cdap.common.discovery.RandomEndpointStrategy) Test(org.junit.Test)
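
The write-in-a-transaction sequence in the middle of this test is a reusable pattern. Here it is distilled into a helper, using only calls that appear above; the helper name enqueueInTx is made up, and rollback/error handling is omitted for brevity:

// Sketch: enqueue a batch of payloads within one short transaction.
private static void enqueueInTx(TransactionSystemClient txClient, QueueProducer producer,
                                List<byte[]> payloads) throws Exception {
    Transaction tx = txClient.startShort();          // open a short transaction
    ((TransactionAware) producer).startTx(tx);       // bind the producer's writes to it
    for (byte[] payload : payloads) {
        producer.enqueue(new QueueEntry(payload));   // buffered until commit
    }
    ((TransactionAware) producer).commitTx();        // flush the producer-side writes
    txClient.commit(tx);                             // make the writes visible
}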

Example 8 with QueueClientFactory

Use of co.cask.cdap.data2.queue.QueueClientFactory in project cdap by caskdata.

Source: class FlowTest, method testFlow.

@Test
public void testFlow() throws Exception {
    final ApplicationWithPrograms app = AppFabricTestHelper.deployApplicationWithManager(WordCountApp.class, TEMP_FOLDER_SUPPLIER);
    List<ProgramController> controllers = Lists.newArrayList();
    for (ProgramDescriptor programDescriptor : app.getPrograms()) {
        // running MapReduce is out of scope of this test (there's a separate unit test for that)
        if (programDescriptor.getProgramId().getType() == ProgramType.MAPREDUCE) {
            continue;
        }
        controllers.add(AppFabricTestHelper.submit(app, programDescriptor.getSpecification().getClassName(), new BasicArguments(), TEMP_FOLDER_SUPPLIER));
    }
    TimeUnit.SECONDS.sleep(1);
    TransactionSystemClient txSystemClient = AppFabricTestHelper.getInjector().getInstance(TransactionSystemClient.class);
    QueueName queueName = QueueName.fromStream(app.getApplicationId().getNamespace(), "text");
    QueueClientFactory queueClientFactory = AppFabricTestHelper.getInjector().getInstance(QueueClientFactory.class);
    QueueProducer producer = queueClientFactory.createProducer(queueName);
    // start a transaction for writing to the queue
    Transaction tx = txSystemClient.startShort();
    ((TransactionAware) producer).startTx(tx);
    StreamEventCodec codec = new StreamEventCodec();
    for (int i = 0; i < 10; i++) {
        String msg = "Testing message " + i;
        StreamEvent event = new StreamEvent(ImmutableMap.<String, String>of(), ByteBuffer.wrap(msg.getBytes(Charsets.UTF_8)));
        producer.enqueue(new QueueEntry(codec.encodePayload(event)));
    }
    // commit tx
    ((TransactionAware) producer).commitTx();
    txSystemClient.commit(tx);
    // Query the service for at most 10 seconds for the expected result
    Gson gson = new Gson();
    DiscoveryServiceClient discoveryServiceClient = AppFabricTestHelper.getInjector().getInstance(DiscoveryServiceClient.class);
    ServiceDiscovered serviceDiscovered = discoveryServiceClient.discover(String.format("service.%s.%s.%s", DefaultId.NAMESPACE.getNamespace(), "WordCountApp", "WordFrequencyService"));
    EndpointStrategy endpointStrategy = new RandomEndpointStrategy(serviceDiscovered);
    int trials = 0;
    while (trials++ < 10) {
        Discoverable discoverable = endpointStrategy.pick(2, TimeUnit.SECONDS);
        URL url = new URL(String.format("http://%s:%d/v3/namespaces/default/apps/%s/services/%s/methods/%s/%s", discoverable.getSocketAddress().getHostName(), discoverable.getSocketAddress().getPort(), "WordCountApp", "WordFrequencyService", "wordfreq", "text:Testing"));
        try {
            HttpURLConnection urlConn = (HttpURLConnection) url.openConnection();
            Map<String, Long> responseContent = gson.fromJson(new InputStreamReader(urlConn.getInputStream(), Charsets.UTF_8), new TypeToken<Map<String, Long>>() {
            }.getType());
            LOG.info("Service response: " + responseContent);
            if (ImmutableMap.of("text:Testing", 10L).equals(responseContent)) {
                break;
            }
        } catch (Throwable t) {
            LOG.info("Exception when trying to query service.", t);
        }
        TimeUnit.SECONDS.sleep(1);
    }
    Assert.assertTrue(trials < 10);
    for (ProgramController controller : controllers) {
        controller.stop().get();
    }
}
Also used : DiscoveryServiceClient(org.apache.twill.discovery.DiscoveryServiceClient) Gson(com.google.gson.Gson) URL(java.net.URL) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) StreamEventCodec(co.cask.cdap.common.stream.StreamEventCodec) HttpURLConnection(java.net.HttpURLConnection) QueueProducer(co.cask.cdap.data2.queue.QueueProducer) EndpointStrategy(co.cask.cdap.common.discovery.EndpointStrategy) RandomEndpointStrategy(co.cask.cdap.common.discovery.RandomEndpointStrategy) ApplicationWithPrograms(co.cask.cdap.internal.app.deploy.pipeline.ApplicationWithPrograms) ProgramDescriptor(co.cask.cdap.app.program.ProgramDescriptor) BasicArguments(co.cask.cdap.internal.app.runtime.BasicArguments) QueueName(co.cask.cdap.common.queue.QueueName) ProgramController(co.cask.cdap.app.runtime.ProgramController) Discoverable(org.apache.twill.discovery.Discoverable) InputStreamReader(java.io.InputStreamReader) StreamEvent(co.cask.cdap.api.flow.flowlet.StreamEvent) ServiceDiscovered(org.apache.twill.discovery.ServiceDiscovered) QueueEntry(co.cask.cdap.data2.queue.QueueEntry) Transaction(org.apache.tephra.Transaction) TransactionAware(org.apache.tephra.TransactionAware) TypeToken(com.google.common.reflect.TypeToken) QueueClientFactory(co.cask.cdap.data2.queue.QueueClientFactory) RandomEndpointStrategy(co.cask.cdap.common.discovery.RandomEndpointStrategy) Test(org.junit.Test)
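
The query loop at the end of this test is a generic bounded-polling idiom rather than CDAP API. Factored out, it looks like the following plain-Java sketch; the name pollUntil is hypothetical:

// Sketch: poll a condition up to maxTrials times, sleeping between attempts.
// Returns true as soon as the condition holds, false once all trials are exhausted.
static boolean pollUntil(java.util.concurrent.Callable<Boolean> condition,
                         int maxTrials, long sleepMillis) throws InterruptedException {
    for (int trial = 0; trial < maxTrials; trial++) {
        try {
            if (condition.call()) {
                return true;
            }
        } catch (Throwable t) {
            // tolerate transient failures (e.g. the service is not yet discoverable), as the test does
        }
        TimeUnit.MILLISECONDS.sleep(sleepMillis);
    }
    return false;
}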

Example 9 with QueueClientFactory

Use of co.cask.cdap.data2.queue.QueueClientFactory in project cdap by caskdata.

Source: class HBaseStreamConsumerTest, method init.

@BeforeClass
public static void init() throws Exception {
    zkServer = InMemoryZKServer.builder().setDataDir(TMP_FOLDER.newFolder()).build();
    zkServer.startAndWait();
    Configuration hConf = TEST_HBASE.getConfiguration();
    cConf.setInt(Constants.Stream.CONTAINER_INSTANCES, 1);
    cConf.set(Constants.CFG_LOCAL_DATA_DIR, TMP_FOLDER.newFolder().getAbsolutePath());
    cConf.set(Constants.Zookeeper.QUORUM, zkServer.getConnectionStr());
    Injector injector = Guice.createInjector(
        new ConfigModule(cConf, hConf),
        new ZKClientModule(),
        new NonCustomLocationUnitTestModule().getModule(),
        new DiscoveryRuntimeModule().getInMemoryModules(),
        new TransactionMetricsModule(),
        new DataSetsModules().getInMemoryModules(),
        new SystemDatasetRuntimeModule().getInMemoryModules(),
        new ExploreClientModule(),
        new ViewAdminModules().getInMemoryModules(),
        new AuthorizationTestModule(),
        new AuthorizationEnforcementModule().getInMemoryModules(),
        new AuthenticationContextModules().getNoOpModule(),
        Modules.override(new DataFabricModules().getDistributedModules(),
                         new StreamAdminModules().getDistributedModules()).with(new AbstractModule() {

        @Override
        protected void configure() {
            bind(TransactionStateStorage.class).to(NoOpTransactionStateStorage.class);
            bind(TransactionSystemClient.class).to(InMemoryTxSystemClient.class).in(Singleton.class);
            bind(StreamMetaStore.class).to(InMemoryStreamMetaStore.class);
            bind(NotificationFeedManager.class).to(NoOpNotificationFeedManager.class);
            bind(NamespaceQueryAdmin.class).to(SimpleNamespaceQueryAdmin.class);
            bind(UGIProvider.class).to(UnsupportedUGIProvider.class);
            bind(OwnerAdmin.class).to(DefaultOwnerAdmin.class);
        }
    }));
    zkClientService = injector.getInstance(ZKClientService.class);
    zkClientService.startAndWait();
    streamAdmin = injector.getInstance(StreamAdmin.class);
    consumerFactory = injector.getInstance(StreamConsumerFactory.class);
    txClient = injector.getInstance(TransactionSystemClient.class);
    txManager = TxInMemory.getTransactionManager(txClient);
    queueClientFactory = injector.getInstance(QueueClientFactory.class);
    fileWriterFactory = injector.getInstance(StreamFileWriterFactory.class);
    txManager.startAndWait();
    tableUtil = injector.getInstance(HBaseTableUtil.class);
    ddlExecutor = new HBaseDDLExecutorFactory(cConf, TEST_HBASE.getHBaseAdmin().getConfiguration()).get();
    ddlExecutor.createNamespaceIfNotExists(tableUtil.getHBaseNamespace(NamespaceId.SYSTEM));
    ddlExecutor.createNamespaceIfNotExists(tableUtil.getHBaseNamespace(TEST_NAMESPACE));
    ddlExecutor.createNamespaceIfNotExists(tableUtil.getHBaseNamespace(OTHER_NAMESPACE));
    setupNamespaces(injector.getInstance(NamespacedLocationFactory.class));
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) ConfigModule(co.cask.cdap.common.guice.ConfigModule) UGIProvider(co.cask.cdap.security.impersonation.UGIProvider) UnsupportedUGIProvider(co.cask.cdap.security.impersonation.UnsupportedUGIProvider) NamespacedLocationFactory(co.cask.cdap.common.namespace.NamespacedLocationFactory) StreamConsumerFactory(co.cask.cdap.data2.transaction.stream.StreamConsumerFactory) TransactionMetricsModule(co.cask.cdap.data.runtime.TransactionMetricsModule) ViewAdminModules(co.cask.cdap.data.view.ViewAdminModules) ZKClientModule(co.cask.cdap.common.guice.ZKClientModule) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) Injector(com.google.inject.Injector) StreamMetaStore(co.cask.cdap.data.stream.service.StreamMetaStore) InMemoryStreamMetaStore(co.cask.cdap.data.stream.service.InMemoryStreamMetaStore) SimpleNamespaceQueryAdmin(co.cask.cdap.common.namespace.SimpleNamespaceQueryAdmin) NamespaceQueryAdmin(co.cask.cdap.common.namespace.NamespaceQueryAdmin) HBaseDDLExecutorFactory(co.cask.cdap.data2.util.hbase.HBaseDDLExecutorFactory) SystemDatasetRuntimeModule(co.cask.cdap.data.runtime.SystemDatasetRuntimeModule) DiscoveryRuntimeModule(co.cask.cdap.common.guice.DiscoveryRuntimeModule) NotificationFeedManager(co.cask.cdap.notifications.feeds.NotificationFeedManager) NoOpNotificationFeedManager(co.cask.cdap.notifications.feeds.service.NoOpNotificationFeedManager) AuthenticationContextModules(co.cask.cdap.security.auth.context.AuthenticationContextModules) DataSetsModules(co.cask.cdap.data.runtime.DataSetsModules) DefaultOwnerAdmin(co.cask.cdap.security.impersonation.DefaultOwnerAdmin) OwnerAdmin(co.cask.cdap.security.impersonation.OwnerAdmin) NonCustomLocationUnitTestModule(co.cask.cdap.common.guice.NonCustomLocationUnitTestModule) InMemoryTxSystemClient(org.apache.tephra.inmemory.InMemoryTxSystemClient) AuthorizationTestModule(co.cask.cdap.security.authorization.AuthorizationTestModule) HBaseTableUtil(co.cask.cdap.data2.util.hbase.HBaseTableUtil) AbstractModule(com.google.inject.AbstractModule) StreamAdminModules(co.cask.cdap.data.stream.StreamAdminModules) StreamAdmin(co.cask.cdap.data2.transaction.stream.StreamAdmin) StreamFileWriterFactory(co.cask.cdap.data.stream.StreamFileWriterFactory) ZKClientService(org.apache.twill.zookeeper.ZKClientService) ExploreClientModule(co.cask.cdap.explore.guice.ExploreClientModule) QueueClientFactory(co.cask.cdap.data2.queue.QueueClientFactory) NoOpTransactionStateStorage(org.apache.tephra.persist.NoOpTransactionStateStorage) TransactionStateStorage(org.apache.tephra.persist.TransactionStateStorage) DataFabricModules(co.cask.cdap.data.runtime.DataFabricModules) AuthorizationEnforcementModule(co.cask.cdap.security.authorization.AuthorizationEnforcementModule) BeforeClass(org.junit.BeforeClass)
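
The heart of this wiring is Guice's Modules.override(...).with(...), which lets the test swap production bindings for in-memory or no-op ones without touching the production modules. A stripped-down sketch; ProductionModule, Clock, SystemClock and FixedClock are hypothetical types:

// Sketch: the binding in the override module wins wherever both modules bind the same key.
Injector injector = Guice.createInjector(
    Modules.override(new ProductionModule())        // suppose it binds Clock to SystemClock
        .with(new AbstractModule() {
            @Override
            protected void configure() {
                bind(Clock.class).to(FixedClock.class);  // test-only replacement
            }
        }));
Clock clock = injector.getInstance(Clock.class);    // resolves to FixedClock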

Example 10 with QueueClientFactory

Use of co.cask.cdap.data2.queue.QueueClientFactory in project cdap by caskdata.

Source: class HBaseQueueTest, method testQueueUpgrade.

// This test upgrades from the old queue layout (salted) to the new queue layout (sharded)
@Test(timeout = 30000L)
public void testQueueUpgrade() throws Exception {
    final QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), "app", "flow", "flowlet", "upgrade");
    HBaseQueueAdmin hbaseQueueAdmin = (HBaseQueueAdmin) queueAdmin;
    HBaseQueueClientFactory hBaseQueueClientFactory = (HBaseQueueClientFactory) queueClientFactory;
    // Create the old queue table explicitly
    HBaseQueueAdmin oldQueueAdmin = new HBaseQueueAdmin(
        hConf, cConf,
        injector.getInstance(LocationFactory.class),
        injector.getInstance(HBaseTableUtil.class),
        injector.getInstance(DatasetFramework.class),
        injector.getInstance(TransactionExecutorFactory.class),
        QueueConstants.QueueType.QUEUE,
        injector.getInstance(NamespaceQueryAdmin.class),
        injector.getInstance(Impersonator.class));
    oldQueueAdmin.create(queueName);
    int buckets = cConf.getInt(QueueConstants.ConfigKeys.QUEUE_TABLE_PRESPLITS);
    try (final HBaseQueueProducer oldProducer = hBaseQueueClientFactory.createProducer(
            oldQueueAdmin, queueName, QueueConstants.QueueType.QUEUE, QueueMetrics.NOOP_QUEUE_METRICS,
            new SaltedHBaseQueueStrategy(tableUtil, buckets), new ArrayList<ConsumerGroupConfig>())) {
        // Enqueue 10 items into the old queue table
        Transactions.createTransactionExecutor(executorFactory, oldProducer).execute(new TransactionExecutor.Subroutine() {

            @Override
            public void apply() throws Exception {
                for (int i = 0; i < 10; i++) {
                    oldProducer.enqueue(new QueueEntry("key", i, Bytes.toBytes("Message " + i)));
                }
            }
        });
    }
    // Configure the consumer
    final ConsumerConfig consumerConfig = new ConsumerConfig(0L, 0, 1, DequeueStrategy.HASH, "key");
    try (QueueConfigurer configurer = queueAdmin.getQueueConfigurer(queueName)) {
        Transactions.createTransactionExecutor(executorFactory, configurer).execute(new TransactionExecutor.Subroutine() {

            @Override
            public void apply() throws Exception {
                configurer.configureGroups(ImmutableList.of(consumerConfig));
            }
        });
    }
    // explicitly set the consumer state to the lowest start row
    try (HBaseConsumerStateStore stateStore = hbaseQueueAdmin.getConsumerStateStore(queueName)) {
        Transactions.createTransactionExecutor(executorFactory, stateStore).execute(new TransactionExecutor.Subroutine() {

            @Override
            public void apply() throws Exception {
                stateStore.updateState(consumerConfig.getGroupId(), consumerConfig.getInstanceId(), QueueEntryRow.getQueueEntryRowKey(queueName, 0L, 0));
            }
        });
    }
    // Enqueue 10 more items into the new queue table
    createEnqueueRunnable(queueName, 10, 1, null).run();
    // Verify that both the old and the new table have 10 rows each
    Assert.assertEquals(10, countRows(hbaseQueueAdmin.getDataTableId(queueName, QueueConstants.QueueType.QUEUE)));
    Assert.assertEquals(10, countRows(hbaseQueueAdmin.getDataTableId(queueName, QueueConstants.QueueType.SHARDED_QUEUE)));
    // Create a consumer. It should see all 20 items
    final List<String> messages = Lists.newArrayList();
    try (final QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, 1)) {
        while (messages.size() != 20) {
            Transactions.createTransactionExecutor(executorFactory, (TransactionAware) consumer).execute(new TransactionExecutor.Subroutine() {

                @Override
                public void apply() throws Exception {
                    DequeueResult<byte[]> result = consumer.dequeue(20);
                    for (byte[] data : result) {
                        messages.add(Bytes.toString(data));
                    }
                }
            });
        }
    }
    verifyQueueIsEmpty(queueName, ImmutableList.of(consumerConfig));
}
Also used : QueueConfigurer(co.cask.cdap.data2.transaction.queue.QueueConfigurer) TransactionExecutorFactory(org.apache.tephra.TransactionExecutorFactory) DatasetFramework(co.cask.cdap.data2.dataset2.DatasetFramework) DequeueResult(co.cask.cdap.data2.queue.DequeueResult) NamespaceQueryAdmin(co.cask.cdap.common.namespace.NamespaceQueryAdmin) ConsumerConfig(co.cask.cdap.data2.queue.ConsumerConfig) QueueName(co.cask.cdap.common.queue.QueueName) ConsumerGroupConfig(co.cask.cdap.data2.queue.ConsumerGroupConfig) TransactionExecutor(org.apache.tephra.TransactionExecutor) HBaseTableUtil(co.cask.cdap.data2.util.hbase.HBaseTableUtil) Impersonator(co.cask.cdap.security.impersonation.Impersonator) QueueEntry(co.cask.cdap.data2.queue.QueueEntry) IOException(java.io.IOException) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) LocationFactory(org.apache.twill.filesystem.LocationFactory) QueueConsumer(co.cask.cdap.data2.queue.QueueConsumer) TransactionAware(org.apache.tephra.TransactionAware) Test(org.junit.Test) QueueTest(co.cask.cdap.data2.transaction.queue.QueueTest)
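
Every mutation in this test funnels through the same Tephra idiom: wrap a TransactionAware resource in a TransactionExecutor and run a Subroutine, so all operations in the block commit or roll back together. Distilled, using only calls visible above; groupId, instanceId and startRow stand in for the concrete arguments:

// Sketch: everything inside apply() executes in a single transaction.
Transactions.createTransactionExecutor(executorFactory, stateStore)
    .execute(new TransactionExecutor.Subroutine() {
        @Override
        public void apply() throws Exception {
            stateStore.updateState(groupId, instanceId, startRow);
        }
    });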

Aggregations

QueueClientFactory (co.cask.cdap.data2.queue.QueueClientFactory): 11
TransactionSystemClient (org.apache.tephra.TransactionSystemClient): 8
ConfigModule (co.cask.cdap.common.guice.ConfigModule): 7
DiscoveryRuntimeModule (co.cask.cdap.common.guice.DiscoveryRuntimeModule): 7
DataSetsModules (co.cask.cdap.data.runtime.DataSetsModules): 7
TransactionMetricsModule (co.cask.cdap.data.runtime.TransactionMetricsModule): 7
AuthenticationContextModules (co.cask.cdap.security.auth.context.AuthenticationContextModules): 7
AuthorizationEnforcementModule (co.cask.cdap.security.authorization.AuthorizationEnforcementModule): 7
AuthorizationTestModule (co.cask.cdap.security.authorization.AuthorizationTestModule): 7
TransactionExecutorFactory (org.apache.tephra.TransactionExecutorFactory): 7
NonCustomLocationUnitTestModule (co.cask.cdap.common.guice.NonCustomLocationUnitTestModule): 6
AbstractModule (com.google.inject.AbstractModule): 6
BeforeClass (org.junit.BeforeClass): 6
DefaultOwnerAdmin (co.cask.cdap.security.impersonation.DefaultOwnerAdmin): 5
Injector (com.google.inject.Injector): 5
SystemDatasetRuntimeModule (co.cask.cdap.data.runtime.SystemDatasetRuntimeModule): 4
StreamAdminModules (co.cask.cdap.data.stream.StreamAdminModules): 4
InMemoryStreamMetaStore (co.cask.cdap.data.stream.service.InMemoryStreamMetaStore): 4
ViewAdminModules (co.cask.cdap.data.view.ViewAdminModules): 4
ExploreClientModule (co.cask.cdap.explore.guice.ExploreClientModule): 4