Use of org.apache.tephra.TransactionAware in project cdap by caskdata: the class LineageWriterDataFabricFacade, method createConsumer.
@Override
public QueueConsumer createConsumer(QueueName queueName, ConsumerConfig consumerConfig, int numGroups) throws IOException {
  QueueConsumer consumer = queueClientFactory.createConsumer(queueName, consumerConfig, numGroups);
  if (consumer instanceof TransactionAware) {
    consumer = new CloseableQueueConsumer(datasetCache, consumer);
    datasetCache.addExtraTransactionAware((TransactionAware) consumer);
  }
  return consumer;
}
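For context, the snippet above only registers the consumer with the dataset cache; it does not show how the registered TransactionAware is later driven. The following is a minimal sketch of the Tephra protocol that a transaction context typically runs for each registered participant. It is illustrative only: the helper name executeInTx, the txClient/txAware/work parameters, and the exact error handling are assumptions, not CDAP code.

import java.util.Collection;

import org.apache.tephra.Transaction;
import org.apache.tephra.TransactionAware;
import org.apache.tephra.TransactionFailureException;
import org.apache.tephra.TransactionSystemClient;

// Hypothetical helper showing the Tephra lifecycle for a single TransactionAware participant.
final class TransactionAwareLifecycleSketch {

  static void executeInTx(TransactionSystemClient txClient, TransactionAware txAware, Runnable work)
    throws TransactionFailureException {
    Transaction tx = txClient.startShort();
    txAware.startTx(tx);                          // bind the participant to this transaction
    try {
      work.run();                                 // perform the queue/dataset operations
      Collection<byte[]> changes = txAware.getTxChanges();
      if (!txClient.canCommit(tx, changes)) {     // conflict detection on the participant's change set
        throw new TransactionFailureException("Transaction conflict detected");
      }
      txAware.commitTx();                         // flush the participant's buffered writes
      txClient.commitOrThrow(tx);                 // make the transaction visible
      txAware.postTxCommit();                     // post-commit housekeeping
    } catch (Exception e) {
      try {
        txAware.rollbackTx();                     // undo the participant's writes
      } catch (Exception rollbackFailure) {
        e.addSuppressed(rollbackFailure);
      }
      txClient.abort(tx);
      throw e instanceof TransactionFailureException
        ? (TransactionFailureException) e
        : new TransactionFailureException("Transaction failed", e);
    }
  }

  private TransactionAwareLifecycleSketch() {
  }
}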
Use of org.apache.tephra.TransactionAware in project cdap by caskdata: the class FlowTest, method testFlow.
@Test
public void testFlow() throws Exception {
  final ApplicationWithPrograms app = AppFabricTestHelper.deployApplicationWithManager(WordCountApp.class, TEMP_FOLDER_SUPPLIER);
  List<ProgramController> controllers = Lists.newArrayList();
  for (ProgramDescriptor programDescriptor : app.getPrograms()) {
    // running mapreduce is out of scope of this test (there's a separate unit-test for that)
    if (programDescriptor.getProgramId().getType() == ProgramType.MAPREDUCE) {
      continue;
    }
    controllers.add(AppFabricTestHelper.submit(app, programDescriptor.getSpecification().getClassName(), new BasicArguments(), TEMP_FOLDER_SUPPLIER));
  }
  TimeUnit.SECONDS.sleep(1);
  TransactionSystemClient txSystemClient = AppFabricTestHelper.getInjector().getInstance(TransactionSystemClient.class);
  QueueName queueName = QueueName.fromStream(app.getApplicationId().getNamespace(), "text");
  QueueClientFactory queueClientFactory = AppFabricTestHelper.getInjector().getInstance(QueueClientFactory.class);
  QueueProducer producer = queueClientFactory.createProducer(queueName);
  // start a tx so the queue writes below happen transactionally
  Transaction tx = txSystemClient.startShort();
  ((TransactionAware) producer).startTx(tx);
  StreamEventCodec codec = new StreamEventCodec();
  for (int i = 0; i < 10; i++) {
    String msg = "Testing message " + i;
    StreamEvent event = new StreamEvent(ImmutableMap.<String, String>of(), ByteBuffer.wrap(msg.getBytes(Charsets.UTF_8)));
    producer.enqueue(new QueueEntry(codec.encodePayload(event)));
  }
  // commit the tx
  ((TransactionAware) producer).commitTx();
  txSystemClient.commitOrThrow(tx);
  // Query the service for at most 10 seconds for the expected result
  Gson gson = new Gson();
  DiscoveryServiceClient discoveryServiceClient = AppFabricTestHelper.getInjector().getInstance(DiscoveryServiceClient.class);
  ServiceDiscovered serviceDiscovered = discoveryServiceClient.discover(String.format("service.%s.%s.%s", DefaultId.NAMESPACE.getNamespace(), "WordCountApp", "WordFrequencyService"));
  EndpointStrategy endpointStrategy = new RandomEndpointStrategy(serviceDiscovered);
  int trials = 0;
  while (trials++ < 10) {
    Discoverable discoverable = endpointStrategy.pick(2, TimeUnit.SECONDS);
    URL url = new URL(String.format("http://%s:%d/v3/namespaces/default/apps/%s/services/%s/methods/%s/%s", discoverable.getSocketAddress().getHostName(), discoverable.getSocketAddress().getPort(), "WordCountApp", "WordFrequencyService", "wordfreq", "text:Testing"));
    try {
      HttpURLConnection urlConn = (HttpURLConnection) url.openConnection();
      Map<String, Long> responseContent = gson.fromJson(new InputStreamReader(urlConn.getInputStream(), Charsets.UTF_8), new TypeToken<Map<String, Long>>() { }.getType());
      LOG.info("Service response: " + responseContent);
      if (ImmutableMap.of("text:Testing", 10L).equals(responseContent)) {
        break;
      }
    } catch (Throwable t) {
      LOG.info("Exception when trying to query service.", t);
    }
    TimeUnit.SECONDS.sleep(1);
  }
  Assert.assertTrue(trials < 10);
  for (ProgramController controller : controllers) {
    controller.stop().get();
  }
}
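The queue writes in the test above drive the producer through the raw TransactionAware calls (startTx, commitTx) plus an explicit commit on the transaction client. As a hedged alternative, roughly the same write could be wrapped in Tephra's DefaultTransactionExecutor, which runs the full protocol (including conflict detection and rollback) around the enqueue loop. This is a sketch, not how the test is written; it assumes the same txSystemClient and producer as above.

// Sketch only: wrap the enqueue loop in a TransactionExecutor instead of driving
// startTx/commitTx by hand.
TransactionExecutor executor = new DefaultTransactionExecutor(txSystemClient, (TransactionAware) producer);
final StreamEventCodec codec = new StreamEventCodec();
executor.execute(new TransactionExecutor.Subroutine() {
  @Override
  public void apply() throws Exception {
    for (int i = 0; i < 10; i++) {
      String msg = "Testing message " + i;
      StreamEvent event = new StreamEvent(ImmutableMap.<String, String>of(), ByteBuffer.wrap(msg.getBytes(Charsets.UTF_8)));
      producer.enqueue(new QueueEntry(codec.encodePayload(event)));
    }
  }
});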
Use of org.apache.tephra.TransactionAware in project cdap by caskdata: the class OpenCloseDataSetTest, method testDataSetsAreClosed.
@Test(timeout = 120000)
public void testDataSetsAreClosed() throws Exception {
  final String tableName = "foo";
  TrackingTable.resetTracker();
  ApplicationWithPrograms app = AppFabricTestHelper.deployApplicationWithManager(DummyAppWithTrackingTable.class, TEMP_FOLDER_SUPPLIER);
  List<ProgramController> controllers = Lists.newArrayList();
  // start the programs
  for (ProgramDescriptor programDescriptor : app.getPrograms()) {
    if (programDescriptor.getProgramId().getType().equals(ProgramType.MAPREDUCE)) {
      continue;
    }
    controllers.add(AppFabricTestHelper.submit(app, programDescriptor.getSpecification().getClassName(), new BasicArguments(), TEMP_FOLDER_SUPPLIER));
  }
  // write some data to the queue
  TransactionSystemClient txSystemClient = AppFabricTestHelper.getInjector().getInstance(TransactionSystemClient.class);
  QueueName queueName = QueueName.fromStream(app.getApplicationId().getNamespace(), "xx");
  QueueClientFactory queueClientFactory = AppFabricTestHelper.getInjector().getInstance(QueueClientFactory.class);
  QueueProducer producer = queueClientFactory.createProducer(queueName);
  // start a tx so the queue writes below happen transactionally
  Transaction tx = txSystemClient.startShort();
  ((TransactionAware) producer).startTx(tx);
  StreamEventCodec codec = new StreamEventCodec();
  for (int i = 0; i < 4; i++) {
    String msg = "x" + i;
    StreamEvent event = new StreamEvent(ImmutableMap.<String, String>of(), ByteBuffer.wrap(msg.getBytes(Charsets.UTF_8)));
    producer.enqueue(new QueueEntry(codec.encodePayload(event)));
  }
  // commit the tx
  ((TransactionAware) producer).commitTx();
  txSystemClient.commitOrThrow(tx);
  while (TrackingTable.getTracker(tableName, "write") < 4) {
    TimeUnit.MILLISECONDS.sleep(50);
  }
  // get the number of writes to the foo table
  Assert.assertEquals(4, TrackingTable.getTracker(tableName, "write"));
  // only 2 "open" calls should be tracked:
  // 1. the flow has started with a single flowlet (the service is loaded lazily on the first request)
  // 2. DatasetSystemMetadataWriter also instantiates the dataset because it needs to add some system tags
  //    for the dataset
  Assert.assertEquals(2, TrackingTable.getTracker(tableName, "open"));
  // now send a request to the service
  Gson gson = new Gson();
  DiscoveryServiceClient discoveryServiceClient = AppFabricTestHelper.getInjector().getInstance(DiscoveryServiceClient.class);
  Discoverable discoverable = new RandomEndpointStrategy(discoveryServiceClient.discover(String.format("service.%s.%s.%s", DefaultId.NAMESPACE.getEntityName(), "dummy", "DummyService"))).pick(5, TimeUnit.SECONDS);
  Assert.assertNotNull(discoverable);
  HttpClient client = new DefaultHttpClient();
  HttpGet get = new HttpGet(String.format("http://%s:%d/v3/namespaces/default/apps/%s/services/%s/methods/%s", discoverable.getSocketAddress().getHostName(), discoverable.getSocketAddress().getPort(), "dummy", "DummyService", "x1"));
  HttpResponse response = client.execute(get);
  String responseContent = gson.fromJson(new InputStreamReader(response.getEntity().getContent(), Charsets.UTF_8), String.class);
  client.getConnectionManager().shutdown();
  Assert.assertEquals("x1", responseContent);
  // now the dataset must have a read and another open operation
  Assert.assertEquals(1, TrackingTable.getTracker(tableName, "read"));
  Assert.assertEquals(3, TrackingTable.getTracker(tableName, "open"));
  // the dataset that was instantiated by the DatasetSystemMetadataWriter should have been closed
  Assert.assertEquals(1, TrackingTable.getTracker(tableName, "close"));
  // stop all programs; they should both close the data set foo
  for (ProgramController controller : controllers) {
    controller.stop().get();
  }
  int timesOpened = TrackingTable.getTracker(tableName, "open");
  Assert.assertTrue(timesOpened >= 2);
  Assert.assertEquals(timesOpened, TrackingTable.getTracker(tableName, "close"));
  // now start the m/r job
  ProgramController controller = null;
  for (ProgramDescriptor programDescriptor : app.getPrograms()) {
    if (programDescriptor.getProgramId().getType().equals(ProgramType.MAPREDUCE)) {
      controller = AppFabricTestHelper.submit(app, programDescriptor.getSpecification().getClassName(), new BasicArguments(), TEMP_FOLDER_SUPPLIER);
    }
  }
  Assert.assertNotNull(controller);
  while (!controller.getState().equals(ProgramController.State.COMPLETED)) {
    TimeUnit.MILLISECONDS.sleep(100);
  }
  // the m/r job is done; one mapper and the m/r client should have opened and closed the data set foo.
  // we don't know the exact number of times it was opened, but it is at least once, and it must be
  // closed the same number of times.
  Assert.assertTrue(timesOpened < TrackingTable.getTracker(tableName, "open"));
  Assert.assertEquals(TrackingTable.getTracker(tableName, "open"), TrackingTable.getTracker(tableName, "close"));
  Assert.assertTrue(0 < TrackingTable.getTracker("bar", "open"));
  Assert.assertEquals(TrackingTable.getTracker("bar", "open"), TrackingTable.getTracker("bar", "close"));
}
Use of org.apache.tephra.TransactionAware in project cdap by caskdata: the class ProgramNotificationSubscriberServiceTest, method testAppSpecNotRequiredToWriteState.
@Test
public void testAppSpecNotRequiredToWriteState() throws Exception {
  Injector injector = AppFabricTestHelper.getInjector();
  CConfiguration cConf = injector.getInstance(CConfiguration.class);
  ProgramNotificationSubscriberService programNotificationSubscriberService = injector.getInstance(ProgramNotificationSubscriberService.class);
  programNotificationSubscriberService.startAndWait();
  DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
  TransactionExecutorFactory txExecutorFactory = injector.getInstance(TransactionExecutorFactory.class);
  DatasetId storeTable = NamespaceId.SYSTEM.dataset(Constants.AppMetaStore.TABLE);
  Table table = DatasetsUtil.getOrCreateDataset(datasetFramework, storeTable, Table.class.getName(), DatasetProperties.EMPTY, Collections.<String, String>emptyMap());
  final AppMetadataStore metadataStoreDataset = new AppMetadataStore(table, cConf, new AtomicBoolean(false));
  final TransactionExecutor txnl = txExecutorFactory.createExecutor(Collections.singleton((TransactionAware) metadataStoreDataset));
  ProgramStateWriter programStateWriter = injector.getInstance(ProgramStateWriter.class);
  ProgramId programId = NamespaceId.DEFAULT.app("someapp").program(ProgramType.SERVICE, "s");
  ProgramOptions programOptions = new SimpleProgramOptions(programId);
  final ProgramRunId runId = programId.run(RunIds.generate());
  programStateWriter.start(runId, programOptions, null);
  Tasks.waitFor(ProgramRunStatus.STARTING, () -> txnl.execute(() -> {
    RunRecordMeta meta = metadataStoreDataset.getRun(runId);
    return meta == null ? null : meta.getStatus();
  }), 10, TimeUnit.SECONDS);
  programStateWriter.running(runId, UUID.randomUUID().toString());
  Tasks.waitFor(ProgramRunStatus.RUNNING, () -> txnl.execute(() -> {
    RunRecordMeta meta = metadataStoreDataset.getRun(runId);
    return meta == null ? null : meta.getStatus();
  }), 10, TimeUnit.SECONDS);
  programStateWriter.killed(runId);
  Tasks.waitFor(ProgramRunStatus.KILLED, () -> txnl.execute(() -> {
    RunRecordMeta meta = metadataStoreDataset.getRun(runId);
    return meta == null ? null : meta.getStatus();
  }), 10, TimeUnit.SECONDS);
}
Use of org.apache.tephra.TransactionAware in project cdap by caskdata: the class DynamicPartitionerWithAvroTest, method runDynamicPartitionerMR.
private void runDynamicPartitionerMR(final List<? extends GenericRecord> records, boolean allowConcurrentWriters, final boolean precreatePartitions, @Nullable final DynamicPartitioner.PartitionWriteOption partitionWriteOption, boolean expectedStatus) throws Exception {
  ApplicationWithPrograms app = deployApp(AppWithMapReduceUsingAvroDynamicPartitioner.class);
  final long now = System.currentTimeMillis();
  final Multimap<PartitionKey, GenericRecord> keyToRecordsMap = groupByPartitionKey(records, now);
  // write values to the input kvTable
  final KeyValueTable kvTable = datasetCache.getDataset(INPUT_DATASET);
  Transactions.createTransactionExecutor(txExecutorFactory, kvTable).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() {
      // the keys are not used; it matters that they're unique though
      for (int i = 0; i < records.size(); i++) {
        kvTable.write(Integer.toString(i), records.get(i).toString());
      }
    }
  });
  final PartitionedFileSet pfs = datasetCache.getDataset(OUTPUT_DATASET);
  if (precreatePartitions) {
    Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) pfs).execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws IOException {
        writeFile(pfs, createKey(now, 95111));
        writeFile(pfs, createKey(now, 98123));
        writeFile(pfs, createKey(now, 84125));
      }
    });
  }
  String allowConcurrencyKey = "dataset." + OUTPUT_DATASET + "." + PartitionedFileSetArguments.DYNAMIC_PARTITIONER_ALLOW_CONCURRENCY;
  // run the partition writer m/r with this output partition time
  Map<String, String> arguments = new HashMap<>();
  arguments.put(OUTPUT_PARTITION_KEY, Long.toString(now));
  arguments.put(allowConcurrencyKey, Boolean.toString(allowConcurrentWriters));
  if (partitionWriteOption != null) {
    arguments.put("partitionWriteOption", partitionWriteOption.name());
  }
  long startTime = System.currentTimeMillis();
  boolean status = runProgram(app, AppWithMapReduceUsingAvroDynamicPartitioner.DynamicPartitioningMapReduce.class, new BasicArguments(arguments));
  Assert.assertEquals(expectedStatus, status);
  if (!expectedStatus) {
    // if we expect the program to fail, no need to check the output data for expected results
    return;
  }
  // Verify notifications
  List<Notification> notifications = getDataNotifications(startTime);
  Assert.assertEquals(1, notifications.size());
  Assert.assertEquals(NamespaceId.DEFAULT.dataset(OUTPUT_DATASET), DatasetId.fromString(notifications.get(0).getProperties().get("datasetId")));
  // this should have created a partition in the pfs
  final Location pfsBaseLocation = pfs.getEmbeddedFileSet().getBaseLocation();
  Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) pfs).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws IOException {
      Map<PartitionKey, PartitionDetail> partitions = new HashMap<>();
      for (PartitionDetail partition : pfs.getPartitions(null)) {
        partitions.put(partition.getPartitionKey(), partition);
        // check that the mapreduce wrote the output partition metadata to all the output partitions
        Assert.assertEquals(getExpectedMetadata(precreatePartitions, partitionWriteOption), partition.getMetadata().asMap());
        // if files were precreated, and the option is to append, expect the empty file to exist;
        // if the partition write option is configured to overwrite, then the file is expected to not exist
        Location preexistingFile = partition.getLocation().append("file");
        if (precreatePartitions && partitionWriteOption == DynamicPartitioner.PartitionWriteOption.CREATE_OR_APPEND) {
          Assert.assertTrue(preexistingFile.exists());
          try (InputStream inputStream = preexistingFile.getInputStream()) {
            Assert.assertEquals(-1, inputStream.read());
          }
        } else {
          Assert.assertFalse(preexistingFile.exists());
        }
      }
      Assert.assertEquals(3, partitions.size());
      Assert.assertEquals(keyToRecordsMap.keySet(), partitions.keySet());
      // Check relative paths of the partitions. Also check that their location = pfs baseLocation + relativePath
      for (Map.Entry<PartitionKey, PartitionDetail> partitionKeyEntry : partitions.entrySet()) {
        PartitionDetail partitionDetail = partitionKeyEntry.getValue();
        String relativePath = partitionDetail.getRelativePath();
        int zip = (int) partitionKeyEntry.getKey().getField("zip");
        Assert.assertEquals(Long.toString(now) + Path.SEPARATOR + zip, relativePath);
        Assert.assertEquals(pfsBaseLocation.append(relativePath), partitionDetail.getLocation());
      }
      for (Map.Entry<PartitionKey, Collection<GenericRecord>> keyToRecordsEntry : keyToRecordsMap.asMap().entrySet()) {
        Set<GenericRecord> genericRecords = new HashSet<>(keyToRecordsEntry.getValue());
        Assert.assertEquals(genericRecords, readOutput(partitions.get(keyToRecordsEntry.getKey()).getLocation()));
      }
    }
  });
}
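Since TransactionExecutor.Subroutine has a single abstract method, the anonymous Subroutine classes above can also be written as lambdas on Java 8. A minimal sketch of the partition pre-creation step in that style, reusing the same pfs, txExecutorFactory, writeFile, and createKey from the method above (illustrative only, not how the test is written):

// Sketch only: lambda form of the partition pre-creation step above.
Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) pfs).execute(() -> {
  writeFile(pfs, createKey(now, 95111));
  writeFile(pfs, createKey(now, 98123));
  writeFile(pfs, createKey(now, 84125));
});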