Use of org.apache.tephra.inmemory.InMemoryTxSystemClient in project cdap by caskdata.
From class PartitionConsumerTest, method testPartitionConsumer:
@Test
public void testPartitionConsumer() throws Exception {
  // exercises an edge case of partition consumption: while partitions are being consumed, another in-progress
  // transaction has added a partition but has not yet committed it, so that partition is not visible to the consumer
  PartitionedFileSet dataset1 = dsFrameworkUtil.getInstance(pfsInstance);
  PartitionedFileSet dataset2 = dsFrameworkUtil.getInstance(pfsInstance);
  TransactionManager txManager = dsFrameworkUtil.getTxManager();
  InMemoryTxSystemClient txClient = new InMemoryTxSystemClient(txManager);
  // the producer simply adds an initial partition
  TransactionContext txContext1 = new TransactionContext(txClient, (TransactionAware) dataset1);
  txContext1.start();
  PartitionKey partitionKey1 = generateUniqueKey();
  dataset1.getPartitionOutput(partitionKey1).addPartition();
  txContext1.finish();
  // the consumer simply consumes the initial partition
  TransactionContext txContext2 = new TransactionContext(txClient, (TransactionAware) dataset2);
  txContext2.start();
  PartitionConsumer partitionConsumer = new ConcurrentPartitionConsumer(dataset2, new InMemoryStatePersistor());
  List<? extends PartitionDetail> partitions = partitionConsumer.consumePartitions().getPartitions();
  Assert.assertEquals(1, partitions.size());
  Assert.assertEquals(partitionKey1, partitions.get(0).getPartitionKey());
  txContext2.finish();
  // the producer adds a second partition but does not yet commit the transaction
  txContext1.start();
  PartitionKey partitionKey2 = generateUniqueKey();
  dataset1.getPartitionOutput(partitionKey2).addPartition();
  // the consumer attempts to consume after the partition was added but before it was committed;
  // because of this, the partition is not visible and will not be consumed
  txContext2.start();
  Assert.assertTrue(partitionConsumer.consumePartitions().getPartitions().isEmpty());
  txContext2.finish();
  // the producer commits the transaction in which the second partition was added
  txContext1.finish();
  // the next time the consumer runs, it processes the second partition
  txContext2.start();
  partitions = partitionConsumer.consumePartitions().getPartitions();
  Assert.assertEquals(1, partitions.size());
  Assert.assertEquals(partitionKey2, partitions.get(0).getPartitionKey());
  txContext2.finish();
}
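The test above relies on Tephra's snapshot isolation: a partition added inside an uncommitted transaction is invisible to any transaction that starts while the writer is still in progress. A minimal standalone sketch of that visibility rule, using only Tephra classes (illustrative only, not part of the CDAP test):

import org.apache.hadoop.conf.Configuration;
import org.apache.tephra.Transaction;
import org.apache.tephra.TransactionManager;
import org.apache.tephra.TransactionSystemClient;
import org.apache.tephra.inmemory.InMemoryTxSystemClient;

public class SnapshotIsolationSketch {
  public static void main(String[] args) {
    TransactionManager txManager = new TransactionManager(new Configuration());
    txManager.startAndWait();
    try {
      TransactionSystemClient txClient = new InMemoryTxSystemClient(txManager);
      // the "producer" starts a transaction and leaves it uncommitted
      Transaction producerTx = txClient.startShort();
      // a "consumer" transaction started now carries producerTx in its in-progress set
      Transaction consumerTx = txClient.startShort();
      // prints false: the producer's writes are invisible until it commits
      System.out.println(consumerTx.isVisible(producerTx.getWritePointer()));
      // neither transaction wrote anything, so aborting both is a clean way to finish
      txClient.abort(producerTx);
      txClient.abort(consumerTx);
    } finally {
      txManager.stopAndWait();
    }
  }
}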
Use of org.apache.tephra.inmemory.InMemoryTxSystemClient in project cdap by caskdata.
From class RemoteDatasetFrameworkTest, method before:
@Before
public void before() throws Exception {
  cConf.set(Constants.Service.MASTER_SERVICES_BIND_ADDRESS, "localhost");
  cConf.setBoolean(Constants.Dangerous.UNRECOVERABLE_RESET, true);
  Configuration txConf = HBaseConfiguration.create();
  CConfigurationUtil.copyTxProperties(cConf, txConf);
  // OK to pass null: the impersonator is never actually called when Kerberos security is not enabled
  Impersonator impersonator = new DefaultImpersonator(cConf, null);
  // TODO: Refactor to use injector for everything
  Injector injector = Guice.createInjector(
    new ConfigModule(cConf, txConf),
    new DiscoveryRuntimeModule().getInMemoryModules(),
    new AuthorizationTestModule(),
    new AuthorizationEnforcementModule().getInMemoryModules(),
    new AuthenticationContextModules().getMasterModule(),
    new TransactionInMemoryModule(),
    new AbstractModule() {
      @Override
      protected void configure() {
        bind(MetricsCollectionService.class).to(NoOpMetricsCollectionService.class).in(Singleton.class);
        install(new FactoryModuleBuilder()
                  .implement(DatasetDefinitionRegistry.class, DefaultDatasetDefinitionRegistry.class)
                  .build(DatasetDefinitionRegistryFactory.class));
        // of everything the injector could provide, these tests only need RemoteDatasetFramework
        bind(RemoteDatasetFramework.class);
      }
    });
  // transaction manager to support working with datasets
  txManager = new TransactionManager(txConf);
  txManager.startAndWait();
  InMemoryTxSystemClient txSystemClient = new InMemoryTxSystemClient(txManager);
  TransactionSystemClientService txSystemClientService = new DelegatingTransactionSystemClientService(txSystemClient);
  DiscoveryService discoveryService = injector.getInstance(DiscoveryService.class);
  DiscoveryServiceClient discoveryServiceClient = injector.getInstance(DiscoveryServiceClient.class);
  MetricsCollectionService metricsCollectionService = injector.getInstance(MetricsCollectionService.class);
  AuthenticationContext authenticationContext = injector.getInstance(AuthenticationContext.class);
  framework = new RemoteDatasetFramework(cConf, discoveryServiceClient, registryFactory, authenticationContext);
  SystemDatasetInstantiatorFactory datasetInstantiatorFactory =
    new SystemDatasetInstantiatorFactory(locationFactory, framework, cConf);
  DatasetAdminService datasetAdminService =
    new DatasetAdminService(framework, cConf, locationFactory, datasetInstantiatorFactory,
                            new NoOpMetadataStore(), impersonator);
  ImmutableSet<HttpHandler> handlers =
    ImmutableSet.<HttpHandler>of(new DatasetAdminOpHTTPHandler(datasetAdminService));
  opExecutorService = new DatasetOpExecutorService(cConf, discoveryService, metricsCollectionService, handlers);
  opExecutorService.startAndWait();
  ImmutableMap<String, DatasetModule> modules = ImmutableMap.<String, DatasetModule>builder()
    .put("memoryTable", new InMemoryTableModule())
    .put("core", new CoreDatasetsModule())
    .putAll(DatasetMetaTableUtil.getModules())
    .build();
  InMemoryDatasetFramework mdsFramework = new InMemoryDatasetFramework(registryFactory, modules);
  DiscoveryExploreClient exploreClient = new DiscoveryExploreClient(discoveryServiceClient, authenticationContext);
  ExploreFacade exploreFacade = new ExploreFacade(exploreClient, cConf);
  TransactionExecutorFactory txExecutorFactory = new DynamicTransactionExecutorFactory(txSystemClient);
  AuthorizationEnforcer authorizationEnforcer = injector.getInstance(AuthorizationEnforcer.class);
  DatasetTypeManager typeManager =
    new DatasetTypeManager(cConf, locationFactory, txSystemClientService, txExecutorFactory,
                           mdsFramework, impersonator);
  DatasetInstanceManager instanceManager =
    new DatasetInstanceManager(txSystemClientService, txExecutorFactory, mdsFramework);
  PrivilegesManager privilegesManager = injector.getInstance(PrivilegesManager.class);
  DatasetTypeService typeService =
    new DatasetTypeService(typeManager, namespaceQueryAdmin, namespacedLocationFactory, authorizationEnforcer,
                           privilegesManager, authenticationContext, cConf, impersonator, txSystemClientService,
                           mdsFramework, txExecutorFactory, DEFAULT_MODULES);
  DatasetOpExecutor opExecutor =
    new LocalDatasetOpExecutor(cConf, discoveryServiceClient, opExecutorService, authenticationContext);
  DatasetInstanceService instanceService =
    new DatasetInstanceService(typeService, instanceManager, opExecutor, exploreFacade, namespaceQueryAdmin,
                               ownerAdmin, authorizationEnforcer, privilegesManager, authenticationContext);
  instanceService.setAuditPublisher(inMemoryAuditPublisher);
  service = new DatasetService(cConf, discoveryService, discoveryServiceClient, metricsCollectionService,
                               new InMemoryDatasetOpExecutor(framework), new HashSet<DatasetMetricsReporter>(),
                               typeService, instanceService);
  // start the dataset service and wait for it to be discoverable
  service.startAndWait();
  EndpointStrategy endpointStrategy =
    new RandomEndpointStrategy(discoveryServiceClient.discover(Constants.Service.DATASET_MANAGER));
  Preconditions.checkNotNull(endpointStrategy.pick(5, TimeUnit.SECONDS), "%s service is not up after 5 seconds", service);
  createNamespace(NamespaceId.SYSTEM);
  createNamespace(NAMESPACE_ID);
}
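One detail worth noting in this setup: InMemoryTxSystemClient is a plain TransactionSystemClient, while the managers above require a TransactionSystemClientService, i.e. a client that also has a service lifecycle. DelegatingTransactionSystemClientService adapts one into the other. A minimal sketch of that wiring in isolation, assuming (as appears to be the case here) that the delegating service's start/stop is a no-op pass-through for the in-memory client:

TransactionManager txManager = new TransactionManager(txConf);
txManager.startAndWait();
TransactionSystemClientService txService =
  new DelegatingTransactionSystemClientService(new InMemoryTxSystemClient(txManager));
txService.startAndWait();   // nothing remote to connect to for the in-memory delegate
// ... pass txService wherever a TransactionSystemClientService is required ...
txService.stopAndWait();
txManager.stopAndWait();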
Use of org.apache.tephra.inmemory.InMemoryTxSystemClient in project cdap by caskdata.
From class DatasetInstanceHandlerTest, method testCreateDelete:
@Test
public void testCreateDelete() throws Exception {
  try {
    deployModule("default-table", InMemoryTableModule.class);
    deployModule("default-core", CoreDatasetsModule.class);
    // create two table instances
    Assert.assertEquals(HttpStatus.SC_OK, createInstance("myTable1", "table", DatasetProperties.EMPTY).getResponseCode());
    Assert.assertEquals(HttpStatus.SC_OK, createInstance("myTable2", "table", DatasetProperties.EMPTY).getResponseCode());
    Assert.assertEquals(2, getInstances().getResponseObject().size());
    // we want to verify later that the data is also gone, so write something to the tables first
    final Table table1 = dsFramework.getDataset(NamespaceId.DEFAULT.dataset("myTable1"), DatasetDefinition.NO_ARGUMENTS, null);
    final Table table2 = dsFramework.getDataset(NamespaceId.DEFAULT.dataset("myTable2"), DatasetDefinition.NO_ARGUMENTS, null);
    Assert.assertNotNull(table1);
    Assert.assertNotNull(table2);
    TransactionExecutor txExecutor =
      new DefaultTransactionExecutor(new InMemoryTxSystemClient(txManager),
                                     ImmutableList.of((TransactionAware) table1, (TransactionAware) table2));
    txExecutor.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        table1.put(new Put("key1", "col1", "val1"));
        table2.put(new Put("key2", "col2", "val2"));
      }
    });
    // verify that we can read the data back
    txExecutor.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        Assert.assertEquals("val1", table1.get(new Get("key1", "col1")).getString("col1"));
        Assert.assertEquals("val2", table2.get(new Get("key2", "col2")).getString("col2"));
      }
    });
    // delete one table, check that it is deleted, create it again and verify that it is empty
    Assert.assertEquals(HttpStatus.SC_OK, deleteInstance("myTable1").getResponseCode());
    ObjectResponse<List<DatasetSpecificationSummary>> instances = getInstances();
    Assert.assertEquals(1, instances.getResponseObject().size());
    Assert.assertEquals("myTable2", instances.getResponseObject().get(0).getName());
    Assert.assertEquals(HttpStatus.SC_OK, createInstance("myTable1", "table", DatasetProperties.EMPTY).getResponseCode());
    Assert.assertEquals(2, getInstances().getResponseObject().size());
    // verify that table1 is empty. Note: for test purposes it is OK to reuse the table clients
    txExecutor.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        Assert.assertTrue(table1.get(new Get("key1", "col1")).isEmpty());
        Assert.assertEquals("val2", table2.get(new Get("key2", "col2")).getString("col2"));
        // write something to table1 for the subsequent checks
        table1.put(new Put("key3", "col3", "val3"));
      }
    });
    // delete all tables, check that they are deleted, create them again and verify that they are empty
    deleteInstances();
    Assert.assertEquals(0, getInstances().getResponseObject().size());
    Assert.assertEquals(HttpStatus.SC_OK, createInstance("myTable1", "table", DatasetProperties.EMPTY).getResponseCode());
    Assert.assertEquals(HttpStatus.SC_OK, createInstance("myTable2", "table", DatasetProperties.EMPTY).getResponseCode());
    Assert.assertEquals(2, getInstances().getResponseObject().size());
    // verify that the tables are empty. Note: for test purposes it is OK to reuse the table clients
    txExecutor.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        Assert.assertTrue(table1.get(new Get("key3", "col3")).isEmpty());
        Assert.assertTrue(table2.get(new Get("key2", "col2")).isEmpty());
      }
    });
  } finally {
    // cleanup
    deleteInstances();
    Assert.assertEquals(HttpStatus.SC_OK, deleteModules().getResponseCode());
  }
}
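Since TransactionExecutor.Subroutine declares a single apply() method, the anonymous classes above can be written more compactly as Java 8 lambdas with the same semantics, for example:

txExecutor.execute(() -> {
  table1.put(new Put("key1", "col1", "val1"));
  table2.put(new Put("key2", "col2", "val2"));
});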
Use of org.apache.tephra.inmemory.InMemoryTxSystemClient in project cdap by caskdata.
From class SparkTransactionHandlerTest, method init:
@BeforeClass
public static void init() throws UnknownHostException {
  txManager = new TransactionManager(new Configuration());
  txManager.startAndWait();
  txClient = new InMemoryTxSystemClient(txManager);
  sparkTxHandler = new SparkTransactionHandler(txClient);
  httpService = new SparkDriverHttpService("test", InetAddress.getLoopbackAddress().getCanonicalHostName(), sparkTxHandler);
  httpService.startAndWait();
  sparkTxClient = new SparkTransactionClient(httpService.getBaseURI());
}
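The corresponding teardown is not shown in this excerpt; it would stop the started services in reverse order of startup, along the lines of:

@AfterClass
public static void finish() {
  httpService.stopAndWait();
  txManager.stopAndWait();
}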
Use of org.apache.tephra.inmemory.InMemoryTxSystemClient in project cdap by caskdata.
From class SparkTransactionHandlerTest, method testFailureTransaction:
/**
 * Tests the case where starting a transaction fails.
 */
@Test
public void testFailureTransaction() throws Exception {
  TransactionManager txManager = new TransactionManager(new Configuration()) {
    @Override
    public Transaction startLong() {
      throw new IllegalStateException("Cannot start long transaction");
    }
  };
  txManager.startAndWait();
  try {
    SparkTransactionHandler txHandler = new SparkTransactionHandler(new InMemoryTxSystemClient(txManager));
    SparkDriverHttpService httpService =
      new SparkDriverHttpService("test", InetAddress.getLoopbackAddress().getCanonicalHostName(), txHandler);
    httpService.startAndWait();
    try {
      // start a job
      txHandler.jobStarted(1, ImmutableSet.of(2));
      // a call to the stage transaction endpoint should throw TransactionFailureException
      try {
        new SparkTransactionClient(httpService.getBaseURI()).getTransaction(2, 1, TimeUnit.SECONDS);
        Assert.fail("Should have failed to get the transaction");
      } catch (TransactionFailureException e) {
        // expected
      }
      // end the job
      txHandler.jobEnded(1, false);
    } finally {
      httpService.stopAndWait();
    }
  } finally {
    txManager.stopAndWait();
  }
}
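For contrast, a sketch of the success path using the same calls and the fields set up in init() above. When startLong() succeeds, the stage transaction endpoint serves the job's transaction instead of failing; the boolean passed to jobEnded presumably signals whether the job succeeded (assumptions noted in the comments):

sparkTxHandler.jobStarted(1, ImmutableSet.of(2));                       // register job 1 with stage 2
Transaction tx = sparkTxClient.getTransaction(2, 10, TimeUnit.SECONDS); // serves the job's transaction once started
sparkTxHandler.jobEnded(1, true);                                       // true: assumed to mean the job succeeded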