Use of org.apache.tephra.TransactionSystemClient in project cdap by caskdata.
From the class HBaseTableTest, method testColumnFamily.
@Test
public void testColumnFamily() throws Exception {
  DatasetProperties props = TableProperties.builder().setColumnFamily("t").build();
  String tableName = "testcf";
  DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, props);
  admin.create();
  final BufferingTable table = getTable(CONTEXT1, tableName, props);
  TransactionSystemClient txClient = new DetachedTxSystemClient();
  TransactionExecutor executor = new DefaultTransactionExecutor(txClient, table);
  // write a value in one transaction
  executor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      table.put(new Put("row", "column", "testValue"));
    }
  });
  // read it back through a second table instance in another transaction
  final BufferingTable table2 = getTable(CONTEXT1, tableName, props);
  executor = new DefaultTransactionExecutor(txClient, table2);
  executor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      Assert.assertEquals("testValue", table2.get(new Get("row", "column")).getString("column"));
    }
  });
  // verify the column family name on the underlying HBase table
  TableId hTableId = hBaseTableUtil.createHTableId(new NamespaceId(CONTEXT1.getNamespaceId()), tableName);
  HTableDescriptor htd = hBaseTableUtil.getHTableDescriptor(TEST_HBASE.getHBaseAdmin(), hTableId);
  HColumnDescriptor hcd = htd.getFamily(Bytes.toBytes("t"));
  Assert.assertNotNull(hcd);
  Assert.assertEquals("t", hcd.getNameAsString());
}
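The transactional read and write above follow the generic Tephra client pattern, which is worth isolating. A minimal sketch, assuming some TransactionAware dataset named myTable (a placeholder, not part of the test above):

import org.apache.tephra.DefaultTransactionExecutor;
import org.apache.tephra.TransactionAware;
import org.apache.tephra.TransactionExecutor;
import org.apache.tephra.TransactionSystemClient;
import org.apache.tephra.inmemory.DetachedTxSystemClient;

// 'myTable' stands for any TransactionAware dataset, like the BufferingTable above.
TransactionAware myTable = ...;
// DetachedTxSystemClient hands out transactions without a central transaction manager,
// which is what makes it convenient for single-process tests.
TransactionSystemClient txClient = new DetachedTxSystemClient();
TransactionExecutor executor = new DefaultTransactionExecutor(txClient, myTable);
executor.execute(new TransactionExecutor.Subroutine() {
  @Override
  public void apply() throws Exception {
    // dataset operations performed here are committed together, or rolled back on failure
  }
});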
Use of org.apache.tephra.TransactionSystemClient in project cdap by caskdata.
From the class TransactionServiceClientTest, method resetState.
@Before
public void resetState() throws Exception {
  TransactionSystemClient txClient = getClient();
  txClient.resetState();
}
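resetState() wipes the transaction manager's bookkeeping (in-progress, committed-change, and invalid transaction lists), so calling it in a @Before method gives each test a clean slate. For context, a hypothetical helper showing the raw client lifecycle whose state this clears; canCommit and commit are the classic Tephra client calls (later Tephra versions deprecate them in favor of throwing variants):

import java.nio.charset.StandardCharsets;
import java.util.Collections;
import org.apache.tephra.Transaction;
import org.apache.tephra.TransactionSystemClient;

// Hypothetical helper, for illustration only; not part of the test class above.
void commitOrAbort(TransactionSystemClient txClient) throws Exception {
  Transaction tx = txClient.startShort();                            // begin a short-lived transaction
  byte[] changeId = "myTable:row".getBytes(StandardCharsets.UTF_8);  // opaque key for conflict detection
  if (txClient.canCommit(tx, Collections.singleton(changeId))) {
    txClient.commit(tx);                                             // publish the writes
  } else {
    txClient.abort(tx);                                              // conflict detected: roll back
  }
}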
Use of org.apache.tephra.TransactionSystemClient in project cdap by caskdata.
From the class BaseHiveExploreServiceTest, method initialize.
protected static void initialize(CConfiguration cConf, TemporaryFolder tmpFolder, boolean useStandalone,
                                 boolean enableAuthorization) throws Exception {
  if (!runBefore) {
    return;
  }
  Configuration hConf = new Configuration();
  if (enableAuthorization) {
    LocationFactory locationFactory = new LocalLocationFactory(tmpFolder.newFolder());
    Location authExtensionJar = AppJarHelper.createDeploymentJar(locationFactory, InMemoryAuthorizer.class);
    cConf.setBoolean(Constants.Security.ENABLED, true);
    cConf.setBoolean(Constants.Security.Authorization.ENABLED, true);
    cConf.set(Constants.Security.Authorization.EXTENSION_JAR_PATH, authExtensionJar.toURI().getPath());
    cConf.setBoolean(Constants.Security.KERBEROS_ENABLED, false);
    cConf.setInt(Constants.Security.Authorization.CACHE_MAX_ENTRIES, 0);
  }
  List<Module> modules = useStandalone
    ? createStandaloneModules(cConf, hConf, tmpFolder)
    : createInMemoryModules(cConf, hConf, tmpFolder);
  injector = Guice.createInjector(modules);
  transactionManager = injector.getInstance(TransactionManager.class);
  transactionManager.startAndWait();
  transactionSystemClient = injector.getInstance(TransactionSystemClient.class);
  dsOpService = injector.getInstance(DatasetOpExecutor.class);
  dsOpService.startAndWait();
  datasetService = injector.getInstance(DatasetService.class);
  datasetService.startAndWait();
  exploreExecutorService = injector.getInstance(ExploreExecutorService.class);
  exploreExecutorService.startAndWait();
  datasetFramework = injector.getInstance(DatasetFramework.class);
  exploreClient = injector.getInstance(DiscoveryExploreClient.class);
  exploreService = injector.getInstance(ExploreService.class);
  exploreClient.ping();
  notificationService = injector.getInstance(NotificationService.class);
  notificationService.startAndWait();
  streamService = injector.getInstance(StreamService.class);
  streamService.startAndWait();
  streamHttpService = injector.getInstance(StreamHttpService.class);
  streamHttpService.startAndWait();
  exploreTableManager = injector.getInstance(ExploreTableManager.class);
  streamAdmin = injector.getInstance(StreamAdmin.class);
  streamMetaStore = injector.getInstance(StreamMetaStore.class);
  namespaceAdmin = injector.getInstance(NamespaceAdmin.class);
  namespacedLocationFactory = injector.getInstance(NamespacedLocationFactory.class);
  // Create namespaces. Normally this happens when a namespace is created via the REST APIs, but since we
  // do not start AppFabricServer in Explore tests, we simulate that by calling DatasetFramework APIs directly.
  createNamespace(NamespaceId.DEFAULT);
  createNamespace(NAMESPACE_ID);
  createNamespace(OTHER_NAMESPACE_ID);
}
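The bootstrap above is a standard Guice plus Guava Service pattern: build an injector from a list of modules, pull each service singleton out of it, and start it with startAndWait(). A minimal sketch of the same pattern, using a hypothetical MyService for illustration; note that startAndWait()/stopAndWait() are the older Guava Service API used throughout this snippet, which newer Guava replaces with startAsync().awaitRunning():

import com.google.common.util.concurrent.AbstractIdleService;
import com.google.inject.Guice;
import com.google.inject.Injector;

// Hypothetical service, for illustration only.
class MyService extends AbstractIdleService {
  @Override
  protected void startUp() throws Exception { /* open resources */ }
  @Override
  protected void shutDown() throws Exception { /* release resources */ }
}

Injector injector = Guice.createInjector(/* modules */);
MyService service = injector.getInstance(MyService.class);
service.startAndWait();   // blocks until the service reaches the RUNNING state
// ... use the service ...
service.stopAndWait();    // blocks until the service has shut down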
Use of org.apache.tephra.TransactionSystemClient in project cdap by caskdata.
From the class TransactionServiceTest, method testHA.
@Test(timeout = 30000)
public void testHA() throws Exception {
  // NOTE: we play with blocking/non-blocking a lot below, because until we integrate with generic
  // leader election, the service blocks on start if it is not the leader.
  // TODO: fix this by integrating with generic leader election
  CConfiguration cConf = CConfiguration.create();
  // tests should use the current user for HDFS
  cConf.set(Constants.CFG_HDFS_USER, System.getProperty("user.name"));
  cConf.set(Constants.Zookeeper.QUORUM, zkServer.getConnectionStr());
  cConf.set(Constants.CFG_LOCAL_DATA_DIR, tmpFolder.newFolder().getAbsolutePath());
  Injector injector = Guice.createInjector(
    new ConfigModule(cConf),
    new ZKClientModule(),
    new NonCustomLocationUnitTestModule().getModule(),
    new DiscoveryRuntimeModule().getDistributedModules(),
    new TransactionMetricsModule(),
    new AbstractModule() {
      @Override
      protected void configure() {
        bind(NamespaceQueryAdmin.class).to(SimpleNamespaceQueryAdmin.class);
        bind(UGIProvider.class).to(UnsupportedUGIProvider.class);
        bind(OwnerAdmin.class).to(DefaultOwnerAdmin.class);
      }
    },
    new DataFabricModules().getDistributedModules(),
    Modules.override(new DataSetsModules().getDistributedModules()).with(new AbstractModule() {
      @Override
      protected void configure() {
        bind(MetadataStore.class).to(NoOpMetadataStore.class);
      }
    }),
    new AuthorizationTestModule(),
    new AuthorizationEnforcementModule().getInMemoryModules(),
    new AuthenticationContextModules().getNoOpModule());
  ZKClientService zkClient = injector.getInstance(ZKClientService.class);
  zkClient.startAndWait();
  try {
    final Table table = createTable("myTable");
    // tx service client
    // NOTE: we can init it before we start the services; it should pick them up when they become available
    TransactionSystemClient txClient = injector.getInstance(TransactionSystemClient.class);
    TransactionExecutor txExecutor = new DefaultTransactionExecutor(txClient, ImmutableList.of((TransactionAware) table));
    // starting the tx service; the tx client can pick it up
    TransactionService first = createTxService(zkServer.getConnectionStr(), Networks.getRandomPort(), hConf, tmpFolder.newFolder());
    first.startAndWait();
    Assert.assertNotNull(txClient.startShort());
    verifyGetAndPut(table, txExecutor, null, "val1");
    // starting another tx service should not hurt
    TransactionService second = createTxService(zkServer.getConnectionStr(), Networks.getRandomPort(), hConf, tmpFolder.newFolder());
    // NOTE: we don't have to wait for start, as the client should pick it up anyway, but we do wait
    // to ensure the case with two active services is handled well
    second.startAndWait();
    // wait a bit for the change to take effect
    TimeUnit.SECONDS.sleep(1);
    Assert.assertNotNull(txClient.startShort());
    verifyGetAndPut(table, txExecutor, "val1", "val2");
    // shutting down the first one is fine: we have another one to pick up the leader role
    first.stopAndWait();
    Assert.assertNotNull(txClient.startShort());
    verifyGetAndPut(table, txExecutor, "val2", "val3");
    // doing the same trick again to fail over to the third one
    TransactionService third = createTxService(zkServer.getConnectionStr(), Networks.getRandomPort(), hConf, tmpFolder.newFolder());
    // NOTE: we don't have to wait for start, as the client should pick it up anyway
    third.start();
    // stopping the second one
    second.stopAndWait();
    Assert.assertNotNull(txClient.startShort());
    verifyGetAndPut(table, txExecutor, "val3", "val4");
    // releasing resources
    third.stop();
  } finally {
    try {
      dropTable("myTable");
    } finally {
      zkClient.stopAndWait();
    }
  }
}
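verifyGetAndPut is a helper defined elsewhere in the test class. Judging by how the snippet uses it, it presumably reads the value written by the previous transaction and writes the next one inside a single transaction; a hypothetical reconstruction, using the same Get/Put Table API as the first snippet:

// Hypothetical reconstruction of the helper, for illustration only.
private void verifyGetAndPut(final Table table, TransactionExecutor txExecutor,
                             final String verifyValue, final String putValue)
  throws TransactionFailureException, InterruptedException {
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // expect the value written by the previous transaction (null on the first call) ...
      Assert.assertEquals(verifyValue, table.get(new Get("row", "column")).getString("column"));
      // ... then write the value the next call will verify
      table.put(new Put("row", "column", putValue));
    }
  });
}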
Use of org.apache.tephra.TransactionSystemClient in project cdap by caskdata.
From the class ProgramScheduleStoreDatasetTest, method testFindSchedulesByEventAndUpdateSchedule.
@Test
public void testFindSchedulesByEventAndUpdateSchedule() throws Exception {
  DatasetFramework dsFramework = getInjector().getInstance(DatasetFramework.class);
  TransactionSystemClient txClient = getInjector().getInstance(TransactionSystemClient.class);
  TransactionExecutorFactory txExecutorFactory = new DynamicTransactionExecutorFactory(txClient);
  dsFramework.truncateInstance(Schedulers.STORE_DATASET_ID);
  final ProgramScheduleStoreDataset store =
    dsFramework.getDataset(Schedulers.STORE_DATASET_ID, new HashMap<String, String>(), null);
  Assert.assertNotNull(store);
  TransactionExecutor txExecutor = txExecutorFactory.createExecutor(Collections.singleton((TransactionAware) store));
  final ProgramSchedule sched11 = new ProgramSchedule("sched11", "one partition schedule", PROG1_ID,
    ImmutableMap.of("prop3", "abc"), new PartitionTrigger(DS1_ID, 1), ImmutableList.<Constraint>of());
  final ProgramSchedule sched12 = new ProgramSchedule("sched12", "two partition schedule", PROG1_ID,
    ImmutableMap.of("propper", "popper"), new PartitionTrigger(DS2_ID, 2), ImmutableList.<Constraint>of());
  final ProgramSchedule sched22 = new ProgramSchedule("sched22", "twentytwo partition schedule", PROG2_ID,
    ImmutableMap.of("nn", "4"), new PartitionTrigger(DS2_ID, 22), ImmutableList.<Constraint>of());
  final ProgramSchedule sched31 = new ProgramSchedule("sched31", "a program status trigger", PROG3_ID,
    ImmutableMap.of("propper", "popper"),
    new ProgramStatusTrigger(PROG1_ID, ProgramStatus.COMPLETED, ProgramStatus.FAILED, ProgramStatus.KILLED),
    ImmutableList.<Constraint>of());
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // an event for DS1 or DS2 should trigger nothing; validate that it returns an empty collection
      Assert.assertTrue(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID)).isEmpty());
      Assert.assertTrue(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID)).isEmpty());
      // an event for PROG1 should trigger nothing; it should also return an empty collection
      Assert.assertTrue(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.COMPLETED)).isEmpty());
      Assert.assertTrue(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.FAILED)).isEmpty());
      Assert.assertTrue(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.KILLED)).isEmpty());
    }
  });
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      store.addSchedules(ImmutableList.of(sched11, sched12, sched22, sched31));
    }
  });
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // a program status event for PROG1 should trigger only sched31
      Assert.assertEquals(ImmutableSet.of(sched31),
        toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.COMPLETED))));
      Assert.assertEquals(ImmutableSet.of(sched31),
        toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.FAILED))));
      Assert.assertEquals(ImmutableSet.of(sched31),
        toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.KILLED))));
      // an event for DS1 should trigger only sched11
      Assert.assertEquals(ImmutableSet.of(sched11),
        toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID))));
      // an event for DS2 triggers only sched12 and sched22
      Assert.assertEquals(ImmutableSet.of(sched12, sched22),
        toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID))));
    }
  });
  final ProgramSchedule sched11New = new ProgramSchedule(sched11.getName(), "time schedule", PROG1_ID,
    ImmutableMap.of("timeprop", "time"), new TimeTrigger("* * * * *"), ImmutableList.<Constraint>of());
  final ProgramSchedule sched12New = new ProgramSchedule(sched12.getName(), "one partition schedule", PROG1_ID,
    ImmutableMap.of("pp", "p"), new PartitionTrigger(DS1_ID, 2), ImmutableList.<Constraint>of());
  final ProgramSchedule sched22New = new ProgramSchedule(sched22.getName(), "program3 failed schedule", PROG2_ID,
    ImmutableMap.of("ss", "s"), new ProgramStatusTrigger(PROG3_ID, ProgramStatus.FAILED), ImmutableList.<Constraint>of());
  final ProgramSchedule sched31New = new ProgramSchedule(sched31.getName(), "program1 failed schedule", PROG3_ID,
    ImmutableMap.of("abcd", "efgh"), new ProgramStatusTrigger(PROG1_ID, ProgramStatus.FAILED), ImmutableList.<Constraint>of());
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      store.updateSchedule(sched11New);
      store.updateSchedule(sched12New);
      store.updateSchedule(sched22New);
      store.updateSchedule(sched31New);
    }
  });
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // after the update, an event for DS1 should trigger only sched12New
      Assert.assertEquals(ImmutableSet.of(sched12New),
        toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID))));
      // after the update, an event for DS2 triggers no schedule
      Assert.assertEquals(ImmutableSet.<ProgramSchedule>of(),
        toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID))));
      // a PROG1 status event now triggers only for the FAILED status, not COMPLETED or KILLED
      Assert.assertEquals(ImmutableSet.of(sched31New),
        toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.FAILED))));
      Assert.assertEquals(ImmutableSet.of(),
        toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.COMPLETED))));
      Assert.assertEquals(ImmutableSet.of(),
        toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.KILLED))));
    }
  });
}
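Since TransactionExecutor.Subroutine has a single abstract method, each anonymous class above collapses to a lambda when compiling with Java 8 or later (an assumption about the build, not something this snippet states). A minimal sketch of the equivalent form:

// Equivalent to the anonymous Subroutine blocks above, assuming a Java 8+ build.
txExecutor.execute(() -> store.addSchedules(ImmutableList.of(sched11, sched12, sched22, sched31)));
txExecutor.execute(() -> {
  Assert.assertEquals(ImmutableSet.of(sched11),
    toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID))));
});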