Use of io.cdap.cdap.data2.dataset2.MultiThreadDatasetCache in project cdap by caskdata.
The class CoreSchedulerServiceTest, method beforeClass.
@BeforeClass
public static void beforeClass() throws Throwable {
  AppFabricTestBase.beforeClass();
  scheduler = getInjector().getInstance(Scheduler.class);
  if (scheduler instanceof Service) {
    ((Service) scheduler).startAndWait();
  }
  messagingService = getInjector().getInstance(MessagingService.class);
  store = getInjector().getInstance(Store.class);
  DynamicDatasetCache datasetCache = new MultiThreadDatasetCache(
    new SystemDatasetInstantiator(getInjector().getInstance(DatasetFramework.class)),
    getTxClient(), NamespaceId.SYSTEM, ImmutableMap.<String, String>of(), null, null);
  transactional = Transactions.createTransactionalWithRetry(
    Transactions.createTransactional(datasetCache, Schedulers.SUBSCRIBER_TX_TIMEOUT_SECONDS),
    RetryStrategies.retryOnConflict(20, 100));
}
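Once built this way, the Transactional runs each TxRunnable in its own transaction and retries on conflicts (up to 20 times with a 100 ms delay, going by the two retryOnConflict arguments). A minimal usage sketch; the dataset name "app.meta" is hypothetical, not from the original test:

transactional.execute(new TxRunnable() {
  @Override
  public void run(DatasetContext context) throws Exception {
    // Hypothetical dataset name; any dataset registered with the framework would do.
    io.cdap.cdap.api.dataset.table.Table table = context.getDataset("app.meta");
    // Dataset operations performed here are committed together when run() returns.
  }
});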
Use of io.cdap.cdap.data2.dataset2.MultiThreadDatasetCache in project cdap by caskdata.
The class LogFileManagerTest, method testLogFileManager.
@Test
public void testLogFileManager() throws Exception {
  int syncInterval = 1024 * 1024;
  long maxLifeTimeMs = 50;
  long maxFileSizeInBytes = 104857600;
  DatasetManager datasetManager = new DefaultDatasetManager(
    injector.getInstance(DatasetFramework.class), NamespaceId.SYSTEM,
    io.cdap.cdap.common.service.RetryStrategies.noRetry(), null);
  Transactional transactional = Transactions.createTransactionalWithRetry(
    Transactions.createTransactional(new MultiThreadDatasetCache(
      new SystemDatasetInstantiator(injector.getInstance(DatasetFramework.class)),
      injector.getInstance(TransactionSystemClient.class), NamespaceId.SYSTEM,
      ImmutableMap.<String, String>of(), null, null)),
    RetryStrategies.retryOnConflict(20, 100));
  FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(datasetManager, transactional);
  LogFileManager logFileManager = new LogFileManager("700", "600", maxLifeTimeMs, maxFileSizeInBytes,
    syncInterval, fileMetaDataWriter, injector.getInstance(LocationFactory.class));
  LogPathIdentifier logPathIdentifier = new LogPathIdentifier("test", "testApp", "testFlow");
  long timestamp = System.currentTimeMillis();
  LogFileOutputStream outputStream = logFileManager.getLogFileOutputStream(logPathIdentifier, timestamp);
  LoggingEvent event1 = getLoggingEvent("co.cask.Test1",
    (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME),
    Level.ERROR, "test message 1");
  outputStream.append(event1);
  // Check the active stream directly instead of calling getLogFileOutputStream again,
  // to avoid a race if the test machine is slow.
  Assert.assertNotNull(logFileManager.getActiveOutputStream(logPathIdentifier));
  TimeUnit.MILLISECONDS.sleep(60);
  logFileManager.flush();
  // The stream has outlived maxLifeTimeMs (50 ms), so flush closes it and there is no active stream.
  Assert.assertNull(logFileManager.getActiveOutputStream(logPathIdentifier));
  LogFileOutputStream newLogOutStream = logFileManager.getLogFileOutputStream(logPathIdentifier, timestamp);
  // make sure the new location we got is different
  Assert.assertNotEquals(outputStream.getLocation(), newLogOutStream.getLocation());
}
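The getLoggingEvent helper is not part of this snippet. A plausible minimal sketch, assuming it simply wraps logback's LoggingEvent constructor with no throwable or arguments; the real helper may set more fields:

private static LoggingEvent getLoggingEvent(String fqcn, ch.qos.logback.classic.Logger logger,
                                            Level level, String message) {
  // Assumption: a bare ch.qos.logback.classic.spi.LoggingEvent is enough for the test.
  return new LoggingEvent(fqcn, logger, level, message, null, null);
}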
Use of io.cdap.cdap.data2.dataset2.MultiThreadDatasetCache in project cdap by caskdata.
The class FileMetadataTest, method testFileMetadataReadWrite.
@Test
public void testFileMetadataReadWrite() throws Exception {
  DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
  DatasetManager datasetManager = new DefaultDatasetManager(
    datasetFramework, NamespaceId.SYSTEM,
    io.cdap.cdap.common.service.RetryStrategies.noRetry(), null);
  Transactional transactional = Transactions.createTransactionalWithRetry(
    Transactions.createTransactional(new MultiThreadDatasetCache(
      new SystemDatasetInstantiator(datasetFramework),
      injector.getInstance(TransactionSystemClient.class), NamespaceId.SYSTEM,
      ImmutableMap.<String, String>of(), null, null)),
    RetryStrategies.retryOnConflict(20, 100));
  FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(datasetManager, transactional);
  LogPathIdentifier logPathIdentifier =
    new LogPathIdentifier(NamespaceId.DEFAULT.getNamespace(), "testApp", "testFlow");
  LocationFactory locationFactory = injector.getInstance(LocationFactory.class);
  Location location = locationFactory.create(TMP_FOLDER.newFolder().getPath()).append("/logs");
  long currentTime = System.currentTimeMillis();
  for (int i = 10; i <= 100; i += 10) {
    // i is the event time
    fileMetaDataWriter.writeMetaData(logPathIdentifier, i, currentTime, location.append(Integer.toString(i)));
  }
  // for event time 80, write two more entries for the same log path id with newer current times
  fileMetaDataWriter.writeMetaData(logPathIdentifier, 80, currentTime + 1, location.append("81"));
  fileMetaDataWriter.writeMetaData(logPathIdentifier, 80, currentTime + 2, location.append("82"));
  // reader test
  FileMetaDataReader fileMetadataReader = injector.getInstance(FileMetaDataReader.class);
  // 10 entries from the loop plus the two extra entries at event time 80
  Assert.assertEquals(12, fileMetadataReader.listFiles(logPathIdentifier, 0, 100).size());
  Assert.assertEquals(5, fileMetadataReader.listFiles(logPathIdentifier, 20, 50).size());
  Assert.assertEquals(2, fileMetadataReader.listFiles(logPathIdentifier, 100, 150).size());
  // should include only the latest entry written for event time 80
  List<LogLocation> locationList = fileMetadataReader.listFiles(logPathIdentifier, 81, 85);
  Assert.assertEquals(1, locationList.size());
  Assert.assertEquals(80, locationList.get(0).getEventTimeMs());
  Assert.assertEquals(location.append("82"), locationList.get(0).getLocation());
  Assert.assertEquals(1, fileMetadataReader.listFiles(logPathIdentifier, 150, 1000).size());
}
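A minimal sketch of consuming the reader's results, using only the two LogLocation accessors the assertions above already exercise:

for (LogLocation logLocation : fileMetadataReader.listFiles(logPathIdentifier, 0, 100)) {
  // getEventTimeMs() and getLocation() are the accessors used in the assertions above.
  System.out.println(logLocation.getEventTimeMs() + " -> " + logLocation.getLocation());
}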
Use of io.cdap.cdap.data2.dataset2.MultiThreadDatasetCache in project cdap by caskdata.
The class HiveExploreStructuredRecordTestRun, method start.
@BeforeClass
public static void start() throws Exception {
  initialize(tmpFolder);
  DatasetModuleId moduleId = NAMESPACE_ID.datasetModule("email");
  datasetFramework.addModule(moduleId, new EmailTableDefinition.EmailTableModule());
  datasetFramework.addInstance("email", MY_TABLE, DatasetProperties.EMPTY);
  transactional = Transactions.createTransactional(new MultiThreadDatasetCache(
    new SystemDatasetInstantiator(datasetFramework), transactionSystemClient,
    NAMESPACE_ID, Collections.<String, String>emptyMap(), null, null));
  transactional.execute(new TxRunnable() {
    @Override
    public void run(DatasetContext context) throws Exception {
      // Accessing dataset instance to perform data operations
      EmailTableDefinition.EmailTable table = context.getDataset(MY_TABLE.getDataset());
      Assert.assertNotNull(table);
      table.writeEmail("email1", "this is the subject", "this is the body", "sljackson@boss.com");
    }
  });
  datasetFramework.addModule(NAMESPACE_ID.datasetModule("TableWrapper"), new TableWrapperDefinition.Module());
}
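Each execute() call runs in its own transaction, and MultiThreadDatasetCache hands each thread its own cached dataset instance, which is what makes this Transactional safe to share across threads. A minimal follow-up sketch, reusing only the APIs shown above:

transactional.execute(new TxRunnable() {
  @Override
  public void run(DatasetContext context) throws Exception {
    // Re-acquire the dataset in a new transaction; the cache returns the
    // thread-local instance rather than instantiating a fresh one each time.
    EmailTableDefinition.EmailTable table = context.getDataset(MY_TABLE.getDataset());
    Assert.assertNotNull(table);
  }
});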