Example use of org.apache.twill.filesystem.LocationFactory in project cdap (by caskdata):
class LogCleanerTest, method testLogCleanup.
@Test
public void testLogCleanup() throws Exception {
  // Write log-file metadata in the new format, run the cleaner, and verify that
  // every metadata entry for the expired files has been removed.
  DatasetFramework dsFramework = injector.getInstance(DatasetFramework.class);
  DatasetManager dsManager = new DefaultDatasetManager(
      dsFramework, NamespaceId.SYSTEM, co.cask.cdap.common.service.RetryStrategies.noRetry(), null);
  // Transactional with conflict-retry so concurrent metadata writes do not abort the test.
  Transactional txnl = Transactions.createTransactionalWithRetry(
      Transactions.createTransactional(
          new MultiThreadDatasetCache(
              new SystemDatasetInstantiator(dsFramework),
              injector.getInstance(TransactionSystemClient.class),
              NamespaceId.SYSTEM, ImmutableMap.<String, String>of(), null, null)),
      RetryStrategies.retryOnConflict(20, 100));
  FileMetadataCleaner metaCleaner = new FileMetadataCleaner(dsManager, txnl);
  FileMetaDataWriter metaWriter = new FileMetaDataWriter(dsManager, txnl);

  LocationFactory locFactory = injector.getInstance(LocationFactory.class);
  LogPathIdentifier logId = new LogPathIdentifier("testNs", "testApp", "testEntity");
  // Anchor all file timestamps 5 seconds in the past so they fall outside the retention window.
  long baseTime = System.currentTimeMillis() - 5000;

  Location logsDir = locFactory.create("logs");
  logsDir.mkdirs();
  // Create 20 files and register each one with a metadata entry in the past time range.
  for (int idx = 0; idx < 20; idx++) {
    Location file = logsDir.append("test" + idx);
    file.createNew();
    metaWriter.writeMetaData(logId, baseTime + idx, baseTime + idx, file);
  }
  Assert.assertEquals(20, logsDir.list().size());

  new LogCleaner(metaCleaner, locFactory, 100, 60).run();

  // All metadata should be gone after the cleaner runs.
  FileMetaDataReader metaReader = injector.getInstance(FileMetaDataReader.class);
  Assert.assertEquals(0, metaReader.listFiles(logId, 0, System.currentTimeMillis()).size());
  // File existence is deliberately not asserted: deletion is best-effort and may fail.
}
Example use of org.apache.twill.filesystem.LocationFactory in project cdap (by caskdata):
class StreamFileJanitorTestBase, method setupAuthzConfig.
/**
 * Configures {@code cConf} for authorization testing: security and authorization are
 * turned on, kerberos is turned off (no principal/keytab is supplied), the authorization
 * cache is disabled, and an in-memory authorizer extension jar is built and registered.
 *
 * @return the shared {@code cConf} instance, updated in place
 * @throws IOException if the temp folder or authorizer jar cannot be created
 */
static CConfiguration setupAuthzConfig() throws IOException {
  cConf.setBoolean(Constants.Security.ENABLED, true);
  cConf.setBoolean(Constants.Security.Authorization.ENABLED, true);
  // Only authorization is under test; with no principal/keytab configured, kerberos must be off.
  cConf.setBoolean(Constants.Security.KERBEROS_ENABLED, false);
  // Disable caching so every enforcement check hits the authorizer directly.
  cConf.setInt(Constants.Security.Authorization.CACHE_MAX_ENTRIES, 0);

  // Package the in-memory authorizer into an extension jar under a temp folder.
  File extensionDir = new File(tmpFolder.newFolder().toURI());
  LocationFactory jarLocationFactory = new LocalLocationFactory(extensionDir);
  Location extensionJar = AppJarHelper.createDeploymentJar(jarLocationFactory, InMemoryAuthorizer.class);
  cConf.set(Constants.Security.Authorization.EXTENSION_JAR_PATH, extensionJar.toURI().getPath());

  // DefaultAuthorizationEnforcer now requires a non-null master principal.
  cConf.set(Constants.Security.CFG_CDAP_MASTER_KRB_PRINCIPAL,
            UserGroupInformation.getLoginUser().getShortUserName());
  return cConf;
}
Example use of org.apache.twill.filesystem.LocationFactory in project cdap (by caskdata):
class StreamFileSizeFetcherTest, method init.
@BeforeClass
public static void init() throws IOException {
  // Back the namespaced location factory under test with a local-filesystem
  // factory rooted in a fresh temp folder.
  File baseDir = TMP_FOLDER.newFolder();
  namespacedLocationFactory =
      new NamespacedLocationFactoryTestClient(cConf, new LocalLocationFactory(baseDir));
}
Example use of org.apache.twill.filesystem.LocationFactory in project cdap (by caskdata):
class CLIMainTest, method createPluginJarFile.
/**
 * Builds a deployment jar for the given plugin class in a fresh temp folder and
 * copies it to a file named {@code <SimpleName>-1.0.jar}.
 *
 * @param cls the plugin class to package
 * @return the jar file on the local filesystem
 * @throws IOException if the jar cannot be created or copied
 */
private static File createPluginJarFile(Class<?> cls) throws IOException {
  File workDir = TMP_FOLDER.newFolder();
  Location jarLocation = AppJarHelper.createDeploymentJar(new LocalLocationFactory(workDir), cls);
  File pluginJar = new File(workDir, String.format("%s-1.0.jar", cls.getSimpleName()));
  Files.copy(Locations.newInputSupplier(jarLocation), pluginJar);
  return pluginJar;
}
Example use of org.apache.twill.filesystem.LocationFactory in project cdap (by caskdata):
class DFSStreamFileJanitorTest, method init.
@BeforeClass
public static void init() throws IOException {
// One-time setup: start a single-node mini DFS cluster, configure security, build the
// Guice injector with test-friendly overrides, and extract the singletons under test.
cConf.set(Constants.CFG_LOCAL_DATA_DIR, tmpFolder.newFolder().getAbsolutePath());
setupAuthzConfig();
// Spin up an in-process HDFS mini cluster with one datanode backed by a temp folder.
Configuration hConf = new Configuration();
hConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tmpFolder.newFolder().getAbsolutePath());
dfsCluster = new MiniDFSCluster.Builder(hConf).numDataNodes(1).build();
dfsCluster.waitClusterUp();
// Location factory rooted in the mini cluster's filesystem; wrapped below so locations
// are namespaced per CDAP namespace.
final LocationFactory lf = new FileContextLocationFactory(dfsCluster.getFileSystem().getConf());
namespaceAdmin = new InMemoryNamespaceClient();
final NamespacedLocationFactory nlf = new DefaultNamespacedLocationFactory(cConf, lf, namespaceAdmin);
// Build the injector. Module order and the Modules.override(...) wrappers are significant:
// the anonymous modules replace selected production bindings with in-memory test doubles.
Injector injector = Guice.createInjector(new ConfigModule(cConf, hConf), new ZKClientModule(), new AbstractModule() {
@Override
protected void configure() {
// Bind the concrete DFS-backed location factories and namespace admin created above.
bind(LocationFactory.class).toInstance(lf);
bind(NamespacedLocationFactory.class).toInstance(nlf);
bind(NamespaceAdmin.class).toInstance(namespaceAdmin);
bind(NamespaceQueryAdmin.class).toInstance(namespaceAdmin);
bind(UGIProvider.class).to(RemoteUGIProvider.class);
bind(OwnerAdmin.class).to(DefaultOwnerAdmin.class);
}
}, new TransactionMetricsModule(), new DiscoveryRuntimeModule().getInMemoryModules(), new DataFabricModules().getDistributedModules(), Modules.override(new DataSetsModules().getDistributedModules()).with(new AbstractModule() {
@Override
protected void configure() {
bind(MetadataStore.class).to(NoOpMetadataStore.class);
// bind to an in memory implementation for this test since the DefaultOwnerStore uses transaction and in this
// test we are not starting a transaction service
bind(OwnerStore.class).to(InMemoryOwnerStore.class).in(Scopes.SINGLETON);
}
}), new ExploreClientModule(), new ViewAdminModules().getInMemoryModules(), Modules.override(new StreamAdminModules().getDistributedModules()).with(new AbstractModule() {
@Override
protected void configure() {
// Tests are running in same process, hence no need to have ZK to coordinate
bind(StreamCoordinatorClient.class).to(InMemoryStreamCoordinatorClient.class).in(Scopes.SINGLETON);
bind(StreamMetaStore.class).to(InMemoryStreamMetaStore.class);
}
}), new AbstractModule() {
@Override
protected void configure() {
// We don't need notification in this test, hence inject an no-op one
bind(NotificationFeedManager.class).to(NoOpNotificationFeedManager.class);
bind(NamespaceStore.class).to(InMemoryNamespaceStore.class);
}
}, new AuthorizationTestModule(), new AuthorizationEnforcementModule().getInMemoryModules(), new AuthenticationContextModules().getMasterModule());
// Pull the singletons exercised by the tests out of the injector.
locationFactory = injector.getInstance(LocationFactory.class);
namespacedLocationFactory = injector.getInstance(NamespacedLocationFactory.class);
namespaceStore = injector.getInstance(NamespaceStore.class);
streamAdmin = injector.getInstance(StreamAdmin.class);
janitor = injector.getInstance(StreamFileJanitor.class);
fileWriterFactory = injector.getInstance(StreamFileWriterFactory.class);
streamCoordinatorClient = injector.getInstance(StreamCoordinatorClient.class);
authorizer = injector.getInstance(AuthorizerInstantiator.class).get();
// Start the coordinator client; tests depend on it running before any stream operations.
streamCoordinatorClient.startAndWait();
}
End of aggregated LocationFactory usage examples.