Use of org.apache.twill.filesystem.LocationFactory in project cdap by caskdata.
Example from class HBaseMessageTableTestRun, method setupBeforeClass.
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // Take the HBase configuration from the shared test cluster and turn off table compression.
  hConf = HBASE_TEST_BASE.getConfiguration();
  hConf.set(HBaseTableUtil.CFG_HBASE_TABLE_COMPRESSION, HBaseTableUtil.CompressionType.NONE.name());

  // Point the local data dir (and the HDFS namespace, which reuses the same path) at a temp folder.
  String localDataDir = TEMP_FOLDER.newFolder().getAbsolutePath();
  cConf.set(Constants.CFG_LOCAL_DATA_DIR, localDataDir);
  cConf.set(Constants.CFG_HDFS_NAMESPACE, localDataDir);
  cConf.set(Constants.CFG_HDFS_USER, System.getProperty("user.name"));

  // Make sure the CDAP system namespace exists before creating tables in it.
  tableUtil = new HBaseTableUtilFactory(cConf).get();
  ddlExecutor = new HBaseDDLExecutorFactory(cConf, hConf).get();
  ddlExecutor.createNamespaceIfNotExists(tableUtil.getHBaseNamespace(NamespaceId.SYSTEM));

  LocationFactory locations = getInjector().getInstance(LocationFactory.class);
  tableFactory = new HBaseTableFactory(cConf, hConf, tableUtil, locations);

  // Persist the CConfiguration through ConfigurationWriter so it can be read back
  // via ConfigurationReader (presumably by coprocessors — confirm against usage).
  new ConfigurationWriter(hConf, cConf).write(ConfigurationReader.Type.DEFAULT, cConf);
}
Use of org.apache.twill.filesystem.LocationFactory in project cdap by caskdata.
Example from class HBaseMetadataTableTestRun, method setupBeforeClass.
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // Use the shared test cluster's HBase configuration with table compression disabled.
  hConf = HBASE_TEST_BASE.getConfiguration();
  hConf.set(HBaseTableUtil.CFG_HBASE_TABLE_COMPRESSION, HBaseTableUtil.CompressionType.NONE.name());

  // Ensure the CDAP system namespace exists before any table is created in it.
  tableUtil = new HBaseTableUtilFactory(cConf).get();
  ddlExecutor = new HBaseDDLExecutorFactory(cConf, hConf).get();
  ddlExecutor.createNamespaceIfNotExists(tableUtil.getHBaseNamespace(NamespaceId.SYSTEM));

  LocationFactory locations = getInjector().getInstance(LocationFactory.class);
  tableFactory = new HBaseTableFactory(cConf, hConf, tableUtil, locations);
}
Use of org.apache.twill.filesystem.LocationFactory in project cdap by caskdata.
Example from class HBaseTableCoprocessorTestRun, method setupBeforeClass.
@BeforeClass
public static void setupBeforeClass() throws Exception {
// Use the shared test cluster's HBase configuration with table compression disabled.
hConf = HBASE_TEST_BASE.getConfiguration();
hConf.set(HBaseTableUtil.CFG_HBASE_TABLE_COMPRESSION, HBaseTableUtil.CompressionType.NONE.name());
// Point the local data dir (and the HDFS namespace, which reuses the same path) at a temp folder.
cConf.set(Constants.CFG_LOCAL_DATA_DIR, TEMP_FOLDER.newFolder().getAbsolutePath());
cConf.set(Constants.CFG_HDFS_NAMESPACE, cConf.get(Constants.CFG_LOCAL_DATA_DIR));
cConf.set(Constants.CFG_HDFS_USER, System.getProperty("user.name"));
// Reduce the metadata cache refresh frequency for unit tests
cConf.set(Constants.MessagingSystem.COPROCESSOR_METADATA_CACHE_UPDATE_FREQUENCY_SECONDS, Integer.toString(METADATA_CACHE_EXPIRY));
// NOTE: the admin's Configuration is a separate object from hConf; compression must be
// disabled on it as well, and it is the one later handed to HBaseTableFactory below.
hBaseAdmin = HBASE_TEST_BASE.getHBaseAdmin();
hBaseAdmin.getConfiguration().set(HBaseTableUtil.CFG_HBASE_TABLE_COMPRESSION, HBaseTableUtil.CompressionType.NONE.name());
// Ensure the CDAP system namespace exists before any table is created in it.
tableUtil = new HBaseTableUtilFactory(cConf).get();
ddlExecutor = new HBaseDDLExecutorFactory(cConf, hConf).get();
ddlExecutor.createNamespaceIfNotExists(tableUtil.getHBaseNamespace(NamespaceId.SYSTEM));
LocationFactory locationFactory = getInjector().getInstance(LocationFactory.class);
tableFactory = new HBaseTableFactory(cConf, hBaseAdmin.getConfiguration(), tableUtil, locationFactory);
// Persist the CConfiguration through ConfigurationWriter so it can be read back via
// ConfigurationReader (presumably by the coprocessor under test — confirm against usage).
new ConfigurationWriter(hConf, cConf).write(ConfigurationReader.Type.DEFAULT, cConf);
// write an initial transaction snapshot
// V[3], V[5], V[7] are marked invalid; V[6] is left in-progress (SHORT) so the
// snapshot's visibility upper bound lands at V[6].
invalidList.addAll(ImmutableList.of(V[3], V[5], V[7]));
TransactionSnapshot txSnapshot = TransactionSnapshot.copyFrom(System.currentTimeMillis(), V[6] - 1, V[7], invalidList, // this will set visibility upper bound to V[6]
Maps.newTreeMap(ImmutableSortedMap.of(V[6], new TransactionManager.InProgressTx(V[6] - 1, Long.MAX_VALUE, TransactionManager.InProgressType.SHORT))), new HashMap<Long, TransactionManager.ChangeSet>(), new TreeMap<Long, TransactionManager.ChangeSet>());
// Write the snapshot to HDFS-backed transaction state storage, then shut the storage down.
HDFSTransactionStateStorage tmpStorage = new HDFSTransactionStateStorage(hConf, new SnapshotCodecProvider(hConf), new TxMetricsCollector());
tmpStorage.startAndWait();
tmpStorage.writeSnapshot(txSnapshot);
tmpStorage.stopAndWait();
}
Use of org.apache.twill.filesystem.LocationFactory in project cdap by caskdata.
Example from class SparkDriverService, method createCredentialsUpdater.
/**
 * Creates a {@link SparkCredentialsUpdater} for {@link Credentials} in secure environment. If security is disable,
 * or failure to create one due to {@link IOException} from {@link LocationFactory}, {@code null} will be returned.
 */
@Nullable
private SparkCredentialsUpdater createCredentialsUpdater(Configuration hConf, SparkExecutionClient client) {
  try {
    SparkConf sparkConf = new SparkConf();

    // A non-positive renewal interval means no token renewal is configured (security disabled).
    long renewalIntervalMs = sparkConf.getLong("spark.yarn.token.renewal.interval", -1L);
    if (renewalIntervalMs <= 0) {
      return null;
    }

    // This env variable is set by Spark for all known Spark versions.
    // If it is missing, exception will be thrown.
    URI stagingURI = URI.create(System.getenv("SPARK_YARN_STAGING_DIR"));
    LocationFactory locationFactory = new FileContextLocationFactory(hConf);

    // Resolve the staging dir either as an absolute path or relative to the home location.
    Location credentialsDir;
    if (stagingURI.isAbsolute()) {
      credentialsDir = locationFactory.create(stagingURI.getPath());
    } else {
      credentialsDir = locationFactory.getHomeLocation().append(stagingURI.getPath());
    }
    LOG.info("Credentials DIR: {}", credentialsDir);

    int daysToKeepFiles = sparkConf.getInt("spark.yarn.credentials.file.retention.days", 5);
    int numFilesToKeep = sparkConf.getInt("spark.yarn.credentials.file.retention.count", 5);
    Location credentialsFile = credentialsDir.append("credentials-" + UUID.randomUUID());

    // Update this property so that the executor will pick it up. It can't get set from the client side,
    // otherwise the AM process will try to look for keytab file
    SparkRuntimeEnv.setProperty("spark.yarn.credentials.file", credentialsFile.toURI().toString());

    return new SparkCredentialsUpdater(createCredentialsSupplier(client, credentialsDir),
                                       credentialsDir, credentialsFile.getName(), renewalIntervalMs,
                                       TimeUnit.DAYS.toMillis(daysToKeepFiles), numFilesToKeep);
  } catch (IOException e) {
    LOG.warn("Failed to create credentials updater. Credentials update disabled", e);
    return null;
  }
}
Use of org.apache.twill.filesystem.LocationFactory in project cdap by caskdata.
Example from class StreamFileOffset, method createIndexLocation.
/**
 * Creates the index file location from the event file location by swapping the
 * event-file suffix for the index-file suffix.
 *
 * @param eventLocation Location for the event file.
 * @return Location of the index file.
 */
private Location createIndexLocation(Location eventLocation) {
  String eventUri = eventLocation.toURI().toString();
  // Drop the event suffix from the end of the URI, then attach the index suffix.
  String baseUri = eventUri.substring(0, eventUri.length() - StreamFileType.EVENT.getSuffix().length());
  return eventLocation.getLocationFactory().create(URI.create(baseUri + StreamFileType.INDEX.getSuffix()));
}
Aggregations