Use of org.apache.drill.exec.store.StoragePluginRegistry in project drill by apache: class TemporaryTablesAutomaticDropTest, method testAutomaticDropOfSeveralSessionTemporaryLocations.
@Test
public void testAutomaticDropOfSeveralSessionTemporaryLocations() throws Exception {
  // Create a session temporary location under the default dfs test tmp directory.
  File firstLocation = createAndCheckSessionTemporaryLocation("first_location", dirTestWatcher.getDfsTestTmpDir());
  StoragePluginRegistry pluginRegistry = getDrillbitContext().getStorage();
  File tempDir = DirTestWatcher.createTempDir(dirTestWatcher.getDir());
  try {
    // Re-point the dfs schema at a second directory and create another session temp location there.
    StoragePluginTestUtils.updateSchemaLocation(StoragePluginTestUtils.DFS_PLUGIN_NAME, pluginRegistry, tempDir);
    File secondLocation = createAndCheckSessionTemporaryLocation("second_location", tempDir);
    // Opening a new client ends the previous session, which must drop both temp locations.
    updateClient("new_client");
    assertFalse("First session temporary location should be absent", firstLocation.exists());
    assertFalse("Second session temporary location should be absent", secondLocation.exists());
  } finally {
    // Restore the original dfs schema location so subsequent tests are unaffected.
    StoragePluginTestUtils.updateSchemaLocation(StoragePluginTestUtils.DFS_PLUGIN_NAME, pluginRegistry, dirTestWatcher.getDfsTestTmpDir());
  }
}
Use of org.apache.drill.exec.store.StoragePluginRegistry in project drill by apache: class TestCTTAS, method init.
@BeforeClass
public static void init() throws Exception {
  // Register an extra writable workspace ("tmp2") on the dfs plugin for CTTAS tests.
  File tmp2 = dirTestWatcher.makeSubDir(Paths.get("tmp2"));
  StoragePluginRegistry registry = getDrillbitContext().getStorage();
  FileSystemConfig oldConfig = (FileSystemConfig) registry.getPlugin(DFS_PLUGIN_NAME).getConfig();
  // Start from the existing workspaces (if any) and add the new one.
  Map<String, WorkspaceConfig> workspaces = new HashMap<>();
  if (oldConfig.getWorkspaces() != null) {
    workspaces.putAll(oldConfig.getWorkspaces());
  }
  workspaces.put(temp2_wk, new WorkspaceConfig(tmp2.getAbsolutePath(), true, null, false));
  // Rebuild the plugin config with the augmented workspace map, preserving
  // connection, Hadoop config, formats, and the enabled flag.
  FileSystemConfig updatedConfig = new FileSystemConfig(
      oldConfig.getConnection(),
      oldConfig.getConfig(),
      workspaces,
      oldConfig.getFormats(),
      PlainCredentialsProvider.EMPTY_CREDENTIALS_PROVIDER);
  updatedConfig.setEnabled(oldConfig.isEnabled());
  registry.put(DFS_PLUGIN_NAME, updatedConfig);
}
Use of org.apache.drill.exec.store.StoragePluginRegistry in project drill by apache: class MaprDBTestsSuite, method createPluginAndGetConf.
/**
 * Registers a MapR-DB file-system storage plugin (under the name "hbase") exactly once
 * per JVM, then returns the shared test {@link Configuration}.
 *
 * <p>Uses double-checked locking on {@code pluginsUpdated} so concurrent callers do not
 * register the plugin twice.
 *
 * @param ctx drillbit context providing the storage registry and JSON mapper
 * @return the shared test configuration
 * @throws Exception if parsing the plugin config or registering the plugin fails
 */
public static Configuration createPluginAndGetConf(DrillbitContext ctx) throws Exception {
  if (!pluginsUpdated) {
    synchronized (MaprDBTestsSuite.class) {
      if (!pluginsUpdated) {
        StoragePluginRegistry pluginRegistry = ctx.getStorage();
        String pluginConfStr = "{" + " \"type\": \"file\"," + " \"enabled\": true," + " \"connection\": \"maprfs:///\"," + " \"workspaces\": {" + " \"default\": {" + " \"location\": \"/tmp\"," + " \"writable\": false," + " \"defaultInputFormat\": \"maprdb\"" + " }," + " \"tmp\": {" + " \"location\": \"/tmp\"," + " \"writable\": true," + " \"defaultInputFormat\": \"parquet\"" + " }," + " \"root\": {" + " \"location\": \"/\"," + " \"writable\": false," + " \"defaultInputFormat\": \"maprdb\"" + " }" + " }," + " \"formats\": {" + " \"maprdb\": {" + " \"type\": \"maprdb\"," + " \"allTextMode\": false," + " \"readAllNumbersAsDouble\": false," + " \"enablePushdown\": true" + " }," + " \"parquet\": {" + " \"type\": \"parquet\"" + " }," + " \"streams\": {" + " \"type\": \"streams\"" + " }" + " }" + "}";
        FileSystemConfig pluginConfig = ctx.getLpPersistence().getMapper().readValue(pluginConfStr, FileSystemConfig.class);
        // create the plugin with "hbase" name so that we can run HBase unit tests against them
        pluginRegistry.put("hbase", pluginConfig);
        // FIX: mark registration done. Without this the guard never flips and every
        // call re-parses and re-registers the plugin, defeating the double-checked lock.
        // NOTE(review): for fully safe double-checked locking, the pluginsUpdated field
        // should be declared volatile — confirm at its declaration (not visible here).
        pluginsUpdated = true;
      }
    }
  }
  return conf;
}
Use of org.apache.drill.exec.store.StoragePluginRegistry in project drill by apache: class SecuredPhoenixBaseTest, method startSecuredDrillCluster.
/**
 * Boots a Kerberos-secured Drill cluster for Phoenix tests: configures logging,
 * starts the cluster authenticated as user1, adds client fixtures for user2 and
 * user3, and registers a Phoenix storage plugin with a $user doAs URL.
 *
 * @throws Exception if cluster startup, client creation, or plugin registration fails
 */
private static void startSecuredDrillCluster() throws Exception {
  logFixture = LogFixture.builder()
      .toConsole()
      .logger(QueryServerEnvironment.class, CURRENT_LOG_LEVEL)
      .logger(SecuredPhoenixBaseTest.class, CURRENT_LOG_LEVEL)
      .logger(KerberosFactory.class, CURRENT_LOG_LEVEL)
      .logger(Krb5LoginModule.class, CURRENT_LOG_LEVEL)
      .logger(QueryServer.class, CURRENT_LOG_LEVEL)
      .logger(ServerAuthenticationHandler.class, CURRENT_LOG_LEVEL)
      .build();
  Map.Entry<String, File> user1 = environment.getUser(1);
  Map.Entry<String, File> user2 = environment.getUser(2);
  Map.Entry<String, File> user3 = environment.getUser(3);
  // until DirTestWatcher ClassRule is implemented for JUnit5
  dirTestWatcher.start(SecuredPhoenixTestSuite.class);
  // The cluster itself starts authenticated as user1 via the client properties below.
  ClusterFixtureBuilder builder = ClusterFixture.builder(dirTestWatcher)
      .configProperty(ExecConstants.USER_AUTHENTICATION_ENABLED, true)
      .configProperty(ExecConstants.USER_AUTHENTICATOR_IMPL, UserAuthenticatorTestImpl.TYPE)
      .configNonStringProperty(ExecConstants.AUTHENTICATION_MECHANISMS, Lists.newArrayList("kerberos"))
      .configProperty(ExecConstants.IMPERSONATION_ENABLED, true)
      .configProperty(ExecConstants.BIT_AUTHENTICATION_ENABLED, true)
      .configProperty(ExecConstants.BIT_AUTHENTICATION_MECHANISM, "kerberos")
      .configProperty(ExecConstants.SERVICE_PRINCIPAL, HBaseKerberosUtils.getPrincipalForTesting())
      .configProperty(ExecConstants.SERVICE_KEYTAB_LOCATION, environment.getServiceKeytab().getAbsolutePath())
      .configClientProperty(DrillProperties.SERVICE_PRINCIPAL, HBaseKerberosUtils.getPrincipalForTesting())
      .configClientProperty(DrillProperties.USER, user1.getKey())
      .configClientProperty(DrillProperties.KEYTAB, user1.getValue().getAbsolutePath());
  startCluster(builder);
  // Additional authenticated clients for the other two test users.
  addSecuredClientFixture(user2);
  addSecuredClientFixture(user3);
  Map<String, Object> phoenixProps = new HashMap<>();
  phoenixProps.put("phoenix.query.timeoutMs", 90000);
  phoenixProps.put("phoenix.query.keepAliveMs", "30000");
  phoenixProps.put("phoenix.queryserver.withRemoteUserExtractor", true);
  StoragePluginRegistry registry = cluster.drillbit().getContext().getStorage();
  // "$user" is substituted by the Phoenix driver for per-user (doAs) connections.
  final String doAsUrl = String.format(getUrlTemplate(), "$user");
  logger.debug("Phoenix Query Server URL: {}", environment.getPqsUrl());
  PhoenixStoragePluginConfig config = new PhoenixStoragePluginConfig(null, 0, null, null, doAsUrl, null, phoenixProps);
  config.setEnabled(true);
  registry.put(PhoenixStoragePluginConfig.NAME + "123", config);
}

/**
 * Adds a Kerberos-authenticated client fixture for the given (username, keytab) pair.
 * Extracted to remove the duplicated Properties-building for user2 and user3.
 */
private static void addSecuredClientFixture(Map.Entry<String, File> user) throws Exception {
  Properties clientProperties = new Properties();
  clientProperties.setProperty(DrillProperties.SERVICE_PRINCIPAL, HBaseKerberosUtils.getPrincipalForTesting());
  clientProperties.setProperty(DrillProperties.USER, user.getKey());
  clientProperties.setProperty(DrillProperties.KEYTAB, user.getValue().getAbsolutePath());
  cluster.addClientFixture(clientProperties);
}
Use of org.apache.drill.exec.store.StoragePluginRegistry in project drill by apache: class TestOpenTSDBPlugin, method setup.
@BeforeClass
public static void setup() throws Exception {
  // Start a bare cluster, then register an OpenTSDB plugin pointed at a free local port
  // (a mock OpenTSDB server is expected to be bound there by the tests).
  startCluster(ClusterFixture.builder(dirTestWatcher));
  portNumber = QueryTestUtil.getFreePortNumber(10_000, 200);
  StoragePluginRegistry registry = cluster.drillbit().getContext().getStorage();
  OpenTSDBStoragePluginConfig config =
      new OpenTSDBStoragePluginConfig(String.format("http://localhost:%s", portNumber));
  config.setEnabled(true);
  registry.put(OpenTSDBStoragePluginConfig.NAME, config);
}
Aggregations