Use of org.apache.hadoop.hive.metastore.api.DataConnector in project Hive by Apache.

In class CreateDataConnectorAnalyzer, method analyzeInternal:
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  boolean ifNotExists = false;
  String comment = null;
  String url = null;
  String type = null;
  Map<String, String> props = null;
  String connectorName = unescapeIdentifier(root.getChild(0).getText());
  for (int i = 1; i < root.getChildCount(); i++) {
    ASTNode childNode = (ASTNode) root.getChild(i);
    switch (childNode.getToken().getType()) {
      case HiveParser.TOK_IFNOTEXISTS:
        ifNotExists = true;
        break;
      case HiveParser.TOK_DATACONNECTORCOMMENT:
        comment = unescapeSQLString(childNode.getChild(0).getText());
        break;
      case HiveParser.TOK_DATACONNECTORPROPERTIES:
        props = getProps((ASTNode) childNode.getChild(0));
        break;
      case HiveParser.TOK_DATACONNECTORURL:
        url = unescapeSQLString(childNode.getChild(0).getText());
        // outputs.add(toWriteEntity(url));
        break;
      case HiveParser.TOK_DATACONNECTORTYPE:
        type = unescapeSQLString(childNode.getChild(0).getText());
        break;
      default:
        throw new SemanticException("Unrecognized token in CREATE CONNECTOR statement");
    }
  }
  DataConnector connector = new DataConnector(connectorName, type, url);
  if (comment != null) {
    connector.setDescription(comment);
  }
  if (props != null) {
    connector.setParameters(props);
  }
  CreateDataConnectorDesc desc = new CreateDataConnectorDesc(connectorName, type, url, ifNotExists, comment, props);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
  outputs.add(new WriteEntity(connector, WriteEntity.WriteType.DDL_NO_LOCK));
}
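For context, here is a minimal sketch of the metastore-client path that builds the same kind of DataConnector object the analyzer above assembles. The connector name, type, and URL are illustrative values, and "client" is assumed to be an initialized IMetaStoreClient as in the TestHiveMetaStore snippet further below; only calls already shown in these snippets are used.

// Sketch only: creates a connector equivalent to what the CREATE CONNECTOR analyzer produces.
// Names and values are illustrative; "client" is an assumed IMetaStoreClient instance.
DataConnector connector = new DataConnector("example_connector", "mysql",
    "jdbc:mysql://example.host:3306/exampledb");
connector.setDescription("example connector");                    // maps to TOK_DATACONNECTORCOMMENT
Map<String, String> params = new HashMap<>();
params.put(AbstractJDBCConnectorProvider.JDBC_USERNAME, "hive");  // maps to TOK_DATACONNECTORPROPERTIES
params.put(AbstractJDBCConnectorProvider.JDBC_PASSWORD, "hive");
connector.setParameters(params);
client.createDataConnector(connector);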
In class AbstractAlterDataConnectorAnalyzer, method addAlterDataConnectorDesc:
protected void addAlterDataConnectorDesc(AbstractAlterDataConnectorDesc alterDesc) throws SemanticException {
  DataConnector connector = getDataConnector(alterDesc.getConnectorName());
  outputs.add(new WriteEntity(connector, WriteEntity.WriteType.DDL_NO_LOCK));
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterDesc)));
}
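A minimal sketch of how a concrete ALTER CONNECTOR analyzer might hand its desc to this shared helper. The subclass and desc class names are hypothetical, invented only to illustrate the call pattern, and the QueryState-based constructor is assumed to follow the usual Hive analyzer convention.

// Hypothetical subclass; ExampleAlterDataConnectorUrlAnalyzer and ExampleAlterDataConnectorUrlDesc
// are illustrative names, not actual Hive classes.
public class ExampleAlterDataConnectorUrlAnalyzer extends AbstractAlterDataConnectorAnalyzer {
  public ExampleAlterDataConnectorUrlAnalyzer(QueryState queryState) throws SemanticException {
    super(queryState);
  }

  @Override
  public void analyzeInternal(ASTNode root) throws SemanticException {
    String connectorName = unescapeIdentifier(root.getChild(0).getText());
    String newUrl = unescapeSQLString(root.getChild(1).getText());
    // Build the concrete desc and let the helper register the write entity and DDL task.
    addAlterDataConnectorDesc(new ExampleAlterDataConnectorUrlDesc(connectorName, newUrl));
  }
}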
In class DropDataConnectorAnalyzer, method analyzeInternal:
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  String connectorName = unescapeIdentifier(root.getChild(0).getText());
  boolean ifExists = root.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null;
  DataConnector connector = getDataConnector(connectorName, !ifExists);
  if (connector == null) {
    return;
  }
  inputs.add(new ReadEntity(connector));
  outputs.add(new WriteEntity(connector, WriteEntity.WriteType.DDL_EXCLUSIVE));
  DropDataConnectorDesc desc = new DropDataConnectorDesc(connectorName, ifExists);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
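The metastore-client counterpart of this drop is exercised in the TestHiveMetaStore snippet below; a minimal sketch follows. The connector name is illustrative, and the interpretation of the two boolean flags (taken here as ifNotExists and checkReferences) is an assumption inferred from the test usage, not confirmed by the source shown.

// Sketch only: dropping a connector through an assumed, initialized IMetaStoreClient ("client").
// The flag names in the comments are assumptions; the test below simply passes (false, false).
client.dropDataConnector("example_connector", /* ifNotExists */ false, /* checkReferences */ false);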
In class TestHiveMetaStore, method testDataConnector:
@Test
public void testDataConnector() throws Throwable {
  final String connector_name1 = "test_connector1";
  final String connector_name2 = "test_connector2";
  final String mysql_type = "mysql";
  final String mysql_url = "jdbc:mysql://nightly1.apache.org:3306/hive1";
  final String postgres_type = "postgres";
  final String postgres_url = "jdbc:postgresql://localhost:5432";
  try {
    DataConnector connector = new DataConnector(connector_name1, mysql_type, mysql_url);
    Map<String, String> params = new HashMap<>();
    params.put(AbstractJDBCConnectorProvider.JDBC_USERNAME, "hive");
    params.put(AbstractJDBCConnectorProvider.JDBC_PASSWORD, "hive");
    connector.setParameters(params);
    client.createDataConnector(connector);
    DataConnector dConn = client.getDataConnector(connector_name1);
    assertNotNull(dConn);
    assertEquals("name of returned data connector is different from that of inserted connector", connector_name1, dConn.getName());
    assertEquals("type of data connector returned is different from the type inserted", mysql_type, dConn.getType());
    assertEquals("url of the data connector returned is different from the url inserted", mysql_url, dConn.getUrl());
    // assertEquals(SecurityUtils.getUser(), dConn.getOwnerName());
    assertEquals(PrincipalType.USER, dConn.getOwnerType());
    assertNotEquals("Size of data connector parameters not as expected", 0, dConn.getParametersSize());
    try {
      client.createDataConnector(connector);
      fail("Creating duplicate connector should fail");
    } catch (Exception e) {
      /* as expected */
    }
    connector = new DataConnector(connector_name2, postgres_type, postgres_url);
    params = new HashMap<>();
    params.put(AbstractJDBCConnectorProvider.JDBC_USERNAME, "hive");
    params.put(AbstractJDBCConnectorProvider.JDBC_PASSWORD, "hive");
    connector.setParameters(params);
    client.createDataConnector(connector);
    dConn = client.getDataConnector(connector_name2);
    assertEquals("name of returned data connector is different from that of inserted connector", connector_name2, dConn.getName());
    assertEquals("type of data connector returned is different from the type inserted", postgres_type, dConn.getType());
    assertEquals("url of the data connector returned is different from the url inserted", postgres_url, dConn.getUrl());
    List<String> connectors = client.getAllDataConnectorNames();
    assertEquals("Number of dataconnectors returned is not as expected", 2, connectors.size());
    DataConnector connector1 = new DataConnector(connector);
    connector1.setUrl(mysql_url);
    client.alterDataConnector(connector.getName(), connector1);
    dConn = client.getDataConnector(connector.getName());
    assertEquals("url of the data connector returned is different from the url inserted", mysql_url, dConn.getUrl());
    // alter data connector parameters
    params.put(AbstractJDBCConnectorProvider.JDBC_NUM_PARTITIONS, "5");
    connector1.setParameters(params);
    client.alterDataConnector(connector.getName(), connector1);
    dConn = client.getDataConnector(connector.getName());
    assertEquals("Size of data connector parameters not as expected", 3, dConn.getParametersSize());
    // alter data connector owner
    connector1.setOwnerName("hiveadmin");
    connector1.setOwnerType(PrincipalType.ROLE);
    client.alterDataConnector(connector.getName(), connector1);
    dConn = client.getDataConnector(connector.getName());
    assertEquals("Data connector owner name not as expected", "hiveadmin", dConn.getOwnerName());
    assertEquals("Data connector owner type not as expected", PrincipalType.ROLE, dConn.getOwnerType());
    client.dropDataConnector(connector_name1, false, false);
    connectors = client.getAllDataConnectorNames();
    assertEquals("Number of dataconnectors returned is not as expected", 1, connectors.size());
    client.dropDataConnector(connector_name2, false, false);
    connectors = client.getAllDataConnectorNames();
    assertEquals("Number of dataconnectors returned is not as expected", 0, connectors.size());
  } catch (Throwable e) {
    System.err.println(StringUtils.stringifyException(e));
    System.err.println("testDataConnector() failed.");
    throw e;
  }
}
In class CreateDatabaseAnalyzer, method analyzeInternal:
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  String databaseName = unescapeIdentifier(root.getChild(0).getText());
  boolean ifNotExists = false;
  String comment = null;
  String locationUri = null;
  String managedLocationUri = null;
  String type = DatabaseType.NATIVE.name();
  String connectorName = null;
  Map<String, String> props = null;
  for (int i = 1; i < root.getChildCount(); i++) {
    ASTNode childNode = (ASTNode) root.getChild(i);
    switch (childNode.getToken().getType()) {
      case HiveParser.TOK_IFNOTEXISTS:
        ifNotExists = true;
        break;
      case HiveParser.TOK_DATABASECOMMENT:
        comment = unescapeSQLString(childNode.getChild(0).getText());
        break;
      case HiveParser.TOK_DATABASEPROPERTIES:
        props = getProps((ASTNode) childNode.getChild(0));
        break;
      case HiveParser.TOK_DATABASELOCATION:
        locationUri = unescapeSQLString(childNode.getChild(0).getText());
        outputs.add(toWriteEntity(locationUri));
        break;
      case HiveParser.TOK_DATABASE_MANAGEDLOCATION:
        managedLocationUri = unescapeSQLString(childNode.getChild(0).getText());
        outputs.add(toWriteEntity(managedLocationUri));
        break;
      case HiveParser.TOK_DATACONNECTOR:
        type = DatabaseType.REMOTE.name();
        ASTNode nextNode = (ASTNode) root.getChild(i);
        connectorName = nextNode.getChild(0).getText();
        DataConnector connector = getDataConnector(connectorName, true);
        if (connector == null) {
          throw new SemanticException("Cannot retrieve connector with name: " + connectorName);
        }
        inputs.add(new ReadEntity(connector));
        break;
      default:
        throw new SemanticException("Unrecognized token in CREATE DATABASE statement");
    }
  }
  CreateDatabaseDesc desc;
  Database database = new Database(databaseName, comment, locationUri, props);
  if (type.equalsIgnoreCase(DatabaseType.NATIVE.name())) {
    desc = new CreateDatabaseDesc(databaseName, comment, locationUri, managedLocationUri, ifNotExists, props);
    database.setType(DatabaseType.NATIVE);
    // database = new Database(databaseName, comment, locationUri, props);
    if (managedLocationUri != null) {
      database.setManagedLocationUri(managedLocationUri);
    }
  } else {
    String remoteDbName = databaseName;
    // TODO finalize the property name
    if (props != null && props.get("connector.remoteDbName") != null) {
      remoteDbName = props.get("connector.remoteDbName");
    }
    desc = new CreateDatabaseDesc(databaseName, comment, locationUri, null, ifNotExists, props, type, connectorName, remoteDbName);
    database.setConnector_name(connectorName);
    database.setType(DatabaseType.REMOTE);
    database.setRemote_dbname(remoteDbName);
  }
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
  outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK));
}
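For comparison, a minimal sketch of building the same kind of REMOTE database directly against the metastore client, mirroring the REMOTE branch above. The database and connector names are illustrative, and "client" is again assumed to be an initialized IMetaStoreClient; only setters already shown in the analyzer are used.

// Sketch only: a REMOTE database bound to an existing connector, as the REMOTE branch
// of CreateDatabaseAnalyzer sets up. Names are illustrative.
Database database = new Database("example_remote_db", "maps to a remote database", null, null);
database.setType(DatabaseType.REMOTE);
database.setConnector_name("example_connector");
database.setRemote_dbname("exampledb");
client.createDatabase(database);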