Usage example of org.apache.flink.table.catalog.exceptions.TableNotExistException in the Apache Flink project: class HiveCatalog, method getHiveTable.
@VisibleForTesting
public Table getHiveTable(ObjectPath tablePath) throws TableNotExistException {
    try {
        Table hiveTable = client.getTable(tablePath.getDatabaseName(), tablePath.getObjectName());
        final boolean isHiveTable;
        if (hiveTable.getParameters().containsKey(CatalogPropertiesUtil.IS_GENERIC)) {
            // The explicit "is_generic" marker decides; reading it also removes it from the
            // table parameters (a generic table is, by definition, not a Hive table).
            isHiveTable =
                    !Boolean.parseBoolean(
                            hiveTable.getParameters().remove(CatalogPropertiesUtil.IS_GENERIC));
        } else {
            // No marker present: the table counts as a Hive table only when neither of the
            // Flink connector properties has been persisted on it.
            boolean hasFlinkConnector =
                    hiveTable.getParameters().containsKey(FLINK_PROPERTY_PREFIX + CONNECTOR.key())
                            || hiveTable.getParameters()
                                    .containsKey(FLINK_PROPERTY_PREFIX + CONNECTOR_TYPE);
            isHiveTable = !hasFlinkConnector;
        }
        // Hive tables get the hive connector identifier injected so downstream code can
        // recognize them by their options.
        if (isHiveTable) {
            hiveTable.getParameters().put(CONNECTOR.key(), IDENTIFIER);
        }
        return hiveTable;
    } catch (NoSuchObjectException e) {
        // Metastore reports a missing table; translate into the catalog-level exception.
        throw new TableNotExistException(getName(), tablePath);
    } catch (TException e) {
        throw new CatalogException(
                String.format("Failed to get table %s from Hive metastore", tablePath.getFullName()),
                e);
    }
}
Usage example of org.apache.flink.table.catalog.exceptions.TableNotExistException in the Apache Flink project: class AbstractJdbcCatalog, method getTable.
// ------ tables and views ------

/**
 * Retrieves the table schema and connector options for the given table path by querying the
 * database over JDBC.
 *
 * @param tablePath path (database name + object name) of the table to look up
 * @return a {@link CatalogTable} carrying the derived schema plus JDBC connector properties
 * @throws TableNotExistException if the table does not exist in this catalog
 * @throws CatalogException if the metadata lookup fails for any other reason
 */
@Override
public CatalogBaseTable getTable(ObjectPath tablePath) throws TableNotExistException, CatalogException {
    if (!tableExists(tablePath)) {
        throw new TableNotExistException(getName(), tablePath);
    }
    String dbUrl = baseUrl + tablePath.getDatabaseName();
    // NOTE(review): the table name is interpolated into the SQL text. getSchemaTableName is
    // catalog-derived (not user input), but parameterization is not possible for identifiers;
    // confirm the name is validated upstream.
    // FIX: the PreparedStatement is now a try-with-resources resource as well — previously it
    // was never closed explicitly and leaked until the Connection was closed.
    try (Connection conn = DriverManager.getConnection(dbUrl, username, pwd);
            PreparedStatement ps =
                    conn.prepareStatement(
                            String.format("SELECT * FROM %s;", getSchemaTableName(tablePath)))) {
        DatabaseMetaData metaData = conn.getMetaData();
        Optional<UniqueConstraint> primaryKey =
                getPrimaryKey(metaData, getSchemaName(tablePath), getTableName(tablePath));
        // The statement is only prepared, never executed: its ResultSetMetaData is enough to
        // derive column names and types without reading any rows.
        ResultSetMetaData resultSetMetaData = ps.getMetaData();
        int columnCount = resultSetMetaData.getColumnCount();
        String[] columnNames = new String[columnCount];
        DataType[] types = new DataType[columnCount];
        for (int i = 1; i <= columnCount; i++) {
            columnNames[i - 1] = resultSetMetaData.getColumnName(i);
            types[i - 1] = fromJDBCType(tablePath, resultSetMetaData, i);
            // Mark NOT NULL columns so the Flink schema reflects the database constraint.
            if (resultSetMetaData.isNullable(i) == ResultSetMetaData.columnNoNulls) {
                types[i - 1] = types[i - 1].notNull();
            }
        }
        Schema.Builder schemaBuilder = Schema.newBuilder().fromFields(columnNames, types);
        primaryKey.ifPresent(pk -> schemaBuilder.primaryKeyNamed(pk.getName(), pk.getColumns()));
        Schema tableSchema = schemaBuilder.build();
        // Connector options that let Flink reconnect to this table at runtime.
        Map<String, String> props = new HashMap<>();
        props.put(CONNECTOR.key(), IDENTIFIER);
        props.put(URL.key(), dbUrl);
        props.put(USERNAME.key(), username);
        props.put(PASSWORD.key(), pwd);
        props.put(TABLE_NAME.key(), getSchemaTableName(tablePath));
        return CatalogTable.of(tableSchema, null, Lists.newArrayList(), props);
    } catch (Exception e) {
        throw new CatalogException(
                String.format("Failed getting table %s", tablePath.getFullName()), e);
    }
}
Usage example of org.apache.flink.table.catalog.exceptions.TableNotExistException in the Apache Flink project: class HiveCatalog, method alterTable.
@Override
public void alterTable(ObjectPath tablePath, CatalogBaseTable newCatalogTable, boolean ignoreIfNotExists)
        throws TableNotExistException, CatalogException {
    checkNotNull(tablePath, "tablePath cannot be null");
    checkNotNull(newCatalogTable, "newCatalogTable cannot be null");

    // Look up the current Hive table; when absent, either swallow or rethrow per the flag.
    final Table oldHiveTable;
    try {
        oldHiveTable = getHiveTable(tablePath);
    } catch (TableNotExistException e) {
        if (ignoreIfNotExists) {
            return;
        }
        throw e;
    }

    // Reject changes that would flip the table kind (table vs. view) or its catalog table type.
    CatalogBaseTable existingTable = instantiateCatalogTable(oldHiveTable);
    if (existingTable.getTableKind() != newCatalogTable.getTableKind()) {
        throw new CatalogException(
                String.format(
                        "Table types don't match. Existing table is '%s' and new table is '%s'.",
                        existingTable.getTableKind(), newCatalogTable.getTableKind()));
    }
    disallowChangeCatalogTableType(existingTable.getOptions(), newCatalogTable.getOptions());

    boolean isHiveTable = isHiveTable(oldHiveTable.getParameters());
    Table alteredTable = oldHiveTable;
    if (isHiveTable) {
        AlterTableOp op = HiveTableUtil.extractAlterTableOp(newCatalogTable.getOptions());
        if (op == null) {
            // No dedicated alter operation encoded in the options: rebuild from the catalog table.
            alteredTable =
                    HiveTableUtil.alterTableViaCatalogBaseTable(
                            tablePath, newCatalogTable, oldHiveTable, hiveConf, false);
        } else {
            // Apply the encoded operation in place on the existing Hive table.
            alterTableViaProperties(
                    op,
                    oldHiveTable,
                    (CatalogTable) newCatalogTable,
                    oldHiveTable.getParameters(),
                    newCatalogTable.getOptions(),
                    oldHiveTable.getSd());
        }
    } else {
        alteredTable =
                HiveTableUtil.alterTableViaCatalogBaseTable(
                        tablePath,
                        newCatalogTable,
                        oldHiveTable,
                        hiveConf,
                        ManagedTableListener.isManagedTable(this, newCatalogTable));
    }

    // Hive tables never persist the connector property in the metastore; strip it before writing.
    if (isHiveTable) {
        alteredTable.getParameters().remove(CONNECTOR.key());
    }

    try {
        client.alter_table(tablePath.getDatabaseName(), tablePath.getObjectName(), alteredTable);
    } catch (TException e) {
        throw new CatalogException(
                String.format("Failed to alter table %s", tablePath.getFullName()), e);
    }
}
Usage example of org.apache.flink.table.catalog.exceptions.TableNotExistException in the Apache Flink project: class MySqlCatalogITCase, method testGetTables_TableNotExistException.
@Test
public void testGetTables_TableNotExistException() throws TableNotExistException {
    // Looking up a table that was never created must surface TableNotExistException.
    final String missingTable = "anyTable";
    String expectedMessage =
            String.format(
                    "Table (or view) %s.%s does not exist in Catalog", TEST_DB, missingTable);
    assertThatThrownBy(() -> catalog.getTable(new ObjectPath(TEST_DB, missingTable)))
            .satisfies(anyCauseMatches(TableNotExistException.class, expectedMessage));
}
Usage example of org.apache.flink.table.catalog.exceptions.TableNotExistException in the Apache Flink project: class GenericInMemoryCatalog, method renameTable.
@Override
public void renameTable(ObjectPath tablePath, String newTableName, boolean ignoreIfNotExists)
        throws TableNotExistException, TableAlreadyExistException {
    checkNotNull(tablePath);
    checkArgument(!StringUtils.isNullOrWhitespaceOnly(newTableName));

    // Guard: nothing to rename. Either ignore silently or report the missing table.
    if (!tableExists(tablePath)) {
        if (!ignoreIfNotExists) {
            throw new TableNotExistException(getName(), tablePath);
        }
        return;
    }

    ObjectPath newPath = new ObjectPath(tablePath.getDatabaseName(), newTableName);
    // Guard: the target name must be free within the same database.
    if (tableExists(newPath)) {
        throw new TableAlreadyExistException(getName(), newPath);
    }

    // Re-key the table itself, then carry over every piece of associated metadata so
    // statistics and partitions stay attached to the renamed table.
    tables.put(newPath, tables.remove(tablePath));
    if (tableStats.containsKey(tablePath)) {
        tableStats.put(newPath, tableStats.remove(tablePath));
    }
    if (tableColumnStats.containsKey(tablePath)) {
        tableColumnStats.put(newPath, tableColumnStats.remove(tablePath));
    }
    if (partitions.containsKey(tablePath)) {
        partitions.put(newPath, partitions.remove(tablePath));
    }
    if (partitionStats.containsKey(tablePath)) {
        partitionStats.put(newPath, partitionStats.remove(tablePath));
    }
    if (partitionColumnStats.containsKey(tablePath)) {
        partitionColumnStats.put(newPath, partitionColumnStats.remove(tablePath));
    }
}
End of aggregated usage examples.