Use of org.apache.flink.table.api.constraints.UniqueConstraint in project flink by apache.
Class HiveCatalog, method createTable.
@Override
public void createTable(ObjectPath tablePath, CatalogBaseTable table, boolean ignoreIfExists)
        throws TableAlreadyExistException, DatabaseNotExistException, CatalogException {
    checkNotNull(tablePath, "tablePath cannot be null");
    checkNotNull(table, "table cannot be null");
    if (!databaseExists(tablePath.getDatabaseName())) {
        throw new DatabaseNotExistException(getName(), tablePath.getDatabaseName());
    }
    boolean managedTable = ManagedTableListener.isManagedTable(this, table);
    Table hiveTable = HiveTableUtil.instantiateHiveTable(tablePath, table, hiveConf, managedTable);
    UniqueConstraint pkConstraint = null;
    List<String> notNullCols = new ArrayList<>();
    boolean isHiveTable = isHiveTable(table.getOptions());
    if (isHiveTable) {
        pkConstraint = table.getSchema().getPrimaryKey().orElse(null);
        String nnColStr = hiveTable.getParameters().remove(NOT_NULL_COLS);
        if (nnColStr != null) {
            notNullCols.addAll(Arrays.asList(nnColStr.split(HiveDDLUtils.COL_DELIMITER)));
        } else {
            for (int i = 0; i < table.getSchema().getFieldDataTypes().length; i++) {
                if (!table.getSchema().getFieldDataTypes()[i].getLogicalType().isNullable()) {
                    notNullCols.add(table.getSchema().getFieldNames()[i]);
                }
            }
        }
        // remove the 'connector' option for hive table
        hiveTable.getParameters().remove(CONNECTOR.key());
    }
    try {
        if (pkConstraint != null || !notNullCols.isEmpty()) {
            // extract constraint traits from table properties
            String pkTraitStr = hiveTable.getParameters().remove(PK_CONSTRAINT_TRAIT);
            byte pkTrait = pkTraitStr == null ? HiveDDLUtils.defaultTrait() : Byte.parseByte(pkTraitStr);
            List<Byte> pkTraits =
                    Collections.nCopies(pkConstraint == null ? 0 : pkConstraint.getColumns().size(), pkTrait);
            List<Byte> nnTraits;
            String nnTraitsStr = hiveTable.getParameters().remove(NOT_NULL_CONSTRAINT_TRAITS);
            if (nnTraitsStr != null) {
                String[] traits = nnTraitsStr.split(HiveDDLUtils.COL_DELIMITER);
                Preconditions.checkArgument(
                        traits.length == notNullCols.size(),
                        "Number of NOT NULL columns and constraint traits mismatch");
                nnTraits = Arrays.stream(traits).map(Byte::new).collect(Collectors.toList());
            } else {
                nnTraits = Collections.nCopies(notNullCols.size(), HiveDDLUtils.defaultTrait());
            }
            client.createTableWithConstraints(hiveTable, hiveConf, pkConstraint, pkTraits, notNullCols, nnTraits);
        } else {
            client.createTable(hiveTable);
        }
    } catch (AlreadyExistsException e) {
        if (!ignoreIfExists) {
            throw new TableAlreadyExistException(getName(), tablePath, e);
        }
    } catch (TException e) {
        throw new CatalogException(String.format("Failed to create table %s", tablePath.getFullName()), e);
    }
}
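The pkConstraint consumed above originates from the Flink-side schema of the CatalogBaseTable. A minimal sketch of how such a schema exposes its primary key as a UniqueConstraint, assuming the legacy TableSchema builder API; the column and constraint names ("id", "name", "pk_id") are made up for illustration:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.api.constraints.UniqueConstraint;

public class PrimaryKeyLookupSketch {
    public static void main(String[] args) {
        // Hypothetical schema; primary-key columns are declared NOT NULL.
        TableSchema schema =
                TableSchema.builder()
                        .field("id", DataTypes.INT().notNull())
                        .field("name", DataTypes.STRING())
                        .primaryKey("pk_id", new String[] {"id"})
                        .build();

        // Mirrors what createTable reads: the declared primary key is exposed as a UniqueConstraint.
        UniqueConstraint pk = schema.getPrimaryKey().orElse(null);
        if (pk != null) {
            System.out.println(pk.getName() + " -> " + pk.getColumns());
        }
    }
}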
Use of org.apache.flink.table.api.constraints.UniqueConstraint in project flink by apache.
Class HiveTableUtil, method createTableSchema.
/**
 * Creates a Flink TableSchema from a Hive table's columns and partition keys.
 */
public static TableSchema createTableSchema(
        List<FieldSchema> cols,
        List<FieldSchema> partitionKeys,
        Set<String> notNullColumns,
        UniqueConstraint primaryKey) {
    List<FieldSchema> allCols = new ArrayList<>(cols);
    allCols.addAll(partitionKeys);
    String[] colNames = new String[allCols.size()];
    DataType[] colTypes = new DataType[allCols.size()];
    for (int i = 0; i < allCols.size(); i++) {
        FieldSchema fs = allCols.get(i);
        colNames[i] = fs.getName();
        colTypes[i] = HiveTypeUtil.toFlinkType(TypeInfoUtils.getTypeInfoFromTypeString(fs.getType()));
        if (notNullColumns.contains(colNames[i])) {
            colTypes[i] = colTypes[i].notNull();
        }
    }
    TableSchema.Builder builder = TableSchema.builder().fields(colNames, colTypes);
    if (primaryKey != null) {
        builder.primaryKey(primaryKey.getName(), primaryKey.getColumns().toArray(new String[0]));
    }
    return builder.build();
}
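A rough usage sketch of this helper, assuming the UniqueConstraint.primaryKey factory from the legacy constraints API and that HiveTableUtil lives in org.apache.flink.table.catalog.hive.util; the column names ("id", "name", "dt") and constraint name ("pk_id") are illustrative only:

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.api.constraints.UniqueConstraint;
// Import path assumed for this sketch.
import org.apache.flink.table.catalog.hive.util.HiveTableUtil;
import org.apache.hadoop.hive.metastore.api.FieldSchema;

public class CreateTableSchemaSketch {
    public static void main(String[] args) {
        // Hypothetical Hive columns; the type strings are parsed by TypeInfoUtils inside the helper.
        List<FieldSchema> cols =
                Arrays.asList(
                        new FieldSchema("id", "int", null),
                        new FieldSchema("name", "string", null));
        List<FieldSchema> partitionKeys =
                Collections.singletonList(new FieldSchema("dt", "string", null));

        // Primary-key columns should also be listed as NOT NULL so their Flink types become non-nullable.
        Set<String> notNullColumns = new HashSet<>(Collections.singletonList("id"));
        UniqueConstraint primaryKey =
                UniqueConstraint.primaryKey("pk_id", Collections.singletonList("id"));

        TableSchema schema =
                HiveTableUtil.createTableSchema(cols, partitionKeys, notNullColumns, primaryKey);
        System.out.println(schema);
    }
}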
Use of org.apache.flink.table.api.constraints.UniqueConstraint in project flink by apache.
Class TableEnvHiveConnectorITCase, method testPKConstraint.
@Test
public void testPKConstraint() throws Exception {
    // While PK constraints are supported since Hive 2.1.0, the constraints cannot be RELY in
    // 2.x versions. So let's only test for 3.x.
    Assume.assumeTrue(HiveVersionTestUtil.HIVE_310_OR_LATER);
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        // test rely PK constraints
        tableEnv.executeSql(
                "create table db1.tbl1 (x tinyint,y smallint,z int, primary key (x,z) disable novalidate rely)");
        CatalogBaseTable catalogTable = hiveCatalog.getTable(new ObjectPath("db1", "tbl1"));
        TableSchema tableSchema = catalogTable.getSchema();
        assertTrue(tableSchema.getPrimaryKey().isPresent());
        UniqueConstraint pk = tableSchema.getPrimaryKey().get();
        assertEquals(2, pk.getColumns().size());
        assertTrue(pk.getColumns().containsAll(Arrays.asList("x", "z")));
        // test norely PK constraints
        tableEnv.executeSql(
                "create table db1.tbl2 (x tinyint,y smallint, primary key (x) disable norely)");
        catalogTable = hiveCatalog.getTable(new ObjectPath("db1", "tbl2"));
        tableSchema = catalogTable.getSchema();
        assertFalse(tableSchema.getPrimaryKey().isPresent());
        // test table w/o PK
        tableEnv.executeSql("create table db1.tbl3 (x tinyint)");
        catalogTable = hiveCatalog.getTable(new ObjectPath("db1", "tbl3"));
        tableSchema = catalogTable.getSchema();
        assertFalse(tableSchema.getPrimaryKey().isPresent());
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
Use of org.apache.flink.table.api.constraints.UniqueConstraint in project flink by apache.
Class TableSchemaUtils, method dropConstraint.
/**
 * Creates a new schema with the constraint of the given name dropped.
 */
public static TableSchema dropConstraint(TableSchema oriSchema, String constraintName) {
    // Validate the constraint name is valid.
    Optional<UniqueConstraint> uniqueConstraintOpt = oriSchema.getPrimaryKey();
    if (!uniqueConstraintOpt.isPresent()
            || !uniqueConstraintOpt.get().getName().equals(constraintName)) {
        throw new ValidationException(
                String.format("Constraint %s to drop does not exist", constraintName));
    }
    TableSchema.Builder builder = builderWithGivenColumns(oriSchema.getTableColumns());
    // Copy watermark specification.
    for (WatermarkSpec wms : oriSchema.getWatermarkSpecs()) {
        builder.watermark(
                wms.getRowtimeAttribute(), wms.getWatermarkExpr(), wms.getWatermarkExprOutputType());
    }
    return builder.build();
}
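A short sketch of dropping a primary key by name with this utility, assuming TableSchemaUtils lives in org.apache.flink.table.utils; the column and constraint names are illustrative:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.utils.TableSchemaUtils;

public class DropConstraintSketch {
    public static void main(String[] args) {
        TableSchema withPk =
                TableSchema.builder()
                        .field("id", DataTypes.BIGINT().notNull())
                        .field("payload", DataTypes.STRING())
                        .primaryKey("pk_id", new String[] {"id"})
                        .build();

        // Dropping by the constraint's name removes the primary key but keeps columns and any watermarks.
        TableSchema withoutPk = TableSchemaUtils.dropConstraint(withPk, "pk_id");
        System.out.println(withoutPk.getPrimaryKey().isPresent()); // false

        // Passing a name that does not match the existing constraint triggers the ValidationException above.
    }
}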