Use of org.apache.flink.table.catalog.CatalogTableImpl in project flink by apache.
The class HiveCatalogTest, method testGetNoSchemaGenericTable:
@Test
public void testGetNoSchemaGenericTable() throws Exception {
    ObjectPath hiveObjectPath =
            new ObjectPath(HiveCatalog.DEFAULT_DB, "testGetNoSchemaGenericTable");
    Map<String, String> properties = new HashMap<>();
    properties.put(CONNECTOR.key(), "jdbc");
    hiveCatalog.createTable(
            hiveObjectPath,
            new CatalogTableImpl(TableSchema.builder().build(), properties, null),
            false);
    CatalogBaseTable catalogTable = hiveCatalog.getTable(hiveObjectPath);
    assertThat(catalogTable.getSchema()).isEqualTo(TableSchema.builder().build());
}
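Taken together, the examples on this page exercise the two CatalogTableImpl constructors: (schema, options, comment) and (schema, partitionKeys, options, comment). A minimal standalone sketch of both forms (the column names and the datagen connector option are illustrative, not taken from the test above):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.catalog.CatalogTableImpl;

public class CatalogTableImplSketch {
    public static void main(String[] args) {
        TableSchema schema = TableSchema.builder()
                .field("name", DataTypes.STRING())
                .field("age", DataTypes.INT())
                .build();

        Map<String, String> options = new HashMap<>();
        options.put("connector", "datagen"); // illustrative connector option

        // Non-partitioned form: schema, options, comment (the comment may be null).
        CatalogTableImpl plainTable = new CatalogTableImpl(schema, options, "a comment");

        // Partitioned form: schema, partition keys, options, comment.
        CatalogTableImpl partitionedTable =
                new CatalogTableImpl(schema, Collections.singletonList("age"), options, null);

        System.out.println(plainTable.getSchema());
        System.out.println(partitionedTable.getPartitionKeys());
    }
}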
Use of org.apache.flink.table.catalog.CatalogTableImpl in project flink by apache.
The class HiveParserDDLSemanticAnalyzer, method convertAlterTableChangeCol:
private Operation convertAlterTableChangeCol(
        CatalogBaseTable alteredTable, String[] qualified, HiveParserASTNode ast)
        throws SemanticException {
    String newComment = null;
    boolean first = false;
    String flagCol = null;
    boolean isCascade = false;
    // col_old_name col_new_name column_type [COMMENT col_comment] [FIRST|AFTER column_name]
    // [CASCADE|RESTRICT]
    String oldColName = ast.getChild(0).getText();
    String newColName = ast.getChild(1).getText();
    String newType =
            HiveParserBaseSemanticAnalyzer.getTypeStringFromAST(
                    (HiveParserASTNode) ast.getChild(2));
    int childCount = ast.getChildCount();
    for (int i = 3; i < childCount; i++) {
        HiveParserASTNode child = (HiveParserASTNode) ast.getChild(i);
        switch (child.getToken().getType()) {
            case HiveASTParser.StringLiteral:
                newComment = HiveParserBaseSemanticAnalyzer.unescapeSQLString(child.getText());
                break;
            case HiveASTParser.TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION:
                flagCol =
                        HiveParserBaseSemanticAnalyzer.unescapeIdentifier(
                                child.getChild(0).getText());
                break;
            case HiveASTParser.KW_FIRST:
                first = true;
                break;
            case HiveASTParser.TOK_CASCADE:
                isCascade = true;
                break;
            case HiveASTParser.TOK_RESTRICT:
                break;
            default:
                throw new ValidationException(
                        "Unsupported token: " + child.getToken() + " for alter table");
        }
    }
    // Validate the operation of renaming a column name.
    Table tab = getTable(new ObjectPath(qualified[0], qualified[1]));
    SkewedInfo skewInfo = tab.getTTable().getSd().getSkewedInfo();
    if ((null != skewInfo)
            && (null != skewInfo.getSkewedColNames())
            && skewInfo.getSkewedColNames().contains(oldColName)) {
        throw new ValidationException(
                oldColName + ErrorMsg.ALTER_TABLE_NOT_ALLOWED_RENAME_SKEWED_COLUMN.getMsg());
    }
    String tblName = HiveParserBaseSemanticAnalyzer.getDotName(qualified);
    ObjectIdentifier tableIdentifier = parseObjectIdentifier(tblName);
    CatalogTable oldTable = (CatalogTable) alteredTable;
    String oldName = HiveParserBaseSemanticAnalyzer.unescapeIdentifier(oldColName);
    String newName = HiveParserBaseSemanticAnalyzer.unescapeIdentifier(newColName);
    if (oldTable.getPartitionKeys().contains(oldName)) {
        // disallow changing partition columns
        throw new ValidationException("CHANGE COLUMN cannot be applied to partition columns");
    }
    TableSchema oldSchema = oldTable.getSchema();
    TableColumn newTableColumn =
            TableColumn.physical(
                    newName,
                    HiveTypeUtil.toFlinkType(TypeInfoUtils.getTypeInfoFromTypeString(newType)));
    TableSchema newSchema =
            OperationConverterUtils.changeColumn(oldSchema, oldName, newTableColumn, first, flagCol);
    Map<String, String> props = new HashMap<>(oldTable.getOptions());
    props.put(ALTER_TABLE_OP, ALTER_COLUMNS.name());
    if (isCascade) {
        props.put(ALTER_COL_CASCADE, "true");
    }
    return new AlterTableSchemaOperation(
            tableIdentifier,
            new CatalogTableImpl(
                    newSchema, oldTable.getPartitionKeys(), props, oldTable.getComment()));
}
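The grammar noted in the comment above maps directly to Hive's ALTER TABLE ... CHANGE COLUMN statement. A minimal sketch of a statement that would reach this conversion path (the table and column names are hypothetical, and it assumes a Hive catalog is already registered and selected):

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.TableEnvironment;

public class ChangeColumnSketch {
    public static void main(String[] args) {
        TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
        // Hypothetical setup: a HiveCatalog containing a table `t` with columns
        // old_col and other_col is assumed to be registered and selected already.
        tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE);

        // old name, new name, new type, optional comment, position, and CASCADE flag,
        // matching the grammar handled by convertAlterTableChangeCol.
        tableEnv.executeSql(
                "ALTER TABLE t CHANGE COLUMN old_col new_col STRING COMMENT 'renamed' "
                        + "AFTER other_col CASCADE");
    }
}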
Use of org.apache.flink.table.catalog.CatalogTableImpl in project flink by apache.
The class HiveDeserializeExceptionTest, method parameters:
@Parameterized.Parameters(name = "{1}")
public static Object[] parameters() {
    HiveWriterFactory writerFactory =
            new HiveWriterFactory(
                    new JobConf(), HiveIgnoreKeyTextOutputFormat.class, new SerDeInfo(),
                    TableSchema.builder().build(), new String[0], new Properties(),
                    HiveShimLoader.loadHiveShim(HiveShimLoader.getHiveVersion()), false);
    HiveCompactReaderFactory compactReaderFactory =
            new HiveCompactReaderFactory(
                    new StorageDescriptor(), new Properties(), new JobConf(),
                    new CatalogTableImpl(TableSchema.builder().build(), Collections.emptyMap(), null),
                    HiveShimLoader.getHiveVersion(), RowType.of(DataTypes.INT().getLogicalType()), false);
    HiveSourceBuilder builder =
            new HiveSourceBuilder(
                    new JobConf(), new Configuration(), new ObjectPath("default", "foo"),
                    HiveShimLoader.getHiveVersion(),
                    new CatalogTableImpl(
                            TableSchema.builder().field("i", DataTypes.INT()).build(),
                            Collections.emptyMap(), null));
    builder.setPartitions(
            Collections.singletonList(new HiveTablePartition(new StorageDescriptor(), new Properties())));
    HiveSource<RowData> hiveSource = builder.buildWithDefaultBulkFormat();
    return new Object[][] {
        new Object[] {writerFactory, writerFactory.getClass().getSimpleName()},
        new Object[] {compactReaderFactory, compactReaderFactory.getClass().getSimpleName()},
        new Object[] {hiveSource, hiveSource.getClass().getSimpleName()}
    };
}
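The test name suggests each parameterized object is round-tripped through Java serialization before a deserialization failure is provoked. A minimal round-trip sketch using Flink's InstantiationUtil (the choice of utility is an assumption for illustration, not necessarily what the test harness itself uses):

import org.apache.flink.util.InstantiationUtil;

public class SerializationRoundTripSketch {
    // Serializes an object with plain Java serialization and reads it back with the
    // current thread's context classloader. The object must be Serializable.
    static <T> T roundTrip(T object) throws Exception {
        byte[] bytes = InstantiationUtil.serializeObject(object);
        return InstantiationUtil.deserializeObject(
                bytes, Thread.currentThread().getContextClassLoader());
    }
}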
Use of org.apache.flink.table.catalog.CatalogTableImpl in project flink by apache.
The class HiveCatalogITCase, method testCsvTableViaAPI:
@Test
public void testCsvTableViaAPI() throws Exception {
    TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
    tableEnv.getConfig()
            .addConfiguration(new Configuration().set(CoreOptions.DEFAULT_PARALLELISM, 1));
    tableEnv.registerCatalog("myhive", hiveCatalog);
    tableEnv.useCatalog("myhive");

    final TableSchema schema =
            TableSchema.builder()
                    .field("name", DataTypes.STRING())
                    .field("age", DataTypes.INT())
                    .build();

    final Map<String, String> sourceOptions = new HashMap<>();
    sourceOptions.put("connector.type", "filesystem");
    sourceOptions.put("connector.path", getClass().getResource("/csv/test.csv").getPath());
    sourceOptions.put("format.type", "csv");
    CatalogTable source = new CatalogTableImpl(schema, sourceOptions, "Comment.");

    Path p = Paths.get(tempFolder.newFolder().getAbsolutePath(), "test.csv");
    final Map<String, String> sinkOptions = new HashMap<>();
    sinkOptions.put("connector.type", "filesystem");
    sinkOptions.put("connector.path", p.toAbsolutePath().toString());
    sinkOptions.put("format.type", "csv");
    CatalogTable sink = new CatalogTableImpl(schema, sinkOptions, "Comment.");

    hiveCatalog.createTable(new ObjectPath(HiveCatalog.DEFAULT_DB, sourceTableName), source, false);
    hiveCatalog.createTable(new ObjectPath(HiveCatalog.DEFAULT_DB, sinkTableName), sink, false);

    Table t = tableEnv.sqlQuery(String.format("select * from myhive.`default`.%s", sourceTableName));
    List<Row> result = CollectionUtil.iteratorToList(t.execute().collect());
    result.sort(Comparator.comparing(String::valueOf));
    // assert query result
    assertThat(result).containsExactly(Row.of("1", 1), Row.of("2", 2), Row.of("3", 3));

    tableEnv.executeSql(
                    String.format(
                            "insert into myhive.`default`.%s select * from myhive.`default`.%s",
                            sinkTableName, sourceTableName))
            .await();

    // assert written result
    File resultFile = new File(p.toAbsolutePath().toString());
    BufferedReader reader = new BufferedReader(new FileReader(resultFile));
    String readLine;
    for (int i = 0; i < 3; i++) {
        readLine = reader.readLine();
        assertThat(readLine).isEqualTo(String.format("%d,%d", i + 1, i + 1));
    }
    // No more line
    assertThat(reader.readLine()).isNull();

    tableEnv.executeSql(String.format("DROP TABLE %s", sourceTableName));
    tableEnv.executeSql(String.format("DROP TABLE %s", sinkTableName));
}
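The test drops its tables through SQL at the end; the same cleanup can also be done directly against the catalog API. A small sketch of such a helper (written against the generic Catalog interface, which the hiveCatalog field above implements):

import org.apache.flink.table.catalog.Catalog;
import org.apache.flink.table.catalog.ObjectPath;
import org.apache.flink.table.catalog.exceptions.TableNotExistException;

public class CatalogCleanupSketch {
    // Drops a table via the catalog API instead of a DROP TABLE statement.
    public static void dropIfExists(Catalog catalog, String database, String table)
            throws TableNotExistException {
        ObjectPath path = new ObjectPath(database, table);
        if (catalog.tableExists(path)) {
            catalog.dropTable(path, /* ignoreIfNotExists */ true);
        }
    }
}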
Use of org.apache.flink.table.catalog.CatalogTableImpl in project flink by apache.
The class OperationConverterUtils, method convertAddReplaceColumns:
public static Operation convertAddReplaceColumns(
        ObjectIdentifier tableIdentifier,
        SqlAddReplaceColumns addReplaceColumns,
        CatalogTable catalogTable,
        SqlValidator sqlValidator) {
    // This is only used by the Hive dialect at the moment. In Hive, only non-partition columns
    // can be added/replaced, and users will only define non-partition columns in the new column
    // list. Therefore, we require that partition columns appear last in the schema (which is in
    // line with Hive). Otherwise, we won't be able to determine the column positions after the
    // non-partition columns are replaced.
    TableSchema oldSchema = catalogTable.getSchema();
    int numPartCol = catalogTable.getPartitionKeys().size();
    Set<String> lastCols =
            oldSchema.getTableColumns()
                    .subList(oldSchema.getFieldCount() - numPartCol, oldSchema.getFieldCount())
                    .stream()
                    .map(TableColumn::getName)
                    .collect(Collectors.toSet());
    if (!lastCols.equals(new HashSet<>(catalogTable.getPartitionKeys()))) {
        throw new ValidationException(
                "ADD/REPLACE COLUMNS on partitioned tables requires partition columns to appear last");
    }
    // set non-partition columns
    TableSchema.Builder builder = TableSchema.builder();
    if (!addReplaceColumns.isReplace()) {
        List<TableColumn> nonPartCols =
                oldSchema.getTableColumns().subList(0, oldSchema.getFieldCount() - numPartCol);
        for (TableColumn column : nonPartCols) {
            builder.add(column);
        }
        setWatermarkAndPK(builder, catalogTable.getSchema());
    }
    for (SqlNode sqlNode : addReplaceColumns.getNewColumns()) {
        builder.add(toTableColumn((SqlTableColumn) sqlNode, sqlValidator));
    }
    // set partition columns
    List<TableColumn> partCols =
            oldSchema.getTableColumns()
                    .subList(oldSchema.getFieldCount() - numPartCol, oldSchema.getFieldCount());
    for (TableColumn column : partCols) {
        builder.add(column);
    }
    // set properties
    Map<String, String> newProperties = new HashMap<>(catalogTable.getOptions());
    newProperties.putAll(extractProperties(addReplaceColumns.getProperties()));
    return new AlterTableSchemaOperation(
            tableIdentifier,
            new CatalogTableImpl(
                    builder.build(),
                    catalogTable.getPartitionKeys(),
                    newProperties,
                    catalogTable.getComment()));
}
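The validation at the top of this method requires partition columns to be the trailing columns of the schema. A minimal sketch of a CatalogTableImpl that satisfies that precondition (the column names and types are illustrative only):

import java.util.Collections;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.catalog.CatalogTable;
import org.apache.flink.table.catalog.CatalogTableImpl;

public class PartitionedTableSketch {
    public static void main(String[] args) {
        // Non-partition columns first, partition column ("dt") last, matching the layout
        // convertAddReplaceColumns expects for partitioned tables.
        TableSchema schema = TableSchema.builder()
                .field("id", DataTypes.BIGINT())
                .field("payload", DataTypes.STRING())
                .field("dt", DataTypes.STRING())
                .build();

        CatalogTable table = new CatalogTableImpl(
                schema, Collections.singletonList("dt"), Collections.emptyMap(), null);

        System.out.println(table.getPartitionKeys());
    }
}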