Use of org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException in project iceberg by apache.
The class SparkCatalog, method stageCreate.
@Override
public StagedTable stageCreate(Identifier ident, StructType schema, Transform[] transforms,
                               Map<String, String> properties) throws TableAlreadyExistsException {
  Schema icebergSchema = SparkSchemaUtil.convert(schema, useTimestampsWithoutZone);
  try {
    Catalog.TableBuilder builder = newBuilder(ident, icebergSchema);
    Transaction transaction = builder
        .withPartitionSpec(Spark3Util.toPartitionSpec(icebergSchema, transforms))
        .withLocation(properties.get("location"))
        .withProperties(Spark3Util.rebuildCreateProperties(properties))
        .createTransaction();
    return new StagedSparkTable(transaction);
  } catch (AlreadyExistsException e) {
    throw new TableAlreadyExistsException(ident);
  }
}
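Callers reach stageCreate through Spark's StagingTableCatalog interface, which is how atomic CREATE TABLE AS SELECT is planned. The sketch below is not from the Iceberg sources; it assumes an already-initialized Iceberg SparkCatalog passed in as a StagingTableCatalog and an existing db namespace, and shows where the TableAlreadyExistsException thrown above would be caught.

import java.util.Collections;
import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException;
import org.apache.spark.sql.connector.catalog.Identifier;
import org.apache.spark.sql.connector.catalog.StagedTable;
import org.apache.spark.sql.connector.catalog.StagingTableCatalog;
import org.apache.spark.sql.connector.expressions.Transform;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;

public class StageCreateSketch {
  // Sketch only: 'catalog' is assumed to be a configured Iceberg SparkCatalog and 'db' to exist.
  static void createStaged(StagingTableCatalog catalog) throws Exception {
    Identifier ident = Identifier.of(new String[] {"db"}, "events");
    StructType schema = new StructType().add("id", DataTypes.LongType);
    try {
      StagedTable staged = catalog.stageCreate(ident, schema, new Transform[0], Collections.emptyMap());
      // Nothing is visible to readers until the staged transaction is committed.
      staged.commitStagedChanges();
    } catch (TableAlreadyExistsException e) {
      // Raised by SparkCatalog.stageCreate when the Iceberg catalog reports AlreadyExistsException.
    }
  }
}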
Use of org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException in project iceberg by apache.
The class SparkCatalog, method createTable.
@Override
public SparkTable createTable(Identifier ident, StructType schema, Transform[] transforms,
                              Map<String, String> properties) throws TableAlreadyExistsException {
  Schema icebergSchema = SparkSchemaUtil.convert(schema, useTimestampsWithoutZone);
  try {
    Catalog.TableBuilder builder = newBuilder(ident, icebergSchema);
    Table icebergTable = builder
        .withPartitionSpec(Spark3Util.toPartitionSpec(icebergSchema, transforms))
        .withLocation(properties.get("location"))
        .withProperties(Spark3Util.rebuildCreateProperties(properties))
        .create();
    return new SparkTable(icebergTable, !cacheEnabled);
  } catch (AlreadyExistsException e) {
    throw new TableAlreadyExistsException(ident);
  }
}
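createTable is the non-staged path used by plain CREATE TABLE. A caller working against Spark's TableCatalog interface can rely on the checked TableAlreadyExistsException declared above; the helper below is a hedged sketch rather than Iceberg code, and the catalog instance, namespace, and fall-back-to-loadTable policy are assumptions.

import java.util.Collections;
import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException;
import org.apache.spark.sql.connector.catalog.Identifier;
import org.apache.spark.sql.connector.catalog.Table;
import org.apache.spark.sql.connector.catalog.TableCatalog;
import org.apache.spark.sql.connector.expressions.Transform;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;

public class CreateTableSketch {
  // Create the table if it is missing, otherwise load the existing one.
  static Table createOrLoad(TableCatalog catalog) throws Exception {
    Identifier ident = Identifier.of(new String[] {"db"}, "events");
    StructType schema = new StructType().add("id", DataTypes.LongType);
    try {
      return catalog.createTable(ident, schema, new Transform[0], Collections.emptyMap());
    } catch (TableAlreadyExistsException e) {
      // SparkCatalog maps Iceberg's AlreadyExistsException to this Spark analysis exception.
      return catalog.loadTable(ident);
    }
  }
}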
Use of org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException in project iceberg by apache.
The class SparkCatalog, method renameTable.
@Override
public void renameTable(Identifier from, Identifier to) throws NoSuchTableException, TableAlreadyExistsException {
  try {
    checkNotPathIdentifier(from, "renameTable");
    checkNotPathIdentifier(to, "renameTable");
    icebergCatalog.renameTable(buildIdentifier(from), buildIdentifier(to));
  } catch (org.apache.iceberg.exceptions.NoSuchTableException e) {
    throw new NoSuchTableException(from);
  } catch (AlreadyExistsException e) {
    throw new TableAlreadyExistsException(to);
  }
}
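renameTable maps both failure modes of the underlying Iceberg catalog onto Spark's checked exceptions: a missing source table becomes NoSuchTableException and an existing destination becomes TableAlreadyExistsException. The helper below is a sketch of handling both outcomes against the TableCatalog interface; the method name and boolean return are assumptions, not Iceberg API.

import org.apache.spark.sql.catalyst.analysis.NoSuchTableException;
import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException;
import org.apache.spark.sql.connector.catalog.Identifier;
import org.apache.spark.sql.connector.catalog.TableCatalog;

public class RenameSketch {
  // Returns true if the rename succeeded, false if either end made it impossible.
  static boolean tryRename(TableCatalog catalog, Identifier from, Identifier to) {
    try {
      catalog.renameTable(from, to);
      return true;
    } catch (NoSuchTableException e) {
      // 'from' does not exist in the backing Iceberg catalog.
      return false;
    } catch (TableAlreadyExistsException e) {
      // 'to' already exists; the rename is rejected instead of overwriting it.
      return false;
    }
  }
}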
Use of org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException in project iceberg by apache.
The class SparkSessionCatalog, method stageCreateOrReplace.
@Override
public StagedTable stageCreateOrReplace(Identifier ident, StructType schema, Transform[] partitions,
                                        Map<String, String> properties) throws NoSuchNamespaceException {
  String provider = properties.get("provider");
  TableCatalog catalog;
  if (useIceberg(provider)) {
    if (asStagingCatalog != null) {
      return asStagingCatalog.stageCreateOrReplace(ident, schema, partitions, properties);
    }
    catalog = icebergCatalog;
  } else {
    catalog = getSessionCatalog();
  }

  // drop the table if it exists
  catalog.dropTable(ident);

  try {
    // create the table with the session catalog, then wrap it in a staged table that will delete to roll back
    Table sessionCatalogTable = catalog.createTable(ident, schema, partitions, properties);
    return new RollbackStagedTable(catalog, ident, sessionCatalogTable);
  } catch (TableAlreadyExistsException e) {
    // the table was deleted, but now already exists again. retry the replace.
    return stageCreateOrReplace(ident, schema, partitions, properties);
  }
}
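For the non-Iceberg path, stageCreateOrReplace is not atomic: the existing table is dropped, a new one is created, and the returned RollbackStagedTable drops it again if the operation is aborted. A TableAlreadyExistsException between the drop and the create means another writer recreated the table, so the method retries. The sketch below shows how a caller of the StagingTableCatalog interface would commit or roll back the staged result; the catalog instance, identifier, and schema are assumptions.

import java.util.Collections;
import org.apache.spark.sql.connector.catalog.Identifier;
import org.apache.spark.sql.connector.catalog.StagedTable;
import org.apache.spark.sql.connector.catalog.StagingTableCatalog;
import org.apache.spark.sql.connector.expressions.Transform;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;

public class CreateOrReplaceSketch {
  // Sketch only: 'catalog' is assumed to be a configured SparkSessionCatalog.
  static void replaceTable(StagingTableCatalog catalog) throws Exception {
    Identifier ident = Identifier.of(new String[] {"db"}, "events");
    StructType schema = new StructType().add("id", DataTypes.LongType);
    StagedTable staged = catalog.stageCreateOrReplace(ident, schema, new Transform[0], Collections.emptyMap());
    try {
      // ... write through the staged table here ...
      staged.commitStagedChanges();
    } catch (Exception e) {
      // For the drop-and-create path above, abort drops the table that was just created.
      staged.abortStagedChanges();
      throw e;
    }
  }
}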
Use of org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException in project iceberg by apache.
The class SparkSessionCatalog, method stageReplace.
@Override
public StagedTable stageReplace(Identifier ident, StructType schema, Transform[] partitions,
                                Map<String, String> properties) throws NoSuchNamespaceException, NoSuchTableException {
  String provider = properties.get("provider");
  TableCatalog catalog;
  if (useIceberg(provider)) {
    if (asStagingCatalog != null) {
      return asStagingCatalog.stageReplace(ident, schema, partitions, properties);
    }
    catalog = icebergCatalog;
  } else {
    catalog = getSessionCatalog();
  }

  // attempt to drop the table and fail if it doesn't exist
  if (!catalog.dropTable(ident)) {
    throw new NoSuchTableException(ident);
  }

  try {
    // create the table with the session catalog, then wrap it in a staged table that will delete to roll back
    Table table = catalog.createTable(ident, schema, partitions, properties);
    return new RollbackStagedTable(catalog, ident, table);
  } catch (TableAlreadyExistsException e) {
    // the table was deleted, but now already exists again. retry the replace.
    return stageReplace(ident, schema, partitions, properties);
  }
}
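stageReplace differs only in that a missing table is an error: the drop must succeed, otherwise NoSuchTableException is thrown before anything is created. The helper below is a hedged sketch that falls back to stageCreateOrReplace when the table is missing; the helper name and the fallback policy are assumptions, not part of SparkSessionCatalog.

import java.util.Map;
import org.apache.spark.sql.catalyst.analysis.NoSuchTableException;
import org.apache.spark.sql.connector.catalog.Identifier;
import org.apache.spark.sql.connector.catalog.StagedTable;
import org.apache.spark.sql.connector.catalog.StagingTableCatalog;
import org.apache.spark.sql.connector.expressions.Transform;
import org.apache.spark.sql.types.StructType;

public class ReplaceSketch {
  static StagedTable replaceOrCreate(StagingTableCatalog catalog, Identifier ident, StructType schema,
                                     Transform[] partitions, Map<String, String> properties) throws Exception {
    try {
      // Fails with NoSuchTableException when there is no existing table to replace.
      return catalog.stageReplace(ident, schema, partitions, properties);
    } catch (NoSuchTableException e) {
      return catalog.stageCreateOrReplace(ident, schema, partitions, properties);
    }
  }
}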